diff --git a/go.mod b/go.mod index 776a2adc..d4924059 100644 --- a/go.mod +++ b/go.mod @@ -1,88 +1,123 @@ module github.com/bitrise-io/bitrise-webhooks -go 1.23.0 +go 1.24.0 toolchain go1.24.1 require ( - cloud.google.com/go/pubsub v1.42.0 + cloud.google.com/go/pubsub v1.50.1 github.com/bitrise-io/api-utils v0.0.0-20211025122143-6499571b8433 - github.com/bitrise-io/envman v0.0.0-20240730123632-8066eeb61599 + github.com/bitrise-io/envman v0.0.0 github.com/go-playground/webhooks/v6 v6.4.0 github.com/google/go-github/v67 v67.0.0 github.com/gorilla/mux v1.8.1 github.com/pkg/errors v0.9.1 - github.com/stretchr/testify v1.9.0 - github.com/xanzy/go-gitlab v0.108.0 + github.com/stretchr/testify v1.11.1 + github.com/xanzy/go-gitlab v0.115.0 go.uber.org/zap v1.27.0 - google.golang.org/api v0.195.0 - gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 + google.golang.org/api v0.254.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.74.8 ) require ( - cloud.google.com/go v0.115.1 // indirect - cloud.google.com/go/auth v0.9.1 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.13 // indirect - github.com/DataDog/appsec-internal-go v1.7.0 // indirect - github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 // indirect - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1 // indirect - github.com/DataDog/datadog-go/v5 v5.5.0 // indirect - github.com/DataDog/go-libddwaf/v3 v3.3.0 // indirect - github.com/DataDog/go-sqllexer v0.0.11 // indirect + cloud.google.com/go v0.121.6 // indirect + cloud.google.com/go/auth v0.17.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/pubsub/v2 v2.0.0 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.67.0 // indirect + github.com/DataDog/datadog-agent/pkg/obfuscate v0.67.0 // indirect + github.com/DataDog/datadog-agent/pkg/proto v0.67.0 // indirect + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.69.0 // indirect + github.com/DataDog/datadog-agent/pkg/trace v0.67.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.67.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.67.0 // indirect + github.com/DataDog/datadog-agent/pkg/version v0.67.0 // indirect + github.com/DataDog/datadog-go/v5 v5.6.0 // indirect + github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2 v2.3.0 // indirect + github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.3.0 // indirect + github.com/DataDog/dd-trace-go/v2 v2.3.0 // indirect + github.com/DataDog/go-libddwaf/v4 v4.3.2 // indirect + github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20250721125240-fdf1ef85b633 // indirect + github.com/DataDog/go-sqllexer v0.1.6 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect - github.com/DataDog/sketches-go v1.4.5 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.27.0 // indirect + github.com/DataDog/sketches-go v1.4.7 // indirect + github.com/Masterminds/semver/v3 v3.3.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/bitrise-io/go-utils v1.0.13 // indirect github.com/blendle/zapdriver v1.3.1 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect - github.com/ebitengine/purego v0.7.1 // indirect + github.com/ebitengine/purego v0.8.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/s2a-go v0.1.8 // indirect + github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.13.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect - github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect - github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect - github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/outcaste-io/ristretto v0.2.3 // indirect - github.com/philhofer/fwd v1.1.2 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/ryanuber/go-glob v1.0.0 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect - github.com/tinylib/msgp v1.1.9 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect + github.com/shirou/gopsutil/v4 v4.25.3 // indirect + github.com/theckman/httpforwarded v0.4.0 // indirect + github.com/tinylib/msgp v1.2.5 // indirect + github.com/tklauser/go-sysconf v0.3.15 // indirect + github.com/tklauser/numcpus v0.10.0 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/component v1.31.0 // indirect + go.opentelemetry.io/collector/featuregate v1.31.0 // indirect + go.opentelemetry.io/collector/internal/telemetry v0.125.0 // indirect + go.opentelemetry.io/collector/pdata v1.31.0 // 
indirect + go.opentelemetry.io/collector/semconv v0.125.0 // indirect + go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/log v0.11.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.36.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/text v0.23.0 // indirect - golang.org/x/time v0.6.0 // indirect + golang.org/x/crypto v0.43.0 // indirect + golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect + golang.org/x/mod v0.28.0 // indirect + golang.org/x/net v0.46.0 // indirect + golang.org/x/oauth2 v0.32.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/text v0.30.0 // indirect + golang.org/x/time v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect + google.golang.org/grpc v1.76.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index b8b97f2f..5f6c26c2 100644 --- a/go.sum +++ b/go.sum @@ -1,95 +1,131 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= -cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= -cloud.google.com/go/auth v0.9.1 h1:+pMtLEV2k0AXKvs/tGZojuj6QaioxfUjOpMsG5Gtx+w= -cloud.google.com/go/auth v0.9.1/go.mod h1:Sw8ocT5mhhXxFklyhT12Eiy0ed6tTrPMCJjSI8KhYLk= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= -cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= -cloud.google.com/go/kms v1.18.5 h1:75LSlVs60hyHK3ubs2OHd4sE63OAMcM2BdSJc2bkuM4= -cloud.google.com/go/kms v1.18.5/go.mod h1:yXunGUGzabH8rjUPImp2ndHiGolHeWJJ0LODLedicIY= -cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE= 
-cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU= -cloud.google.com/go/pubsub v1.42.0 h1:PVTbzorLryFL5ue8esTS2BfehUs0ahyNOY9qcd+HMOs= -cloud.google.com/go/pubsub v1.42.0/go.mod h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= +cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/kms v1.22.0 h1:dBRIj7+GDeeEvatJeTB19oYZNV0aj6wEqSIT/7gLqtk= +cloud.google.com/go/kms v1.22.0/go.mod h1:U7mf8Sva5jpOb4bxYZdtw/9zsbIjrklYwPcvMk34AL8= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/pubsub v1.50.1 h1:fzbXpPyJnSGvWXF1jabhQeXyxdbCIkXTpjXHy7xviBM= +cloud.google.com/go/pubsub v1.50.1/go.mod h1:6YVJv3MzWJUVdvQXG081sFvS0dWQOdnV+oTo++q/xFk= +cloud.google.com/go/pubsub/v2 v2.0.0 h1:0qS6mRJ41gD1lNmM/vdm6bR7DQu6coQcVwD+VPf0Bz0= +cloud.google.com/go/pubsub/v2 v2.0.0/go.mod h1:0aztFxNzVQIRSZ8vUr79uH2bS3jwLebwK6q1sgEub+E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0= -github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 h1:/oxF4p/4XUGNpNw2TE7vDu/pJV3elEAZ+jES0/MWtiI= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1/go.mod h1:AVPQWekk3h9AOC7+plBlNB68Sy6UIGFoMMVUDeSoNoI= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1 h1:mmkGuCHBFuDBpuwNMcqtY1x1I2fCaPH2Br4xPAAjbkM= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1/go.mod h1:JhAilx32dkIgoDkFXquCTfaWDsAOfe+vfBaxbiZoPI0= -github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= -github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4= -github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec= -github.com/DataDog/go-sqllexer v0.0.11 h1:OfPBjmayreblOXreszbrOTICNZ3qWrA6Bg4sypvxpbw= -github.com/DataDog/go-sqllexer v0.0.11/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.67.0 h1:2mEwRWvhIPHMPK4CMD8iKbsrYBxeMBSuuCXumQAwShU= +github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.67.0/go.mod h1:ejJHsyJTG7NU6c6TDbF7dmckD3g+AUGSdiSXy+ZyaCE= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.67.0 h1:NcvyDVIUA0NbBDbp7QJnsYhoBv548g8bXq886795mCQ= 
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.67.0/go.mod h1:1oPcs3BUTQhiTkmk789rb7ob105MxNV6OuBa28BdukQ= +github.com/DataDog/datadog-agent/pkg/proto v0.67.0 h1:7dO6mKYRb7qSiXEu7Q2mfeKbhp4hykCAULy4BfMPmsQ= +github.com/DataDog/datadog-agent/pkg/proto v0.67.0/go.mod h1:bKVXB7pxBg0wqXF6YSJ+KU6PeCWKDyJj83kUH1ab+7o= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.69.0 h1:/DsN4R+IkC6t1+4cHSfkxzLtDl84rBbPC5Wa9srBAoM= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.69.0/go.mod h1:Th2LD/IGid5Rza55pzqGu6nUdOv/Rts6wPwLjTyOSTs= +github.com/DataDog/datadog-agent/pkg/trace v0.67.0 h1:dqt+/nObo0JKyaEqIMZgfqGZbx9TfEHpCkrjQ/zzH7k= +github.com/DataDog/datadog-agent/pkg/trace v0.67.0/go.mod h1:zmZoEtKvOnaKHbJGBKH3a4xuyPrSfBaF0ZE3Q3rCoDw= +github.com/DataDog/datadog-agent/pkg/util/log v0.67.0 h1:xrH15QNqeJZkYoXYi44VCIvGvTwlQ3z2iT2QVTGiT7s= +github.com/DataDog/datadog-agent/pkg/util/log v0.67.0/go.mod h1:dfVLR+euzEyg1CeiExgJQq1c1dod42S6IeiRPj8H7Yk= +github.com/DataDog/datadog-agent/pkg/util/scrubber v0.67.0 h1:aIWF85OKxXGo7rVyqJ7jm7lm2qCQrgyXzYyFuw0T2EQ= +github.com/DataDog/datadog-agent/pkg/util/scrubber v0.67.0/go.mod h1:Lfap5FuM4b/Pw9IrTuAvWBWZEmXOvZhCya3dYv4G8O0= +github.com/DataDog/datadog-agent/pkg/version v0.67.0 h1:TB8H8r+laB1Qdttvvc6XJVyLGxp8E6j2f2Mh5IPbYmQ= +github.com/DataDog/datadog-agent/pkg/version v0.67.0/go.mod h1:kvAw/WbI7qLAsDI2wHabZfM7Cv2zraD3JA3323GEB+8= +github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw= +github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= +github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2 v2.3.0 h1:ecO1vHg3uFINdQf/vH2gmlASo7YmAh8QYjA99no87o4= +github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2 v2.3.0/go.mod h1:e8a7BPfFWNisuP9hJjvn9W4XbO1YT9NSkF/D29Xc6RQ= +github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.3.0 h1:ZaM8iFAoM33TaUZ9pACkccVMfQ9lFzLvJSCYwE3LcKk= +github.com/DataDog/dd-trace-go/contrib/net/http/v2 v2.3.0/go.mod h1:E5iHsN3Mj4JNTo+eGB0KENF6HeaT8TAwUjKqe/no2SQ= +github.com/DataDog/dd-trace-go/v2 v2.3.0 h1:0Y5kx+Wbod0z8moY0vUbKl6OM0oIV4zAynsVmsq+XT8= +github.com/DataDog/dd-trace-go/v2 v2.3.0/go.mod h1:yFomJ/rqKNLDbS9ohIDibdz8q9GK0MUSSkBdVDCibGA= +github.com/DataDog/go-libddwaf/v4 v4.3.2 h1:YGvW2Of1C4e1yU+p7iibmhN2zEOgi9XEchbhQjBxb/A= +github.com/DataDog/go-libddwaf/v4 v4.3.2/go.mod h1:/AZqP6zw3qGJK5mLrA0PkfK3UQDk1zCI2fUNCt4xftE= +github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20250721125240-fdf1ef85b633 h1:ZRLR9Lbym748e8RznWzmSoK+OfV+8qW6SdNYA4/IqdA= +github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20250721125240-fdf1ef85b633/go.mod h1:YFoTl1xsMzdSRFIu33oCSPS/3+HZAPGpO3oOM96wXCM= +github.com/DataDog/go-sqllexer v0.1.6 h1:skEXpWEVCpeZFIiydoIa2f2rf+ymNpjiIMqpW4w3YAk= +github.com/DataDog/go-sqllexer v0.1.6/go.mod h1:GGpo1h9/BVSN+6NJKaEcJ9Jn44Hqc63Rakeb+24Mjgo= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4= github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= -github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE= -github.com/DataDog/sketches-go v1.4.5/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.27.0 h1:5US5SqqhfkZkg/E64uvn7YmeTwnudJHtlPEH/LOT99w= 
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.27.0/go.mod h1:VRo4D6rj92AExpVBlq3Gcuol9Nm1bber12KyxRjKGWw= +github.com/DataDog/sketches-go v1.4.7 h1:eHs5/0i2Sdf20Zkj0udVFWuCrXGRFig2Dcfm5rtcTxc= +github.com/DataDog/sketches-go v1.4.7/go.mod h1:eAmQ/EBmtSO+nQp7IZMZVRPT4BQTmIc5RZQ+deGlTPM= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.29.1/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitrise-io/api-utils v0.0.0-20211025122143-6499571b8433 h1:TKFE2oT7SZjXBAJFBdUDAeoGsBvPHRUzxguYzqZw+p4= github.com/bitrise-io/api-utils v0.0.0-20211025122143-6499571b8433/go.mod h1:ikUVPSWA9inuIXM+XzR1aMYiV8HR1oy70UMwcdaDeK4= -github.com/bitrise-io/envman v0.0.0-20240730123632-8066eeb61599 h1:UaudsJR8LbzL7wjz5D2RVtxN7RYnTL0ZoX+lA9yuqDI= -github.com/bitrise-io/envman v0.0.0-20240730123632-8066eeb61599/go.mod h1:7yJQdVdq8BxJYq2xjG0yViQf/aspJLJ/xqk/OnP6lGE= +github.com/bitrise-io/envman v0.0.0 h1:+YmzuGvcNDvS7zWnzKiwhajY3M3DVtSLR+bz2jfSntE= +github.com/bitrise-io/envman v0.0.0/go.mod h1:7yJQdVdq8BxJYq2xjG0yViQf/aspJLJ/xqk/OnP6lGE= github.com/bitrise-io/go-utils v1.0.13 h1:1QENhTS/JlKH9F7+/nB+TtbTcor6jGrE6cQ4CJWfp5U= github.com/bitrise-io/go-utils v1.0.13/go.mod h1:ZY1DI+fEpZuFpO9szgDeICM4QbqoWVt0RSY3tRI1heY= github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE= github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc 
h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8AOC29zl2mnhQjwyLxxOTuhUazWRsg= -github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds= -github.com/ebitengine/purego v0.7.1 h1:6/55d26lG3o9VCZX8lping+bZcmShseiqlh2bnUDiPA= -github.com/ebitengine/purego v0.7.1/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ= +github.com/ebitengine/purego v0.8.3 h1:K+0AjQp63JEZTEMZiwsI9g0+hAMNohwUOtY0RPGexmc= +github.com/ebitengine/purego v0.8.3/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= -github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-playground/webhooks/v6 v6.4.0 h1:KLa6y7bD19N48rxJDHM0DpE3T4grV7GxMy1b/aHMWPY= github.com/go-playground/webhooks/v6 v6.4.0/go.mod h1:5lBxopx+cAJiBI4+kyRbuHrEi+hYRDdRHuRR4Ya5Ums= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -97,6 +133,8 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U= +github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= @@ -116,99 +154,107 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github/v67 v67.0.0 h1:g11NDAmfaBaCO8qYdI9fsmbaRipHNWRIU/2YGvlh4rg= github.com/google/go-github/v67 v67.0.0/go.mod h1:zH3K7BxjFndr9QSeFibx4lTKkYS3K9nDanoI1NjaOtY= github.com/google/go-querystring v1.1.0 
h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= -github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= -github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp 
v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/justinas/alice v1.2.0/go.mod h1:fN5HRH/reO/zrUflLfTN43t3vXvKzvZIENsNEe7i7qA= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 h1:PpXWgLPs+Fqr325bN2FD2ISlRRztXibcX6e8f5FR5Dc= +github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= 
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.125.0 h1:0dOJCEtabevxxDQmxed69oMzSw+gb3ErCnFwFYZFu0M= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.125.0/go.mod h1:QwzQhtxPThXMUDW1XRXNQ+l0GrI2BRsvNhX6ZuKyAds= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.125.0 h1:F68/Nbpcvo3JZpaWlRUDJtG7xs8FHBZ7A8GOMauDkyc= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.125.0/go.mod h1:haO4cJtAk05Y0p7NO9ME660xxtSh54ifCIIT7+PO9C0= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= -github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= -github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= +github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY= github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= -github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= +github.com/shirou/gopsutil/v4 v4.25.3 h1:SeA68lsu8gLggyMbmCn8cmp97V1TI9ld9sVzAUcKcKE= +github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -225,29 +271,85 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= -github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= 
-github.com/xanzy/go-gitlab v0.108.0 h1:IEvEUWFR5G1seslRhJ8gC//INiIUqYXuSUoBd7/gFKE= -github.com/xanzy/go-gitlab v0.108.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/theckman/httpforwarded v0.4.0 h1:N55vGJT+6ojTnLY3LQCNliJC4TW0P0Pkeys1G1WpX2w= +github.com/theckman/httpforwarded v0.4.0/go.mod h1:GVkFynv6FJreNbgH/bpOU9ITDZ7a5WuzdNCtIMI1pVI= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= +github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= +github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= +github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= +github.com/vmihailenco/msgpack/v4 v4.3.13 h1:A2wsiTbvp63ilDaWmsk2wjx6xZdxQOvpiNlKBGKKXKI= +github.com/vmihailenco/msgpack/v4 v4.3.13/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/xanzy/go-gitlab v0.115.0 h1:6DmtItNcVe+At/liXSgfE/DZNZrGfalQmBRmOcJjOn8= +github.com/xanzy/go-gitlab v0.115.0/go.mod h1:5XCDtM7AM6WMKmfDdOiEpyRWUqui2iS9ILfvCZ2gJ5M= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.einride.tech/aip v0.67.1 h1:d/4TW92OxXBngkSOwWS2CH5rez869KpKMaN44mdxkFI= -go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.einride.tech/aip v0.73.0 h1:bPo4oqBo2ZQeBKo4ZzLb1kxYXTY1ysJhpvQyfuGzvps= +go.einride.tech/aip v0.73.0/go.mod h1:Mj7rFbmXEgw0dq1dqJ7JGMvYCZZVxmGOR3S4ZcV5LvQ= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod 
h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/component v1.31.0 h1:9LzU8X1RhV3h8/QsAoTX23aFUfoJ3EUc9O/vK+hFpSI= +go.opentelemetry.io/collector/component v1.31.0/go.mod h1:JbZl/KywXJxpUXPbt96qlEXJSym1zQ2hauMxYMuvlxM= +go.opentelemetry.io/collector/component/componentstatus v0.125.0 h1:zlxGQZYd9kknRZSjRpOYW5SBjl0a5zYFYRPbreobXoU= +go.opentelemetry.io/collector/component/componentstatus v0.125.0/go.mod h1:bHXc2W8bqqo9adOvCgvhcO7pYzJOSpyV4cuQ1wiIl04= +go.opentelemetry.io/collector/component/componenttest v0.125.0 h1:E2mpnMQbkMpYoZ3Q8pHx4kod7kedjwRs1xqDpzCe/84= +go.opentelemetry.io/collector/component/componenttest v0.125.0/go.mod h1:pQtsE1u/SPZdTphP5BZP64XbjXSq6wc+mDut5Ws/JDI= +go.opentelemetry.io/collector/consumer v1.31.0 h1:L+y66ywxLHnAxnUxv0JDwUf5bFj53kMxCCyEfRKlM7s= +go.opentelemetry.io/collector/consumer v1.31.0/go.mod h1:rPsqy5ni+c6xNMUkOChleZYO/nInVY6eaBNZ1FmWJVk= +go.opentelemetry.io/collector/consumer/consumertest v0.125.0 h1:TUkxomGS4DAtjBvcWQd2UY4FDLLEKMQD6iOIDUr/5dM= +go.opentelemetry.io/collector/consumer/consumertest v0.125.0/go.mod h1:vkHf3y85cFLDHARO/cTREVjLjOPAV+cQg7lkC44DWOY= +go.opentelemetry.io/collector/consumer/xconsumer v0.125.0 h1:oTreUlk1KpMSWwuHFnstW+orrjGTyvs2xd3o/Dpy+hI= +go.opentelemetry.io/collector/consumer/xconsumer v0.125.0/go.mod h1:FX0G37r0W+wXRgxxFtwEJ4rlsCB+p0cIaxtU3C4hskw= +go.opentelemetry.io/collector/featuregate v1.31.0 h1:20q7plPQZwmAiaYAa6l1m/i2qDITZuWlhjr4EkmeQls= +go.opentelemetry.io/collector/featuregate v1.31.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc= +go.opentelemetry.io/collector/internal/telemetry v0.125.0 h1:6lcGOxw3dAg7LfXTKdN8ZjR+l7KvzLdEiPMhhLwG4r4= +go.opentelemetry.io/collector/internal/telemetry v0.125.0/go.mod h1:5GyFslLqjZgq1DZTtFiluxYhhXrCofHgOOOybodDPGE= +go.opentelemetry.io/collector/pdata v1.31.0 h1:P5WuLr1l2JcIvr6Dw2hl01ltp2ZafPnC4Isv+BLTBqU= +go.opentelemetry.io/collector/pdata v1.31.0/go.mod h1:m41io9nWpy7aCm/uD1L9QcKiZwOP0ldj83JEA34dmlk= +go.opentelemetry.io/collector/pdata/pprofile v0.125.0 h1:Qqlx8w1HpiYZ9RQqjmMQIysI0cHNO1nh3E/fCTeFysA= +go.opentelemetry.io/collector/pdata/pprofile v0.125.0/go.mod h1:p/yK023VxAp8hm27/1G5DPTcMIpnJy3cHGAFUQZGyaQ= +go.opentelemetry.io/collector/pdata/testdata v0.125.0 h1:due1Hl0EEVRVwfCkiamRy5E8lS6yalv0lo8Zl/SJtGw= +go.opentelemetry.io/collector/pdata/testdata v0.125.0/go.mod h1:1GpEWlgdMrd+fWsBk37ZC2YmOP5YU3gFQ4rWuCu9g24= +go.opentelemetry.io/collector/pipeline v0.125.0 h1:oitBgcAFqntDB4ihQJUHJSQ8IHqKFpPkaTVbTYdIUzM= +go.opentelemetry.io/collector/pipeline v0.125.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= +go.opentelemetry.io/collector/processor v1.31.0 h1:+u7sBUpnCBsHYoALp4hfr9VEjLHHYa4uKENGITe0K9Q= +go.opentelemetry.io/collector/processor v1.31.0/go.mod h1:5hDYJ7/hTdfd2tF2Rj5Hs6+mfyFz2O7CaPzVvW1qHQc= +go.opentelemetry.io/collector/processor/processorhelper v0.125.0 h1:QRpX7oFW88DAZhy+Q93npklRoaQr8ue0GKpeup7C/Fk= +go.opentelemetry.io/collector/processor/processorhelper v0.125.0/go.mod h1:oXRvslUuN62wErcoJrcEJYoTXu5wHyNyJsE+/a9Cc9s= +go.opentelemetry.io/collector/processor/processortest v0.125.0 h1:ZVAN4iZPDcWhpzKqnuok2NIuS5hwGVVQUOWkJFR12tA= 
+go.opentelemetry.io/collector/processor/processortest v0.125.0/go.mod h1:VAw0IRG35cWTBjBtreXeXJEgqkRegfjrH/EuLhNX2+I= +go.opentelemetry.io/collector/processor/xprocessor v0.125.0 h1:VWYPMW1VmDq6xB7M5SYjBpQCCIq3MhQ3W++wU47QpZM= +go.opentelemetry.io/collector/processor/xprocessor v0.125.0/go.mod h1:bCxUyFVlksANg8wjYZqWVsRB33lkLQ294rTrju/IZiM= +go.opentelemetry.io/collector/semconv v0.125.0 h1:SyRP617YGvNSWRSKMy7Lbk9RaJSR+qFAAfyxJOeZe4s= +go.opentelemetry.io/collector/semconv v0.125.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U= +go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 h1:ojdSRDvjrnm30beHOmwsSvLpoRF40MlwNCA+Oo93kXU= +go.opentelemetry.io/contrib/bridges/otelzap v0.10.0/go.mod h1:oTTm4g7NEtHSV2i/0FeVdPaPgUIZPfQkFbq0vbzqnv0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/log v0.11.0 h1:c24Hrlk5WJ8JWcwbQxdBqxZdOK7PcP/LFtOtwpDTe3Y= +go.opentelemetry.io/otel/log v0.11.0/go.mod h1:U/sxQ83FPmT29trrifhQg+Zj2lo1/IPN1PF6RTFqdwc= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -271,17 +373,21 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 
h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4= +golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= +golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -289,27 +395,32 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -317,17 +428,18 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -337,6 +449,8 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -344,26 +458,30 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= -google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.254.0 h1:jl3XrGj7lRjnlUvZAbAdhINTLbsg5dbjmR90+pTQvt4= +google.golang.org/api v0.254.0/go.mod h1:5BkSURm3D9kAqjGvBNgf0EcbX6Rnrf6UArKkwBzAyqQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= -google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c/go.mod h1:2rC5OendXvZ8wGEo/cSLheztrZDZaSoHanUcd1xtZnw= -google.golang.org/genproto/googleapis/api 
v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c h1:AtEkQdl5b6zsybXcbz00j1LwNodDuH6hVifIaNqk7NQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c/go.mod h1:ea2MjsO70ssTfCjiwHgI0ZFqcw45Ksuk2ckf9G468GA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= +google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -373,15 +491,17 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 h1:3Cb46zyKIlEWac21tvDF2O4KyMlOHQxrQkyiaUpdwM0= -gopkg.in/DataDog/dd-trace-go.v1 v1.67.0/go.mod h1:6DdiJPKOeJfZyd/IUGCAd5elY8qPGkztK6wbYYsMjag= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/DataDog/dd-trace-go.v1 v1.74.8 h1:h96ji92t9eXbPvSWhJ+lrPWetHiQNYlt48JKRO09NFA= +gopkg.in/DataDog/dd-trace-go.v1 v1.74.8/go.mod h1:LpHbtHsCZBlm1HWrlVOUQcEXwMWZnU6yMvmtd1GvSDI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -391,11 +511,5 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw= -modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= -modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= -modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= -modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= -modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= -modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= -modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index ea6df0ca..4deca443 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,218 @@ +## [0.17.0](https://github.com/googleapis/google-cloud-go/releases/tag/auth%2Fv0.17.0) (2025-10-02) + +### Features + +* Add trust boundary support for service accounts and impersonation (HTTP/gRPC) (#11870) ([5c2b665](https://github.com/googleapis/google-cloud-go/commit/5c2b665f392e6dd90192f107188720aa1357e7da)) +* add trust boundary support for external accounts (#12864) ([a67a146](https://github.com/googleapis/google-cloud-go/commit/a67a146a6a88a6f1ba10c409dfce8015ecd60a64)) + # Changelog +## [0.16.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.4...auth/v0.16.5) (2025-08-14) + + +### Bug Fixes + +* **auth:** Improve error message for unknown credentials type ([#12673](https://github.com/googleapis/google-cloud-go/issues/12673)) ([558b164](https://github.com/googleapis/google-cloud-go/commit/558b16429f621276694405fa5f2091199f2d4c4d)) +* **auth:** Set Content-Type in userTokenProvider.exchangeToken ([#12634](https://github.com/googleapis/google-cloud-go/issues/12634)) ([1197ebc](https://github.com/googleapis/google-cloud-go/commit/1197ebcbca491f8c610da732c7361c90bc6f46d0)) + +## [0.16.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.3...auth/v0.16.4) (2025-08-06) + + +### Bug 
Fixes + +* **auth:** Add UseDefaultClient: true to metadata.Options ([#12666](https://github.com/googleapis/google-cloud-go/issues/12666)) ([1482191](https://github.com/googleapis/google-cloud-go/commit/1482191e88236693efef68769752638281566766)), refs [#11078](https://github.com/googleapis/google-cloud-go/issues/11078) [#12657](https://github.com/googleapis/google-cloud-go/issues/12657) + +## [0.16.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.2...auth/v0.16.3) (2025-07-17) + + +### Bug Fixes + +* **auth:** Fix race condition in cachedTokenProvider.tokenAsync ([#12586](https://github.com/googleapis/google-cloud-go/issues/12586)) ([73867cc](https://github.com/googleapis/google-cloud-go/commit/73867ccc1e9808d65361bcfc0776bd95fe34dbb3)) + +## [0.16.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.1...auth/v0.16.2) (2025-06-04) + + +### Bug Fixes + +* **auth:** Add back DirectPath misconfiguration logging ([#11162](https://github.com/googleapis/google-cloud-go/issues/11162)) ([8d52da5](https://github.com/googleapis/google-cloud-go/commit/8d52da58da5a0ed77a0f6307d1b561bc045406a1)) +* **auth:** Remove s2a fallback option ([#12354](https://github.com/googleapis/google-cloud-go/issues/12354)) ([d5acc59](https://github.com/googleapis/google-cloud-go/commit/d5acc599cd775ddc404349e75906fa02e8ff133e)) + +## [0.16.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.0...auth/v0.16.1) (2025-04-23) + + +### Bug Fixes + +* **auth:** Clone detectopts before assigning TokenBindingType ([#11881](https://github.com/googleapis/google-cloud-go/issues/11881)) ([2167b02](https://github.com/googleapis/google-cloud-go/commit/2167b020fdc43b517c2b6ecca264a10e357ea035)) + +## [0.16.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.15.0...auth/v0.16.0) (2025-04-14) + + +### Features + +* **auth/credentials:** Return X.509 certificate chain as subject token ([#11948](https://github.com/googleapis/google-cloud-go/issues/11948)) ([d445a3f](https://github.com/googleapis/google-cloud-go/commit/d445a3f66272ffd5c39c4939af9bebad4582631c)), refs [#11757](https://github.com/googleapis/google-cloud-go/issues/11757) +* **auth:** Configure DirectPath bound credentials from AllowedHardBoundTokens ([#11665](https://github.com/googleapis/google-cloud-go/issues/11665)) ([0fc40bc](https://github.com/googleapis/google-cloud-go/commit/0fc40bcf4e4673704df0973e9fa65957395d7bb4)) + + +### Bug Fixes + +* **auth:** Allow non-default SA credentials for DP ([#11828](https://github.com/googleapis/google-cloud-go/issues/11828)) ([3a996b4](https://github.com/googleapis/google-cloud-go/commit/3a996b4129e6d0a34dfda6671f535d5aefb26a82)) +* **auth:** Restore calling DialContext ([#11930](https://github.com/googleapis/google-cloud-go/issues/11930)) ([9ec9a29](https://github.com/googleapis/google-cloud-go/commit/9ec9a29494e93197edbaf45aba28984801e9770a)), refs [#11118](https://github.com/googleapis/google-cloud-go/issues/11118) + +## [0.15.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.1...auth/v0.15.0) (2025-02-19) + + +### Features + +* **auth:** Add hard-bound token request to compute token provider. 
([#11588](https://github.com/googleapis/google-cloud-go/issues/11588)) ([0e608bb](https://github.com/googleapis/google-cloud-go/commit/0e608bb5ac3d694c8ad36ca4340071d3a2c78699)) + +## [0.14.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.0...auth/v0.14.1) (2025-01-24) + + +### Documentation + +* **auth:** Add warning about externally-provided credentials ([#11462](https://github.com/googleapis/google-cloud-go/issues/11462)) ([49fb6ff](https://github.com/googleapis/google-cloud-go/commit/49fb6ff4d754895f82c9c4d502fc7547d3b5a941)) + +## [0.14.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.13.0...auth/v0.14.0) (2025-01-08) + + +### Features + +* **auth:** Add universe domain support to idtoken ([#11059](https://github.com/googleapis/google-cloud-go/issues/11059)) ([72add7e](https://github.com/googleapis/google-cloud-go/commit/72add7e9f8f455af695e8ef79212a4bd3122fb3a)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) +* **auth:** Fix copy of delegates in impersonate.NewIDTokenCredentials ([#11386](https://github.com/googleapis/google-cloud-go/issues/11386)) ([ff7ef8e](https://github.com/googleapis/google-cloud-go/commit/ff7ef8e7ade7171bce3e4f30ff10a2e9f6c27ca0)), refs [#11379](https://github.com/googleapis/google-cloud-go/issues/11379) +* **auth:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) + +## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.1...auth/v0.13.0) (2024-12-13) + + +### Features + +* **auth:** Add logging support ([#11079](https://github.com/googleapis/google-cloud-go/issues/11079)) ([c80e31d](https://github.com/googleapis/google-cloud-go/commit/c80e31df5ecb33a810be3dfb9d9e27ac531aa91d)) +* **auth:** Pass logger from auth layer to metadata package ([#11288](https://github.com/googleapis/google-cloud-go/issues/11288)) ([b552efd](https://github.com/googleapis/google-cloud-go/commit/b552efd6ab34e5dfded18438e0fbfd925805614f)) + + +### Bug Fixes + +* **auth:** Check compute cred type before non-default flag for DP ([#11255](https://github.com/googleapis/google-cloud-go/issues/11255)) ([4347ca1](https://github.com/googleapis/google-cloud-go/commit/4347ca141892be8ae813399b4b437662a103bc90)) + +## [0.12.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.0...auth/v0.12.1) (2024-12-10) + + +### Bug Fixes + +* **auth:** Correct typo in link ([#11160](https://github.com/googleapis/google-cloud-go/issues/11160)) ([af6fb46](https://github.com/googleapis/google-cloud-go/commit/af6fb46d7cd694ddbe8c9d63bc4cdcd62b9fb2c1)) + +## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.11.0...auth/v0.12.0) (2024-12-04) + + +### Features + +* **auth:** Add support for providing custom certificate URL ([#11006](https://github.com/googleapis/google-cloud-go/issues/11006)) ([ebf3657](https://github.com/googleapis/google-cloud-go/commit/ebf36579724afb375d3974cf1da38f703e3b7dbc)), refs [#11005](https://github.com/googleapis/google-cloud-go/issues/11005) + + +### Bug Fixes + +* **auth:** Ensure endpoints are present in Validator ([#11209](https://github.com/googleapis/google-cloud-go/issues/11209)) ([106cd53](https://github.com/googleapis/google-cloud-go/commit/106cd53309facaef1b8ea78376179f523f6912b9)), refs [#11006](https://github.com/googleapis/google-cloud-go/issues/11006) 
[#11190](https://github.com/googleapis/google-cloud-go/issues/11190) [#11189](https://github.com/googleapis/google-cloud-go/issues/11189) [#11188](https://github.com/googleapis/google-cloud-go/issues/11188) + +## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.2...auth/v0.11.0) (2024-11-21) + + +### Features + +* **auth:** Add universe domain support to mTLS ([#11159](https://github.com/googleapis/google-cloud-go/issues/11159)) ([117748b](https://github.com/googleapis/google-cloud-go/commit/117748ba1cfd4ae62a6a4feb7e30951cb2bc9344)) + +## [0.10.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.1...auth/v0.10.2) (2024-11-12) + + +### Bug Fixes + +* **auth:** Restore use of grpc.Dial ([#11118](https://github.com/googleapis/google-cloud-go/issues/11118)) ([2456b94](https://github.com/googleapis/google-cloud-go/commit/2456b943b7b8aaabd4d8bfb7572c0f477ae0db45)), refs [#7556](https://github.com/googleapis/google-cloud-go/issues/7556) + +## [0.10.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.0...auth/v0.10.1) (2024-11-06) + + +### Bug Fixes + +* **auth:** Restore Application Default Credentials support to idtoken ([#11083](https://github.com/googleapis/google-cloud-go/issues/11083)) ([8771f2e](https://github.com/googleapis/google-cloud-go/commit/8771f2ea9807ab822083808e0678392edff3b4f2)) +* **auth:** Skip impersonate universe domain check if empty ([#11086](https://github.com/googleapis/google-cloud-go/issues/11086)) ([87159c1](https://github.com/googleapis/google-cloud-go/commit/87159c1059d4a18d1367ce62746a838a94964ab6)) + +## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.9...auth/v0.10.0) (2024-10-30) + + +### Features + +* **auth:** Add universe domain support to credentials/impersonate ([#10953](https://github.com/googleapis/google-cloud-go/issues/10953)) ([e06cb64](https://github.com/googleapis/google-cloud-go/commit/e06cb6499f7eda3aef08ab18ff197016f667684b)) + +## [0.9.9](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.8...auth/v0.9.9) (2024-10-22) + + +### Bug Fixes + +* **auth:** Fallback cert lookups for missing files ([#11013](https://github.com/googleapis/google-cloud-go/issues/11013)) ([bd76695](https://github.com/googleapis/google-cloud-go/commit/bd766957ec238b7c40ddbabb369e612dc9b07313)), refs [#10844](https://github.com/googleapis/google-cloud-go/issues/10844) +* **auth:** Replace MDS endpoint universe_domain with universe-domain ([#11000](https://github.com/googleapis/google-cloud-go/issues/11000)) ([6a1586f](https://github.com/googleapis/google-cloud-go/commit/6a1586f2ce9974684affaea84e7b629313b4d114)) + +## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09) + + +### Bug Fixes + +* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962) +* **auth:** Try to talk to plaintext S2A if credentials cannot be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287)) + +## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01) + + +### Bug Fixes + +* **auth:** Restore support for non-default service
accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907) + +## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30) + + +### Bug Fixes + +* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8)) + +## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25) + + +### Bug Fixes + +* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2)) +* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1)) +* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350)) + +## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11) + + +### Bug Fixes + +* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6)) + +## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03) + + +### Bug Fixes + +* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804) + +## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30) + + +### Bug Fixes + +* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742) +* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795) + + +### Documentation + +* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437)) + ## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22) diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md index 
36de276a..6fe4f076 100644 --- a/vendor/cloud.google.com/go/auth/README.md +++ b/vendor/cloud.google.com/go/auth/README.md @@ -1,4 +1,40 @@ -# auth +# Google Auth Library for Go -This module is currently EXPERIMENTAL and under active development. It is not -yet intended to be used. +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth) + +## Install + +``` bash +go get cloud.google.com/go/auth@latest +``` + +## Usage + +The most common way this library is used is transitively, by default, from any +of our Go client libraries. + +### Notable use-cases + +- To create a credential directly please see examples in the + [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) + package. +- To create an authenticated HTTP client please see examples in the + [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport) + package. +- To create an authenticated gRPC connection please see examples in the + [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport) + package. +- To create an ID token please see examples in the + [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken) + package. + +## Contributing + +Contributions are welcome. Please see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index 2eb78d7b..c6d80158 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -12,6 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package auth provides utilities for managing Google Cloud credentials, +// including functionality for creating, caching, and refreshing OAuth2 tokens. +// It offers customizable options for different OAuth2 flows, such as 2-legged +// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic +// token management. package auth import ( @@ -19,6 +24,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "net/url" "strings" @@ -27,6 +33,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/jwt" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -130,7 +137,9 @@ func (t *Token) isEmpty() bool { } // Credentials holds Google credentials, including -// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials). +// [Application Default Credentials]. +// +// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials type Credentials struct { json []byte projectID CredentialsPropertyProvider @@ -220,9 +229,7 @@ type CredentialsOptions struct { UniverseDomainProvider CredentialsPropertyProvider } -// NewCredentials returns new [Credentials] from the provided options. Most users -will want to build this object a function from the -[cloud.google.com/go/auth/credentials] package. +// NewCredentials returns new [Credentials] from the provided options.
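// Example (editor's illustrative sketch, not part of the vendored diff): the
// README above points at the credentials package as the usual entry point.
// DetectDefault and DetectOptions appear later in this diff; the scope value
// and the fatal error handling are assumptions made for the example.
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	ctx := context.Background()
	// Detect Application Default Credentials (environment, file, or GCE metadata).
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Credentials wrap a TokenProvider, so a bearer token can be fetched directly.
	tok, err := creds.Token(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.Type, tok.Expiry)
}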
func NewCredentials(opts *CredentialsOptions) *Credentials { creds := &Credentials{ TokenProvider: opts.TokenProvider, @@ -235,8 +242,8 @@ func NewCredentials(opts *CredentialsOptions) *Credentials { return creds } -// CachedTokenProviderOptions provided options for configuring a -// CachedTokenProvider. +// CachedTokenProviderOptions provides options for configuring a cached +// [TokenProvider]. type CachedTokenProviderOptions struct { // DisableAutoRefresh makes the TokenProvider always return the same token, // even if it is expired. The default is false. Optional. @@ -246,7 +253,7 @@ type CachedTokenProviderOptions struct { // seconds. Optional. ExpireEarly time.Duration // DisableAsyncRefresh configures a synchronous workflow that refreshes - // stale tokens while blocking. The default is false. Optional. + // tokens in a blocking manner. The default is false. Optional. DisableAsyncRefresh bool } @@ -273,12 +280,7 @@ func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool { // NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned // by the underlying provider. By default it will refresh tokens asynchronously -// (non-blocking mode) within a window that starts 3 minutes and 45 seconds -// before they expire. The asynchronous (non-blocking) refresh can be changed to -// a synchronous (blocking) refresh using the -// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry -// duration can be configured using the CachedTokenProviderOptions.ExpireEarly -// option. +// a few minutes before they expire. func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { if ctp, ok := tp.(*cachedTokenProvider); ok { return ctp @@ -321,7 +323,9 @@ func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, err defer c.mu.Unlock() return c.cachedToken, nil case stale: - c.tokenAsync(ctx) + // Call tokenAsync with a new Context because the user-provided context + // may have a short timeout incompatible with async token refresh. + c.tokenAsync(context.Background()) // Return the stale token immediately to not block customer requests to Cloud services. c.mu.Lock() defer c.mu.Unlock() @@ -336,13 +340,14 @@ func (c *cachedTokenProvider) tokenState() tokenState { c.mu.Lock() defer c.mu.Unlock() t := c.cachedToken + now := timeNow() if t == nil || t.Value == "" { return invalid } else if t.Expiry.IsZero() { return fresh - } else if timeNow().After(t.Expiry.Round(0)) { + } else if now.After(t.Expiry.Round(0)) { return invalid - } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) { + } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) { return stale } return fresh @@ -357,9 +362,6 @@ func (c *cachedTokenProvider) tokenState() tokenState { // blocking call to Token should likely return the same error on the main goroutine. func (c *cachedTokenProvider) tokenAsync(ctx context.Context) { fn := func() { - c.mu.Lock() - c.isRefreshRunning = true - c.mu.Unlock() t, err := c.tp.Token(ctx) c.mu.Lock() defer c.mu.Unlock() @@ -375,6 +377,7 @@ func (c *cachedTokenProvider) tokenAsync(ctx context.Context) { c.mu.Lock() defer c.mu.Unlock() if !c.isRefreshRunning && !c.isRefreshErr { + c.isRefreshRunning = true go fn() } } @@ -480,6 +483,8 @@ type Options2LO struct { Audience string // PrivateClaims allows specifying any custom claims for the JWT. Optional. PrivateClaims map[string]interface{} + // UniverseDomain is the default service domain for a given Cloud universe. 
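// Example (illustrative sketch, not from the vendored sources): wrapping a
// TokenProvider with NewCachedTokenProvider as documented above. The
// staticProvider type and the five-minute ExpireEarly window are assumptions
// for the example.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/auth"
)

// staticProvider is a toy TokenProvider that mints hour-long tokens.
type staticProvider struct{}

func (staticProvider) Token(ctx context.Context) (*auth.Token, error) {
	return &auth.Token{Value: "example", Type: "Bearer", Expiry: time.Now().Add(time.Hour)}, nil
}

func main() {
	// Cached tokens are refreshed asynchronously once they enter the
	// ExpireEarly window; DisableAsyncRefresh would make refreshes blocking.
	tp := auth.NewCachedTokenProvider(staticProvider{}, &auth.CachedTokenProviderOptions{
		ExpireEarly: 5 * time.Minute,
	})
	tok, err := tp.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.Value)
}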
+ UniverseDomain string // Client is the client to be used to make the underlying token requests. // Optional. @@ -487,6 +492,11 @@ type Options2LO struct { // UseIDToken requests that the token returned be an ID token if one is // returned from the server. Optional. UseIDToken bool + // Logger is used for debug logging. If provided, logging will be enabled + // at the logger's configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default + // logger will be used. Optional. + Logger *slog.Logger } func (o *Options2LO) client() *http.Client { @@ -517,12 +527,13 @@ func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) { if err := opts.validate(); err != nil { return nil, err } - return tokenProvider2LO{opts: opts, Client: opts.client()}, nil + return tokenProvider2LO{opts: opts, Client: opts.client(), logger: internallog.New(opts.Logger)}, nil } type tokenProvider2LO struct { opts *Options2LO Client *http.Client + logger *slog.Logger } func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { @@ -557,10 +568,12 @@ func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + tp.logger.DebugContext(ctx, "2LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) resp, body, err := internal.DoRequest(tp.Client, req) if err != nil { return nil, fmt.Errorf("auth: cannot fetch token: %w", err) } + tp.logger.DebugContext(ctx, "2LO token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, &Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go index 6f70fa35..a2d5c310 100644 --- a/vendor/cloud.google.com/go/auth/credentials/compute.go +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -37,8 +37,12 @@ var ( // computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that // uses the metadata service to retrieve tokens. -func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { - return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ +func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider { + return auth.NewCachedTokenProvider(&computeProvider{ + scopes: opts.Scopes, + client: client, + tokenBindingType: opts.TokenBindingType, + }, &auth.CachedTokenProviderOptions{ ExpireEarly: opts.EarlyTokenRefresh, DisableAsyncRefresh: opts.DisableAsyncRefresh, }) @@ -46,7 +50,9 @@ func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { // computeProvider fetches tokens from the Google Cloud metadata service.
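// Example (illustrative sketch): building a two-legged (2LO) provider from the
// Options2LO fields shown above. The key file, service-account email, and
// token URL are placeholders; a real service-account key supplies them.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"cloud.google.com/go/auth"
)

func main() {
	pemKey, err := os.ReadFile("sa-key.pem") // hypothetical PEM-encoded private key
	if err != nil {
		log.Fatal(err)
	}
	tp, err := auth.New2LOTokenProvider(&auth.Options2LO{
		Email:      "sa@example-project.iam.gserviceaccount.com", // placeholder
		PrivateKey: pemKey,
		TokenURL:   "https://oauth2.googleapis.com/token",
		Scopes:     []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := tp.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.Expiry)
}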
type computeProvider struct { - scopes []string + scopes []string + client *metadata.Client + tokenBindingType TokenBindingType } type metadataTokenResp struct { @@ -55,17 +61,27 @@ type metadataTokenResp struct { TokenType string `json:"token_type"` } -func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { +func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) { tokenURI, err := url.Parse(computeTokenURI) if err != nil { return nil, err } - if len(cs.scopes) > 0 { + hasScopes := len(cs.scopes) > 0 + if hasScopes || cs.tokenBindingType != NoBinding { v := url.Values{} - v.Set("scopes", strings.Join(cs.scopes, ",")) + if hasScopes { + v.Set("scopes", strings.Join(cs.scopes, ",")) + } + switch cs.tokenBindingType { + case MTLSHardBinding: + v.Set("transport", "mtls") + v.Set("binding-enforcement", "on") + case ALTSHardBinding: + v.Set("transport", "alts") + } tokenURI.RawQuery = v.Encode() } - tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String()) + tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String()) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } @@ -76,11 +92,11 @@ func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { if res.ExpiresInSec == 0 || res.AccessToken == "" { return nil, errors.New("credentials: incomplete token received from metadata") } - return &auth.Token{ + token := &auth.Token{ Value: res.AccessToken, Type: res.TokenType, Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), Metadata: computeTokenMetadata, - }, nil - + } + return token, nil } diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index cce62241..6700e33e 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -19,6 +19,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "os" "time" @@ -26,7 +27,9 @@ import ( "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/trustboundary" "cloud.google.com/go/compute/metadata" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -49,6 +52,23 @@ var ( allowOnGCECheck = true ) +// TokenBindingType specifies the type of binding used when requesting a token, +// whether to request a hard-bound token using mTLS or an +// instance-identity-bound token using ALTS. +type TokenBindingType int + +const ( + // NoBinding specifies that requested tokens are not required to have a + // binding. This is the default option. + NoBinding TokenBindingType = iota + // MTLSHardBinding specifies that a hard-bound token should be requested + // using an mTLS channel with S2A. + MTLSHardBinding + // ALTSHardBinding specifies that an instance-identity-bound token should + // be requested using an ALTS channel. + ALTSHardBinding +) + // OnGCE reports whether this process is running in Google Cloud.
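// Example (illustrative sketch): how computeProvider.Token assembles the
// metadata token URI above. The path is a stand-in; only the query-parameter
// logic mirrors the hunk.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	u, err := url.Parse("instance/service-accounts/default/token") // stand-in path
	if err != nil {
		panic(err)
	}
	v := url.Values{}
	v.Set("scopes", strings.Join([]string{"scope-a", "scope-b"}, ","))
	// An MTLSHardBinding request additionally pins the transport and asks for
	// enforcement, exactly as in the switch statement above.
	v.Set("transport", "mtls")
	v.Set("binding-enforcement", "on")
	u.RawQuery = v.Encode()
	fmt.Println(u) // ...token?binding-enforcement=on&scopes=scope-a%2Cscope-b&transport=mtls
}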
func OnGCE() bool { // TODO(codyoss): once all libs use this auth lib move metadata check here @@ -76,6 +96,10 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if err := opts.validate(); err != nil { return nil, err } + trustBoundaryEnabled, err := trustboundary.IsEnabled() + if err != nil { + return nil, err + } if len(opts.CredentialsJSON) > 0 { return readCredentialsFileJSON(opts.CredentialsJSON, opts) } @@ -96,12 +120,30 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { } if OnGCE() { + metadataClient := metadata.NewWithOptions(&metadata.Options{ + Logger: opts.logger(), + UseDefaultClient: true, + }) + gceUniverseDomainProvider := &internal.ComputeUniverseDomainProvider{ + MetadataClient: metadataClient, + } + + tp := computeTokenProvider(opts, metadataClient) + if trustBoundaryEnabled { + gceConfigProvider := trustboundary.NewGCEConfigProvider(gceUniverseDomainProvider) + var err error + tp, err = trustboundary.NewProvider(opts.client(), gceConfigProvider, opts.logger(), tp) + if err != nil { + return nil, fmt.Errorf("credentials: failed to initialize GCE trust boundary provider: %w", err) + } + + } return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: computeTokenProvider(opts), - ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { - return metadata.ProjectID() + TokenProvider: tp, + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadataClient.ProjectIDWithContext(ctx) }), - UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, + UniverseDomainProvider: gceUniverseDomainProvider, }), nil } @@ -114,6 +156,10 @@ type DetectOptions struct { // https://www.googleapis.com/auth/cloud-platform. Required if Audience is // not provided. Scopes []string + // TokenBindingType specifies the type of binding used when requesting a + // token, whether to request a hard-bound token using mTLS or an + // instance-identity-bound token using ALTS. Optional. + TokenBindingType TokenBindingType // Audience that credentials tokens should have. Only applicable for 2LO // flows with service accounts. If specified, scopes should not be provided. Audience string @@ -142,10 +188,26 @@ type DetectOptions struct { // CredentialsFile overrides detection logic and sources a credential file // from the provided filepath. If provided, CredentialsJSON must not be. // Optional. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library.
Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). CredentialsFile string // CredentialsJSON overrides detection logic and uses the JSON bytes as the // source for the credential. If provided, CredentialsFile must not be. // Optional. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library. Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). CredentialsJSON []byte // UseSelfSignedJWT directs service account based credentials to create a // self-signed JWT with the private key found in the file, skipping any @@ -158,6 +220,11 @@ type DetectOptions struct { // The default value is "googleapis.com". This option is ignored for // authentication flows that do not support universe domain. Optional. UniverseDomain string + // Logger is used for debug logging. If provided, logging will be enabled + // at the logger's configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default + // logger will be used. Optional. + Logger *slog.Logger } func (o *DetectOptions) validate() error { @@ -193,6 +260,10 @@ func (o *DetectOptions) client() *http.Client { return internal.DefaultClient() } +func (o *DetectOptions) logger() *slog.Logger { + return internallog.New(o.Logger) +} + func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) { b, err := os.ReadFile(filename) if err != nil { @@ -253,6 +324,7 @@ func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO { AuthURL: c.AuthURI, TokenURL: c.TokenURI, Client: opts.client(), + Logger: opts.logger(), EarlyTokenExpiry: opts.EarlyTokenRefresh, AuthHandlerOpts: handleOpts, // TODO(codyoss): refactor this out. We need to add in auto-detection diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index b426e16d..d2a04247 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -25,6 +25,7 @@ import ( "cloud.google.com/go/auth/credentials/internal/impersonate" internalauth "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/trustboundary" ) func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { @@ -33,9 +34,11 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { return nil, err } - var projectID, quotaProjectID, universeDomain string + var projectID, universeDomain string var tp auth.TokenProvider switch fileType { + case credsfile.UnknownCredType: + return nil, errors.New("credentials: unsupported unidentified file type") case credsfile.ServiceAccountKey: f, err := credsfile.ParseServiceAccount(b) if err != nil { @@ -56,7 +59,6 @@ if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ExternalAccountKey: f, err := credsfile.ParseExternalAccount(b) @@ -67,7 +69,6 @@ if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) case credsfile.ExternalAccountAuthorizedUserKey: f, err := credsfile.ParseExternalAccountAuthorizedUser(b) @@ -78,7 +79,6 @@ if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID
universeDomain = f.UniverseDomain case credsfile.ImpersonatedServiceAccountKey: f, err := credsfile.ParseImpersonatedServiceAccount(b) @@ -108,9 +108,9 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ ExpireEarly: opts.EarlyTokenRefresh, }), - JSON: b, - ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), - QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + // TODO(codyoss): only set quota project here if there was a user override UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), }), nil } @@ -127,22 +127,44 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string } func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) if opts.UseSelfSignedJWT { return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. + opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) } opts2LO := &auth.Options2LO{ - Email: f.ClientEmail, - PrivateKey: []byte(f.PrivateKey), - PrivateKeyID: f.PrivateKeyID, - Scopes: opts.scopes(), - TokenURL: f.TokenURL, - Subject: opts.Subject, - Client: opts.client(), + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: opts.scopes(), + TokenURL: f.TokenURL, + Subject: opts.Subject, + Client: opts.client(), + Logger: opts.logger(), + UniverseDomain: ud, } if opts2LO.TokenURL == "" { opts2LO.TokenURL = jwtTokenURL } - return auth.New2LOTokenProvider(opts2LO) + + tp, err := auth.New2LOTokenProvider(opts2LO) + if err != nil { + return nil, err + } + + trustBoundaryEnabled, err := trustboundary.IsEnabled() + if err != nil { + return nil, err + } + if !trustBoundaryEnabled { + return tp, nil + } + saConfig := trustboundary.NewServiceAccountConfigProvider(opts2LO.Email, opts2LO.UniverseDomain) + return trustboundary.NewProvider(opts.client(), saConfig, opts.logger(), tp) } func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) { @@ -156,6 +178,7 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) EarlyTokenExpiry: opts.EarlyTokenRefresh, RefreshToken: f.RefreshToken, Client: opts.client(), + Logger: opts.logger(), } return auth.New3LOTokenProvider(opts3LO) } @@ -174,12 +197,45 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions Scopes: opts.scopes(), WorkforcePoolUserProject: f.WorkforcePoolUserProject, Client: opts.client(), + Logger: opts.logger(), IsDefaultClient: opts.Client == nil, } if f.ServiceAccountImpersonation != nil { externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds } - return externalaccount.NewTokenProvider(externalOpts) + tp, err := externalaccount.NewTokenProvider(externalOpts) + if err != nil { + return nil, err + } + trustBoundaryEnabled, err := trustboundary.IsEnabled() + if err != nil { + return nil, err + } + if !trustBoundaryEnabled { + return tp, nil + } + + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + var configProvider 
trustboundary.ConfigProvider + + if f.ServiceAccountImpersonationURL == "" { + // No impersonation, this is a direct external account credential. + // The trust boundary is based on the workload/workforce pool. + var err error + configProvider, err = trustboundary.NewExternalAccountConfigProvider(f.Audience, ud) + if err != nil { + return nil, err + } + } else { + // Impersonation is used. The trust boundary is based on the target service account. + targetSAEmail, err := impersonate.ExtractServiceAccountEmail(f.ServiceAccountImpersonationURL) + if err != nil { + return nil, fmt.Errorf("credentials: could not extract target service account email for trust boundary: %w", err) + } + configProvider = trustboundary.NewServiceAccountConfigProvider(targetSAEmail, ud) + } + + return trustboundary.NewProvider(opts.client(), configProvider, opts.logger(), tp) } func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) { @@ -192,8 +248,26 @@ func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedU ClientSecret: f.ClientSecret, Scopes: opts.scopes(), Client: opts.client(), + Logger: opts.logger(), + } + tp, err := externalaccountuser.NewTokenProvider(externalOpts) + if err != nil { + return nil, err + } + trustBoundaryEnabled, err := trustboundary.IsEnabled() + if err != nil { + return nil, err + } + if !trustBoundaryEnabled { + return tp, nil + } + + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + configProvider, err := trustboundary.NewExternalAccountConfigProvider(f.Audience, ud) + if err != nil { + return nil, err } - return externalaccountuser.NewTokenProvider(externalOpts) + return trustboundary.NewProvider(opts.client(), configProvider, opts.logger(), tp) } func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { @@ -201,22 +275,42 @@ func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFil return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") } - tp, err := fileCredentials(f.CredSource, opts) + sourceTP, err := fileCredentials(f.CredSource, opts) if err != nil { return nil, err } - return impersonate.NewTokenProvider(&impersonate.Options{ - URL: f.ServiceAccountImpersonationURL, - Scopes: opts.scopes(), - Tp: tp, - Delegates: f.Delegates, - Client: opts.client(), - }) + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + impOpts := &impersonate.Options{ + URL: f.ServiceAccountImpersonationURL, + Scopes: opts.scopes(), + Tp: sourceTP, + Delegates: f.Delegates, + Client: opts.client(), + Logger: opts.logger(), + UniverseDomain: ud, + } + tp, err := impersonate.NewTokenProvider(impOpts) + if err != nil { + return nil, err + } + trustBoundaryEnabled, err := trustboundary.IsEnabled() + if err != nil { + return nil, err + } + if !trustBoundaryEnabled { + return tp, nil + } + targetSAEmail, err := impersonate.ExtractServiceAccountEmail(f.ServiceAccountImpersonationURL) + if err != nil { + return nil, fmt.Errorf("credentials: could not extract target service account email for trust boundary: %w", err) + } + targetSAConfig := trustboundary.NewServiceAccountConfigProvider(targetSAEmail, ud) + return trustboundary.NewProvider(opts.client(), targetSAConfig, opts.logger(), tp) } - func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { return 
gdch.NewTokenProvider(f, &gdch.Options{ STSAudience: opts.STSAudience, Client: opts.client(), + Logger: opts.logger(), }) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go index a34f6b06..9ecd1f64 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -23,6 +23,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "net/url" "os" @@ -32,6 +33,7 @@ import ( "time" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) var ( @@ -87,6 +89,7 @@ type awsSubjectProvider struct { reqOpts *RequestOptions Client *http.Client + logger *slog.Logger } func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) { @@ -94,32 +97,30 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) if sp.RegionalCredVerificationURL == "" { sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL } - if sp.requestSigner == nil { - headers := make(map[string]string) - if sp.shouldUseMetadataServer() { - awsSessionToken, err := sp.getAWSSessionToken(ctx) - if err != nil { - return "", err - } - - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken - } - } - - awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, err := sp.getAWSSessionToken(ctx) if err != nil { return "", err } - if sp.region, err = sp.getRegion(ctx, headers); err != nil { - return "", err - } - sp.requestSigner = &awsRequestSigner{ - RegionName: sp.region, - AwsSecurityCredentials: awsSecurityCredentials, + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken } } + awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + if err != nil { + return "", err + } + if sp.region, err = sp.getRegion(ctx, headers); err != nil { + return "", err + } + sp.requestSigner = &awsRequestSigner{ + RegionName: sp.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + // Generate the signed request to AWS STS GetCallerIdentity API. // Use the required regional endpoint. Otherwise, the request will fail. 
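// Example (illustrative sketch): the request/response debug-logging pattern
// these hunks thread through every HTTP call — log the outgoing request,
// perform it, then log the response. The URL is a placeholder; internallog is
// the gax-go helper the diff imports.
package main

import (
	"context"
	"io"
	"log/slog"
	"net/http"
	"os"

	"github.com/googleapis/gax-go/v2/internallog"
)

func main() {
	ctx := context.Background()
	logger := internallog.New(slog.New(slog.NewTextHandler(os.Stderr, nil)))
	req, err := http.NewRequestWithContext(ctx, "GET", "https://example.com/", nil) // placeholder URL
	if err != nil {
		return
	}
	logger.DebugContext(ctx, "example request", "request", internallog.HTTPRequest(req, nil))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	logger.DebugContext(ctx, "example response", "response", internallog.HTTPResponse(resp, body))
}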
req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) @@ -194,10 +195,12 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e } req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) + sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body) } @@ -227,10 +230,12 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string] for name, value := range headers { req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body) } @@ -285,10 +290,12 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context for name, value := range headers { req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return result, err } + sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body) } @@ -310,10 +317,12 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws metadata role response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go index 112186a9..f4f49f17 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "regexp" "strconv" @@ -28,6 +29,7 @@ import ( "cloud.google.com/go/auth/credentials/internal/impersonate" "cloud.google.com/go/auth/credentials/internal/stsexchange" "cloud.google.com/go/auth/internal/credsfile" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -104,6 +106,11 @@ type Options struct { // This is important for X509 credentials which should create a new client if the default was used // but should respect a client explicitly passed in by the user. IsDefaultClient bool + // Logger is used for debug logging. 
If provided, logging will be enabled + // at the logger's configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default + // logger will be used. Optional. + Logger *slog.Logger } // SubjectTokenProvider can be used to supply a subject token to exchange for a @@ -224,6 +231,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { return nil, err } opts.resolveTokenURL() + logger := internallog.New(opts.Logger) stp, err := newSubjectTokenProvider(opts) if err != nil { return nil, err @@ -238,6 +246,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { client: client, opts: opts, stp: stp, + logger: logger, } if opts.ServiceAccountImpersonationURL == "" { @@ -254,6 +263,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { Scopes: scopes, Tp: auth.NewCachedTokenProvider(tp, nil), TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds, + Logger: logger, }) if err != nil { return nil, err @@ -269,6 +279,7 @@ type subjectTokenProvider interface { // tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens. type tokenProvider struct { client *http.Client + logger *slog.Logger opts *Options stp subjectTokenProvider } @@ -310,6 +321,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { Authentication: clientAuth, Headers: header, ExtraOpts: options, + Logger: tp.logger, }) if err != nil { return nil, err @@ -330,12 +342,14 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { // newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a // subjectTokenProvider func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { + logger := internallog.New(o.Logger) reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType} if o.AwsSecurityCredentialsProvider != nil { return &awsSubjectProvider{ securityCredentialsProvider: o.AwsSecurityCredentialsProvider, TargetResource: o.Audience, reqOpts: reqOpts, + logger: logger, }, nil } else if o.SubjectTokenProvider != nil { return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil @@ -352,6 +366,7 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { CredVerificationURL: o.CredentialSource.URL, TargetResource: o.Audience, Client: o.Client, + logger: logger, } if o.CredentialSource.IMDSv2SessionTokenURL != "" { awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL @@ -362,7 +377,13 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { } else if o.CredentialSource.File != "" { return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil } else if o.CredentialSource.URL != "" { - return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil + return &urlSubjectProvider{ + URL: o.CredentialSource.URL, + Headers: o.CredentialSource.Headers, + Format: o.CredentialSource.Format, + Client: o.Client, + Logger: logger, + }, nil } else if o.CredentialSource.Executable != nil { ec := o.CredentialSource.Executable if ec.Command == "" { @@ -392,7 +413,10 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { return nil, errors.New("credentials: \"certificate\" object cannot
specify both a certificate_config_location and use_default_certificate_config=true") } - return &x509Provider{}, nil + return &x509Provider{ + TrustChainPath: o.CredentialSource.Certificate.TrustChainPath, + ConfigFilePath: o.CredentialSource.Certificate.CertificateConfigLocation, + }, nil } return nil, errors.New("credentials: unable to parse credential source") } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go index 0a020599..754ecf4f 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -19,10 +19,12 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -38,6 +40,7 @@ type urlSubjectProvider struct { Headers map[string]string Format *credsfile.Format Client *http.Client + Logger *slog.Logger } func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) { @@ -49,10 +52,12 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) for key, val := range sp.Headers { req.Header.Add(key, val) } + sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) } + sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return "", fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go index 115df588..d86ca593 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go @@ -17,27 +17,184 @@ package externalaccount import ( "context" "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/fs" "net/http" + "os" + "strings" "time" "cloud.google.com/go/auth/internal/transport/cert" ) -// x509Provider implements the subjectTokenProvider type for -// x509 workload identity credentials. Because x509 credentials -// rely on an mTLS connection to represent the 3rd party identity -// rather than a subject token, this provider will always return -// an empty string when a subject token is requested by the external account -// token provider. +// x509Provider implements the subjectTokenProvider type for x509 workload +// identity credentials. This provider retrieves and formats a JSON array +// containing the leaf certificate and trust chain (if provided) as +// base64-encoded strings. This JSON array serves as the subject token for +// mTLS authentication. type x509Provider struct { + // TrustChainPath is the path to the file containing the trust chain certificates. + // The file should contain one or more PEM-encoded certificates. 
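+	// If the leaf certificate also appears in this file, it must be the first
+	// certificate listed; subjectToken rejects a chain where the leaf appears
+	// anywhere else. Optional.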
+ TrustChainPath string + // ConfigFilePath is the path to the configuration file containing the path + // to the leaf certificate file. + ConfigFilePath string } +const pemCertificateHeader = "-----BEGIN CERTIFICATE-----" + func (xp *x509Provider) providerType() string { return x509ProviderType } -func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) { - return "", nil +// loadLeafCertificate loads and parses the leaf certificate from the specified +// configuration file. It retrieves the certificate path from the config file, +// reads the certificate file, and parses the certificate data. +func loadLeafCertificate(configFilePath string) (*x509.Certificate, error) { + // Get the path to the certificate file from the configuration file. + path, err := cert.GetCertificatePath(configFilePath) + if err != nil { + return nil, fmt.Errorf("failed to get certificate path from config file: %w", err) + } + leafCertBytes, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read leaf certificate file: %w", err) + } + // Parse the certificate bytes. + return parseCertificate(leafCertBytes) +} + +// encodeCert encodes a x509.Certificate to a base64 string. +func encodeCert(cert *x509.Certificate) string { + // cert.Raw contains the raw DER-encoded certificate. Encode the raw certificate bytes to base64. + return base64.StdEncoding.EncodeToString(cert.Raw) +} + +// parseCertificate parses a PEM-encoded certificate from the given byte slice. +func parseCertificate(certData []byte) (*x509.Certificate, error) { + if len(certData) == 0 { + return nil, errors.New("invalid certificate data: empty input") + } + // Decode the PEM-encoded data. + block, _ := pem.Decode(certData) + if block == nil { + return nil, errors.New("invalid PEM-encoded certificate data: no PEM block found") + } + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("invalid PEM-encoded certificate data: expected CERTIFICATE block type, got %s", block.Type) + } + // Parse the DER-encoded certificate. + certificate, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %w", err) + } + return certificate, nil +} + +// readTrustChain reads a file of PEM-encoded X.509 certificates and returns a slice of parsed certificates. +// It splits the file content into PEM certificate blocks and parses each one. +func readTrustChain(trustChainPath string) ([]*x509.Certificate, error) { + certificateTrustChain := []*x509.Certificate{} + + // If no trust chain path is provided, return an empty slice. + if trustChainPath == "" { + return certificateTrustChain, nil + } + + // Read the trust chain file. + trustChainData, err := os.ReadFile(trustChainPath) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil, fmt.Errorf("trust chain file not found: %w", err) + } + return nil, fmt.Errorf("failed to read trust chain file: %w", err) + } + + // Split the file content into PEM certificate blocks. + certBlocks := strings.Split(string(trustChainData), pemCertificateHeader) + + // Iterate over each certificate block. + for _, certBlock := range certBlocks { + // Trim whitespace from the block. + certBlock = strings.TrimSpace(certBlock) + + if certBlock != "" { + // Add the PEM header to the block. + certData := pemCertificateHeader + "\n" + certBlock + + // Parse the certificate data. 
+ cert, err := parseCertificate([]byte(certData)) + if err != nil { + return nil, fmt.Errorf("error parsing certificate from trust chain file: %w", err) + } + + // Append the certificate to the trust chain. + certificateTrustChain = append(certificateTrustChain, cert) + } + } + + return certificateTrustChain, nil +} + +// subjectToken retrieves the X.509 subject token. It loads the leaf +// certificate and, if a trust chain path is configured, the trust chain +// certificates. It then constructs a JSON array containing the base64-encoded +// leaf certificate and each base64-encoded certificate in the trust chain. +// The leaf certificate must be at the top of the trust chain file. This JSON +// array is used as the subject token for mTLS authentication. +func (xp *x509Provider) subjectToken(context.Context) (string, error) { + // Load the leaf certificate. + leafCert, err := loadLeafCertificate(xp.ConfigFilePath) + if err != nil { + return "", fmt.Errorf("failed to load leaf certificate: %w", err) + } + + // Read the trust chain. + trustChain, err := readTrustChain(xp.TrustChainPath) + if err != nil { + return "", fmt.Errorf("failed to read trust chain: %w", err) + } + + // Initialize the certificate chain with the leaf certificate. + certChain := []string{encodeCert(leafCert)} + + // If there is a trust chain, add certificates to the certificate chain. + if len(trustChain) > 0 { + firstCert := encodeCert(trustChain[0]) + + // If the first certificate in the trust chain is not the same as the leaf certificate, add it to the chain. + if firstCert != certChain[0] { + certChain = append(certChain, firstCert) + } + + // Iterate over the remaining certificates in the trust chain. + for i := 1; i < len(trustChain); i++ { + encoded := encodeCert(trustChain[i]) + + // Return an error if the current certificate is the same as the leaf certificate. + if encoded == certChain[0] { + return "", errors.New("the leaf certificate must be at the top of the trust chain file") + } + + // Add the current certificate to the chain. + certChain = append(certChain, encoded) + } + } + + // Convert the certificate chain to a JSON array of base64-encoded strings. + jsonChain, err := json.Marshal(certChain) + if err != nil { + return "", fmt.Errorf("failed to format certificate data: %w", err) + } + + // Return the JSON-formatted certificate chain. + return string(jsonChain), nil + } // createX509Client creates a new client that is configured with mTLS, using the diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go index 0d788547..ae39206e 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go @@ -17,12 +17,14 @@ package externalaccountuser import ( "context" "errors" + "log/slog" "net/http" "time" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials/internal/stsexchange" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) // Options stores the configuration for fetching tokens with external authorized @@ -51,6 +53,8 @@ type Options struct { // Client for token request. Client *http.Client + // Logger for logging. 
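+	// If nil, a default logger is created via internallog.New, which emits
+	// output only when GOOGLE_SDK_GO_LOGGING_LEVEL is set. Optional.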
+ Logger *slog.Logger } func (c *Options) validate() bool { @@ -90,6 +94,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { RefreshToken: opts.RefreshToken, Authentication: clientAuth, Headers: headers, + Logger: internallog.New(tp.o.Logger), }) if err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go index 720045d3..c2d320fd 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -16,12 +16,13 @@ package gdch import ( "context" - "crypto/rsa" + "crypto" "crypto/tls" "crypto/x509" "encoding/json" "errors" "fmt" + "log/slog" "net/http" "net/url" "os" @@ -32,6 +33,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "cloud.google.com/go/auth/internal/jwt" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -51,6 +53,7 @@ var ( type Options struct { STSAudience string Client *http.Client + Logger *slog.Logger } // NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a @@ -62,7 +65,7 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok if o.STSAudience == "" { return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows") } - pk, err := internal.ParseKey([]byte(f.PrivateKey)) + signer, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, err } @@ -75,10 +78,11 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name), tokenURL: f.TokenURL, aud: o.STSAudience, - pk: pk, + signer: signer, pkID: f.PrivateKeyID, certPool: certPool, client: o.Client, + logger: internallog.New(o.Logger), } return tp, nil } @@ -97,11 +101,12 @@ type gdchProvider struct { serviceIdentity string tokenURL string aud string - pk *rsa.PrivateKey + signer crypto.Signer pkID string certPool *x509.CertPool client *http.Client + logger *slog.Logger } func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { @@ -120,7 +125,7 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { Type: jwt.HeaderType, KeyID: string(g.pkID), } - payload, err := jwt.EncodeJWS(&h, &claims, g.pk) + payload, err := jwt.EncodeJWS(&h, &claims, g.signer) if err != nil { return nil, err } @@ -136,10 +141,12 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) resp, body, err := internal.DoRequest(g.client, req) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } + g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, &auth.Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go new file mode 100644 index 00000000..705462c1 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go @@ -0,0 +1,105 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" +) + +var ( + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + iamCredentialsUniverseDomainEndpoint = "https://iamcredentials.UNIVERSE_DOMAIN" +) + +// IDTokenIAMOptions provides configuration for [IDTokenIAMOptions.Token]. +type IDTokenIAMOptions struct { + // Client is required. + Client *http.Client + // Logger is required. + Logger *slog.Logger + UniverseDomain auth.CredentialsPropertyProvider + ServiceAccountEmail string + GenerateIDTokenRequest +} + +// GenerateIDTokenRequest holds the request to the IAM generateIdToken RPC. +type GenerateIDTokenRequest struct { + Audience string `json:"audience"` + IncludeEmail bool `json:"includeEmail"` + // Delegates are the ordered, fully-qualified resource names for service + // accounts in a delegation chain. Each service account must be granted + // roles/iam.serviceAccountTokenCreator on the next service account in the + // chain. The delegates must have the following format: + // projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}. The - wildcard + // character is required; replacing it with a project ID is invalid. + // Optional. + Delegates []string `json:"delegates,omitempty"` +} + +// GenerateIDTokenResponse holds the response from the IAM generateIdToken RPC. +type GenerateIDTokenResponse struct { + Token string `json:"token"` +} + +// Token calls IAM generateIdToken with the configuration provided in [IDTokenIAMOptions].
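+//
+// A minimal, hypothetical usage sketch (ctx, client, logger, udProvider, and
+// the email and audience values are caller-supplied placeholders, not values
+// defined by this package):
+//
+//	opts := IDTokenIAMOptions{
+//		Client:              client,     // authenticated *http.Client
+//		Logger:              logger,     // *slog.Logger
+//		UniverseDomain:      udProvider, // an auth.CredentialsPropertyProvider
+//		ServiceAccountEmail: "sa@my-project.iam.gserviceaccount.com",
+//		GenerateIDTokenRequest: GenerateIDTokenRequest{
+//			Audience:     "https://my-service.example.com",
+//			IncludeEmail: true,
+//		},
+//	}
+//	tok, err := opts.Token(ctx)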
+func (o IDTokenIAMOptions) Token(ctx context.Context) (*auth.Token, error) { + universeDomain, err := o.UniverseDomain.GetProperty(ctx) + if err != nil { + return nil, err + } + endpoint := strings.Replace(iamCredentialsUniverseDomainEndpoint, universeDomainPlaceholder, universeDomain, 1) + url := fmt.Sprintf("%s/v1/%s:generateIdToken", endpoint, internal.FormatIAMServiceAccountResource(o.ServiceAccountEmail)) + + bodyBytes, err := json.Marshal(o.GenerateIDTokenRequest) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + o.Logger.DebugContext(ctx, "impersonated idtoken request", "request", internallog.HTTPRequest(req, bodyBytes)) + resp, body, err := internal.DoRequest(o.Client, req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err) + } + o.Logger.DebugContext(ctx, "impersonated idtoken response", "response", internallog.HTTPResponse(resp, body)) + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var tokenResp GenerateIDTokenResponse + if err := json.Unmarshal(body, &tokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %w", err) + } + return &auth.Token{ + Value: tokenResp.Token, + // Generated ID tokens are good for one hour. + Expiry: time.Now().Add(1 * time.Hour), + }, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go index ed53afa5..8253376e 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go @@ -20,11 +20,15 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" + "regexp" "time" "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport/headers" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -32,6 +36,8 @@ const ( authHeaderKey = "Authorization" ) +var serviceAccountEmailRegex = regexp.MustCompile(`serviceAccounts/(.+?):generateAccessToken`) + // generateAccessTokenReq is used for service account impersonation type generateAccessTokenReq struct { Delegates []string `json:"delegates,omitempty"` @@ -74,6 +80,13 @@ type Options struct { // Client configures the underlying client used to make network requests // when fetching tokens. Required. Client *http.Client + // Logger is used for debug logging. If provided, logging will be enabled + // at the logger's configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default + // logger will be used. Optional. + Logger *slog.Logger + // UniverseDomain is the default service domain for a given Cloud universe. + UniverseDomain string } func (o *Options) validate() error { @@ -88,6 +101,7 @@ func (o *Options) validate() error { // Token performs the exchange to get a temporary service account token to allow access to GCP.
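+// It authorizes the call with a token from the source provider (Tp) and POSTs
+// a generateAccessToken request to the configured impersonation URL.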
func (o *Options) Token(ctx context.Context) (*auth.Token, error) { + logger := internallog.New(o.Logger) lifetime := defaultTokenLifetime if o.TokenLifetimeSeconds != 0 { lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds) @@ -106,13 +120,17 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) { return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err) } req.Header.Set("Content-Type", "application/json") - if err := setAuthHeader(ctx, o.Tp, req); err != nil { + sourceToken, err := o.Tp.Token(ctx) + if err != nil { return nil, err } + headers.SetAuthHeader(sourceToken, req) + logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b)) resp, body, err := internal.DoRequest(o.Client, req) if err != nil { return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) } + logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } @@ -125,22 +143,26 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) { if err != nil { return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err) } - return &auth.Token{ + token := &auth.Token{ Value: accessTokenResp.AccessToken, Expiry: expiry, Type: internal.TokenTypeBearer, - }, nil + } + return token, nil } -func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error { - t, err := tp.Token(ctx) - if err != nil { - return err - } - typ := t.Type - if typ == "" { - typ = internal.TokenTypeBearer +// ExtractServiceAccountEmail extracts the service account email from the impersonation URL. +// The impersonation URL is expected to be in the format: +// https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}:generateAccessToken +// or +// https://iamcredentials.googleapis.com/v1/projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}:generateAccessToken +// Returns an error if the email cannot be extracted. +func ExtractServiceAccountEmail(impersonationURL string) (string, error) { + matches := serviceAccountEmailRegex.FindStringSubmatch(impersonationURL) + + if len(matches) < 2 { + return "", fmt.Errorf("credentials: invalid impersonation URL format: %s", impersonationURL) } - r.Header.Set(authHeaderKey, typ+" "+t.Value) - return nil + + return matches[1], nil } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go index 768a9daf..e1d2b150 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -19,6 +19,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "log/slog" "net/http" "net/url" "strconv" @@ -26,6 +27,7 @@ import ( "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -40,6 +42,7 @@ const ( // Options stores the configuration for making an sts exchange request. 
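+// Logger is optional; when it is nil, doRequest falls back to a default
+// logger via internallog.New.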
type Options struct { Client *http.Client + Logger *slog.Logger Endpoint string Request *TokenRequest Authentication ClientAuthentication @@ -80,6 +83,7 @@ func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) { func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) { opts.Authentication.InjectAuthentication(data, opts.Headers) encodedData := data.Encode() + logger := internallog.New(opts.Logger) req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData)) if err != nil { @@ -93,10 +97,12 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenRespo } req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) + logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData))) resp, body, err := internal.DoRequest(opts.Client, req) if err != nil { return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) } + logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index b62a8ae4..8d335cce 100644 --- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -16,8 +16,10 @@ package credentials import ( "context" - "crypto/rsa" + "crypto" + "errors" "fmt" + "log/slog" "strings" "time" @@ -35,7 +37,10 @@ var ( // configureSelfSignedJWT uses the private key in the service account to create // a JWT without making a network call. 
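+// The resulting JWT is itself presented as the Bearer token, so no token
+// endpoint is contacted when it is used.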
func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { - pk, err := internal.ParseKey([]byte(f.PrivateKey)) + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } + signer, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) } @@ -43,8 +48,9 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions email: f.ClientEmail, audience: opts.Audience, scopes: opts.scopes(), - pk: pk, + signer: signer, pkID: f.PrivateKeyID, + logger: opts.logger(), }, nil } @@ -52,8 +58,9 @@ type selfSignedTokenProvider struct { email string audience string scopes []string - pk *rsa.PrivateKey + signer crypto.Signer pkID string + logger *slog.Logger } func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { @@ -73,9 +80,10 @@ func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { Type: jwt.HeaderType, KeyID: string(tp.pkID), } - msg, err := jwt.EncodeJWS(h, c, tp.pk) + tok, err := jwt.EncodeJWS(h, c, tp.signer) if err != nil { return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) } - return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil + tp.logger.Debug("created self-signed JWT", "token", tok) + return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index efc91c2b..69d6d003 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -20,13 +20,18 @@ import ( "os" "strconv" "strings" + "time" "cloud.google.com/go/auth" - "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/internal/compute" + "golang.org/x/time/rate" "google.golang.org/grpc" grpcgoogle "google.golang.org/grpc/credentials/google" ) +var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second} + func isDirectPathEnabled(endpoint string, opts *Options) bool { if opts.InternalOptions != nil && !opts.InternalOptions.EnableDirectPath { return false @@ -55,7 +60,7 @@ func checkDirectPathEndPoint(endpoint string) bool { return true } -func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool { +func isTokenProviderComputeEngine(tp auth.TokenProvider) bool { if tp == nil { return false } @@ -75,6 +80,16 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool return true } +func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool { + if tp == nil { + return false + } + if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { + return true + } + return isTokenProviderComputeEngine(tp) +} + func isDirectPathXdsUsed(o *Options) bool { // Method 1: Enable DirectPath xDS by env; if b, _ := strconv.ParseBool(os.Getenv(enableDirectPathXdsEnvVar)); b { @@ -87,14 +102,36 @@ func isDirectPathXdsUsed(o *Options) bool { return false } +func isDirectPathBoundTokenEnabled(opts *InternalOptions) bool { + for _, ev := range opts.AllowHardBoundTokens { + if ev == "ALTS" { + return true + } + } + return false +} + // configureDirectPath returns some dial options and an endpoint to use if the // configuration allows the use of direct path. 
If it does not, the provided // grpcOpts and endpoint are returned. -func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { - if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) { +func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string, error) { + logRateLimiter.Do(func() { + logDirectPathMisconfig(endpoint, creds, opts) + }) + if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) { // Overwrite all of the previously specified DialOptions; DirectPath uses its own set of credentials and certificates. + defaultCredentialsOptions := grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}} + if isDirectPathBoundTokenEnabled(opts.InternalOptions) && isTokenProviderComputeEngine(creds) { + optsClone := opts.resolveDetectOptions() + optsClone.TokenBindingType = credentials.ALTSHardBinding + altsCreds, err := credentials.DetectDefault(optsClone) + if err != nil { + return nil, "", err + } + defaultCredentialsOptions.ALTSPerRPCCreds = &grpcCredentialsProvider{creds: altsCreds} + } grpcOpts = []grpc.DialOption{ - grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} + grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(defaultCredentialsOptions))} if timeoutDialerOption != nil { grpcOpts = append(grpcOpts, timeoutDialerOption) } @@ -119,5 +156,22 @@ func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint str } // TODO: add support for system parameters (quota project, request reason) via chained interceptor. } - return grpcOpts, endpoint + return grpcOpts, endpoint, nil +} + +func logDirectPathMisconfig(endpoint string, creds *auth.Credentials, o *Options) { + // Case 1: DirectPath is not enabled + if !isDirectPathEnabled(endpoint, o) { + o.logger().Warn("DirectPath is disabled. To enable, please set the EnableDirectPath option along with the EnableDirectPathXds option.") + } else { + // Case 2: the credentials are not correctly set + if !isTokenProviderDirectPathCompatible(creds, o) { + o.logger().Warn("DirectPath is disabled. Please make sure the token source is fetched from the GCE metadata server and the default service account is used.") + } + // Case 3: not running on GCE + if !compute.OnComputeEngine() { + o.logger().Warn("DirectPath is disabled. DirectPath is only available in a GCE environment.") + } + } } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 0442a593..6bcd3ef5 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package grpctransport provides functionality for managing gRPC client +// connections to Google Cloud services.
package grpctransport import ( "context" "crypto/tls" "errors" "fmt" + "log/slog" "net/http" + "os" + "sync" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" - "go.opencensus.io/plugin/ocgrpc" + "cloud.google.com/go/auth/internal/transport/headers" + "github.com/googleapis/gax-go/v2/internallog" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" grpccreds "google.golang.org/grpc/credentials" grpcinsecure "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/stats" ) const ( @@ -38,7 +46,7 @@ const ( // Check env to decide if using google-c2p resolver for DirectPath traffic. enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) var ( @@ -46,6 +54,27 @@ var ( timeoutDialerOption grpc.DialOption ) +// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across +// all dial connections to avoid the memory leak documented in +// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 +// +// TODO: When this module depends on a version of otelgrpc containing the fix, +// replace this singleton with inline usage for simplicity. +// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797. +var ( + initOtelStatsHandlerOnce sync.Once + otelStatsHandler stats.Handler +) + +// otelGRPCStatsHandler returns the singleton otelStatsHandler for reuse across all +// dial connections. +func otelGRPCStatsHandler() stats.Handler { + initOtelStatsHandlerOnce.Do(func() { + otelStatsHandler = otelgrpc.NewClientHandler() + }) + return otelStatsHandler +} + // ClientCertProvider is a function that returns a TLS client certificate to be // used when opening TLS connections. It follows the same semantics as // [crypto/tls.Config.GetClientCertificate]. @@ -90,6 +119,11 @@ type Options struct { // APIKey specifies an API key to be used as the basis for authentication. // If set, DetectOpts are ignored. APIKey string + // Logger is used for debug logging. If provided, logging will be enabled + // at the logger's configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default + // logger will be used. Optional. + Logger *slog.Logger // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. @@ -105,6 +139,10 @@ func (o *Options) client() *http.Client { return nil } +func (o *Options) logger() *slog.Logger { + return internallog.New(o.Logger) +} + func (o *Options) validate() error { if o == nil { return errors.New("grpctransport: opts required to be non-nil") @@ -146,6 +184,9 @@ func (o *Options) resolveDetectOptions() *credentials.DetectOptions { do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) do.TokenURL = credentials.GoogleMTLSTokenURL } + if do.Logger == nil { + do.Logger = o.logger() + } return do } @@ -164,6 +205,10 @@ type InternalOptions struct { EnableDirectPathXds bool // EnableJWTWithScope specifies if scope can be used with self-signed JWT. EnableJWTWithScope bool + // AllowHardBoundTokens allows libraries to request a hard-bound token. + // Obtaining hard-bound tokens requires the connection to be established + // using either ALTS or mTLS with S2A.
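+	// The values recognized by this module are "ALTS" and "MTLS_S2A".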
+ AllowHardBoundTokens []string // DefaultAudience specifies a default audience to be used as the audience // field ("aud") for the JWT token authentication. DefaultAudience string @@ -214,6 +259,7 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er ClientCertProvider: opts.ClientCertProvider, Client: opts.client(), UniverseDomain: opts.UniverseDomain, + Logger: opts.logger(), } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate @@ -221,13 +267,13 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er tOpts.EnableDirectPath = io.EnableDirectPath tOpts.EnableDirectPathXds = io.EnableDirectPathXds } - transportCreds, endpoint, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts) + transportCreds, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts) if err != nil { return nil, err } if !secure { - transportCreds = grpcinsecure.NewCredentials() + transportCreds.TransportCredentials = grpcinsecure.NewCredentials() } // Initialize gRPC dial options with transport-level security options. @@ -256,8 +302,21 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er if opts.Credentials != nil { creds = opts.Credentials } else { + // This condition is only met for non-DirectPath clients because + // TransportTypeMTLSS2A is used only when InternalOptions.EnableDirectPath + // is false. + optsClone := opts.resolveDetectOptions() + if transportCreds.TransportType == transport.TransportTypeMTLSS2A { + // Check that the client allows requesting a hard-bound token for the transport type mTLS using S2A. + for _, ev := range opts.InternalOptions.AllowHardBoundTokens { + if ev == "MTLS_S2A" { + optsClone.TokenBindingType = credentials.MTLSHardBinding + break + } + } + } var err error - creds, err = credentials.DetectDefault(opts.resolveDetectOptions()) + creds, err = credentials.DetectDefault(optsClone) if err != nil { return nil, err } @@ -271,7 +330,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er if metadata == nil { metadata = make(map[string]string, 1) } - metadata[quotaProjectHeaderKey] = qp + // Don't overwrite a user-specified quota project + if _, ok := metadata[quotaProjectHeaderKey]; !ok { + metadata[quotaProjectHeaderKey] = qp + } } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(&grpcCredentialsProvider{ @@ -280,18 +342,20 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er clientUniverseDomain: opts.UniverseDomain, }), ) - // Attempt Direct Path - grpcOpts, endpoint = configureDirectPath(grpcOpts, opts, endpoint, creds) + grpcOpts, transportCreds.Endpoint, err = configureDirectPath(grpcOpts, opts, transportCreds.Endpoint, creds) + if err != nil { + return nil, err + } } // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. - grpcOpts = addOCStatsHandler(grpcOpts, opts) + grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts) grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) - return grpc.DialContext(ctx, endpoint, grpcOpts...) + return grpc.DialContext(ctx, transportCreds.Endpoint, grpcOpts...) } // grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials.
@@ -325,15 +389,23 @@ type grpcCredentialsProvider struct { clientUniverseDomain string } -// getClientUniverseDomain returns the default service domain for a given Cloud universe. -// The default value is "googleapis.com". This is the universe domain -// configured for the client, which will be compared to the universe domain -// that is separately configured for the credentials. +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. func (c *grpcCredentialsProvider) getClientUniverseDomain() string { - if c.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if c.clientUniverseDomain != "" { + return c.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return c.clientUniverseDomain + return internal.DefaultUniverseDomain } func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { @@ -357,30 +429,20 @@ func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri .. } } metadata := make(map[string]string, len(c.metadata)+1) - setAuthMetadata(token, metadata) + headers.SetAuthMetadata(token, metadata) for k, v := range c.metadata { metadata[k] = v } return metadata, nil } -// setAuthMetadata uses the provided token to set the Authorization metadata. -// If the token.Type is empty, the type is assumed to be Bearer. -func setAuthMetadata(token *auth.Token, m map[string]string) { - typ := token.Type - if typ == "" { - typ = internal.TokenTypeBearer - } - m["authorization"] = typ + " " + token.Value -} - func (c *grpcCredentialsProvider) RequireTransportSecurity() bool { return c.secure } -func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { +func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { if opts.DisableTelemetry { return dialOpts } - return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) + return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler())) } diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index 969c8d4d..c9126535 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -12,18 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package httptransport provides functionality for managing HTTP client +// connections to Google Cloud services. 
package httptransport import ( "crypto/tls" "errors" "fmt" + "log/slog" "net/http" "cloud.google.com/go/auth" detect "cloud.google.com/go/auth/credentials" - "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" + "cloud.google.com/go/auth/internal/transport/headers" + "github.com/googleapis/gax-go/v2/internallog" ) // ClientCertProvider is a function that returns a TLS client certificate to be @@ -67,6 +71,11 @@ type Options struct { // configured for the client, which will be compared to the universe domain // that is separately configured for the credentials. UniverseDomain string + // Logger is used for debug logging. If provided, logging will be enabled + // at the logger's configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default + // logger will be used. Optional. + Logger *slog.Logger // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. @@ -99,6 +108,10 @@ func (o *Options) client() *http.Client { return nil } +func (o *Options) logger() *slog.Logger { + return internallog.New(o.Logger) +} + func (o *Options) resolveDetectOptions() *detect.DetectOptions { io := o.InternalOptions // soft-clone these so we are not updating a ref the user holds and may reuse @@ -123,6 +136,9 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions { do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) do.TokenURL = detect.GoogleMTLSTokenURL } + if do.Logger == nil { + do.Logger = o.logger() + } return do } @@ -145,14 +161,21 @@ type InternalOptions struct { // service. DefaultScopes []string // SkipValidation bypasses validation on Options. It should only be used - // internally for clients that needs more control over their transport. + // internally for clients that need more control over their transport. SkipValidation bool + // SkipUniverseDomainValidation skips the verification that the universe + // domain configured for the client matches the universe domain configured + // for the credentials. It should only be used internally for clients that + // need more control over their transport. The default is false. + SkipUniverseDomainValidation bool } // AddAuthorizationMiddleware adds a middleware to the provided client's // transport that sets the Authorization header with the value produced by the // provided [cloud.google.com/go/auth.Credentials]. An error is returned only // if client or creds is nil. +// +// This function does not support setting a universe domain value on the client. func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error { if client == nil || creds == nil { return fmt.Errorf("httptransport: client and creds must not be nil") } @@ -171,7 +194,6 @@ func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) er client.Transport = &authTransport{ creds: creds, base: base, - // TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls. } return nil } @@ -189,6 +211,7 @@ func NewClient(opts *Options) (*http.Client, error) { ClientCertProvider: opts.ClientCertProvider, Client: opts.client(), UniverseDomain: opts.UniverseDomain, + Logger: opts.logger(), } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate @@ -213,12 +236,10 @@ func NewClient(opts *Options) (*http.Client, error) { }, nil } -// SetAuthHeader uses the provided token to set the Authorization header on a -// request.
If the token.Type is empty, the type is assumed to be Bearer. +// SetAuthHeader uses the provided token to set the Authorization and trust +// boundary headers on an http.Request. If the token.Type is empty, the type is +// assumed to be Bearer. This is the recommended way to set authorization +// headers on a custom http.Request. func SetAuthHeader(token *auth.Token, req *http.Request) { - typ := token.Type - if typ == "" { - typ = internal.TokenTypeBearer - } - req.Header.Set("Authorization", typ+" "+token.Value) + headers.SetAuthHeader(token, req) } diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go deleted file mode 100644 index 467c477c..00000000 --- a/vendor/cloud.google.com/go/auth/httptransport/trace.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httptransport - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - "net/http" - "strconv" - "strings" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const ( - httpHeaderMaxSize = 200 - cloudTraceHeader = `X-Cloud-Trace-Context` -) - -// asserts the httpFormat fulfills this foreign interface -var _ propagation.HTTPFormat = (*httpFormat)(nil) - -// httpFormat implements propagation.httpFormat to propagate -// traces in HTTP headers for Google Cloud Platform and Cloud Trace. -type httpFormat struct{} - -// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests. -func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h := req.Header.Get(cloudTraceHeader) - // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. - // Return if the header is empty or missing, or if the header is unreasonably - // large, to avoid making unnecessary copies of a large string. - if h == "" || len(h) > httpHeaderMaxSize { - return trace.SpanContext{}, false - } - - // Parse the trace id field. - slash := strings.Index(h, `/`) - if slash == -1 { - return trace.SpanContext{}, false - } - tid, h := h[:slash], h[slash+1:] - - buf, err := hex.DecodeString(tid) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], buf) - - // Parse the span id field. - spanstr := h - semicolon := strings.Index(h, `;`) - if semicolon != -1 { - spanstr, h = h[:semicolon], h[semicolon+1:] - } - sid, err := strconv.ParseUint(spanstr, 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - binary.BigEndian.PutUint64(sc.SpanID[:], sid) - - // Parse the options field, options field is optional. - if !strings.HasPrefix(h, "o=") { - return sc, true - } - o, err := strconv.ParseUint(h[2:], 10, 32) - if err != nil { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(o) - return sc, true -} - -// SpanContextToRequest modifies the given request to include a Cloud Trace header. 
-func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - sid := binary.BigEndian.Uint64(sc.SpanID[:]) - header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) - req.Header.Set(cloudTraceHeader, header) -} diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 07eea474..3feb997c 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -19,6 +19,7 @@ import ( "crypto/tls" "net" "net/http" + "os" "time" "cloud.google.com/go/auth" @@ -26,12 +27,13 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" "cloud.google.com/go/auth/internal/transport/cert" - "go.opencensus.io/plugin/ochttp" + "cloud.google.com/go/auth/internal/transport/headers" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" ) const ( - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) { @@ -41,7 +43,7 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err headers: headers, } var trans http.RoundTripper = ht - trans = addOCTransport(trans, opts) + trans = addOpenTelemetryTransport(trans, opts) switch { case opts.DisableAuthentication: // Do nothing. @@ -76,13 +78,21 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err if headers == nil { headers = make(map[string][]string, 1) } - headers.Set(quotaProjectHeaderKey, qp) + // Don't overwrite a user-specified quota project + if v := headers.Get(quotaProjectHeaderKey); v == "" { + headers.Set(quotaProjectHeaderKey, qp) + } + } + var skipUD bool + if iOpts := opts.InternalOptions; iOpts != nil { + skipUD = iOpts.SkipUniverseDomainValidation } creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil) trans = &authTransport{ - base: trans, - creds: creds, - clientUniverseDomain: opts.UniverseDomain, + base: trans, + creds: creds, + clientUniverseDomain: opts.UniverseDomain, + skipUniverseDomainValidation: skipUD, } } return trans, nil @@ -94,7 +104,11 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err // http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well.
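+// If http.DefaultTransport has been replaced with a non-*http.Transport
+// implementation, a fresh base transport is constructed via
+// transport.BaseTransport instead of cloning http.DefaultTransport.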
func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - trans := http.DefaultTransport.(*http.Transport).Clone() + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + defaultTransport = transport.BaseTransport() + } + trans := defaultTransport.Clone() trans.MaxIdleConnsPerHost = 100 if clientCertSource != nil { @@ -155,29 +169,37 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) { return rt.RoundTrip(&newReq) } -func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { +func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { if opts.DisableTelemetry { return trans } - return &ochttp.Transport{ - Base: trans, - Propagation: &httpFormat{}, - } + return otelhttp.NewTransport(trans) } type authTransport struct { - creds *auth.Credentials - base http.RoundTripper - clientUniverseDomain string + creds *auth.Credentials + base http.RoundTripper + clientUniverseDomain string + skipUniverseDomainValidation bool } -// getClientUniverseDomain returns the universe domain configured for the client. -// The default value is "googleapis.com". +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. func (t *authTransport) getClientUniverseDomain() string { - if t.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if t.clientUniverseDomain != "" { + return t.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return t.clientUniverseDomain + return internal.DefaultUniverseDomain } // RoundTrip authorizes and authenticates the request with an @@ -197,7 +219,7 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { if err != nil { return nil, err } - if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + if !t.skipUniverseDomainValidation && token.MetadataString("auth.google.tokenSource") != "compute-metadata" { credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) if err != nil { return nil, err @@ -207,7 +229,7 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { } } req2 := req.Clone(req.Context()) - SetAuthHeader(token, req2) + headers.SetAuthHeader(token, req2) reqBodyClosed = true return t.base.RoundTrip(req2) } diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go new file mode 100644 index 00000000..05c7e8bd --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go @@ -0,0 +1,65 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "log" + "runtime" + "strings" + "sync" +) + +var ( + vmOnGCEOnce sync.Once + vmOnGCE bool +) + +// OnComputeEngine returns whether the client is running on GCE. +// +// This is a copy of the gRPC internal googlecloud.OnGCE() func at: +// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go +// The functionality is similar to the metadata.OnGCE() func at: +// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/metadata.go +// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server. +// In particular, OnComputeEngine() will return false on Serverless. +func OnComputeEngine() bool { + vmOnGCEOnce.Do(func() { + mf, err := manufacturer() + if err != nil { + log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) + }) + return vmOnGCE +} + +// isRunningOnGCE checks whether the local system, without doing a network request, is +// running on GCP. +func isRunningOnGCE(manufacturer []byte, goos string) bool { + name := string(manufacturer) + switch goos { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go new file mode 100644 index 00000000..af490bf4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go @@ -0,0 +1,22 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go new file mode 100644 index 00000000..d92178df --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go @@ -0,0 +1,23 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import "os" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return os.ReadFile(linuxProductNameFile) +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go new file mode 100644 index 00000000..16be9df3 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go @@ -0,0 +1,46 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go index 3be6e5bb..60634730 100644 --- a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go @@ -127,6 +127,7 @@ type ExecutableConfig struct { type CertificateConfig struct { UseDefaultCertificateConfig bool `json:"use_default_certificate_config"` CertificateConfigLocation string `json:"certificate_config_location"` + TrustChainPath string `json:"trust_chain_path"` } // ServiceAccountImpersonationInfo has impersonation configuration. diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 4308345e..72a8a6b7 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -16,7 +16,7 @@ package internal import ( "context" - "crypto/rsa" + "crypto" "crypto/x509" "encoding/json" "encoding/pem" @@ -38,12 +38,21 @@ const ( // QuotaProjectEnvVar is the environment variable for setting the quota // project. 
QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" - projectEnvVar = "GOOGLE_CLOUD_PROJECT" - maxBodySize = 1 << 20 + // UniverseDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 // DefaultUniverseDomain is the default value for universe domain. // Universe domain is the default service domain for a given Cloud universe. DefaultUniverseDomain = "googleapis.com" + + // TrustBoundaryNoOp is a constant indicating no trust boundary is enforced. + TrustBoundaryNoOp = "0x0" + + // TrustBoundaryDataKey is the key used to store trust boundary data in a token's metadata. + TrustBoundaryDataKey = "google.auth.trust_boundary_data" ) type clonableTransport interface { @@ -69,25 +78,27 @@ func DefaultClient() *http.Client { } // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to a crypto.Signer. It detects whether the private key is in a // PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. -func ParseKey(key []byte) (*rsa.PrivateKey, error) { +func ParseKey(key []byte) (crypto.Signer, error) { block, _ := pem.Decode(key) if block != nil { key = block.Bytes } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) + var parsedKey crypto.PrivateKey + var err error + parsedKey, err = x509.ParsePKCS8PrivateKey(key) if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err) } } - parsed, ok := parsedKey.(*rsa.PrivateKey) + parsed, ok := parsedKey.(crypto.Signer) if !ok { - return nil, errors.New("private key is invalid") + return nil, errors.New("private key is not a signer") } return parsed, nil } @@ -176,6 +187,7 @@ func (p StaticProperty) GetProperty(context.Context) (string, error) { // ComputeUniverseDomainProvider fetches the credentials universe domain from // the google cloud metadata service. type ComputeUniverseDomainProvider struct { + MetadataClient *metadata.Client universeDomainOnce sync.Once universeDomain string universeDomainErr error @@ -185,7 +197,7 @@ type ComputeUniverseDomainProvider struct { // metadata service. func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) { c.universeDomainOnce.Do(func() { - c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx) + c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient) }) if c.universeDomainErr != nil { return "", c.universeDomainErr @@ -194,14 +206,14 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string } // httpGetMetadataUniverseDomain is a package var for unit test substitution.
-var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { +var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() - return metadata.GetWithContext(ctx, "universe/universe_domain") + return client.GetWithContext(ctx, "universe/universe-domain") } -func getMetadataUniverseDomain(ctx context.Context) (string, error) { - universeDomain, err := httpGetMetadataUniverseDomain(ctx) +func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) { + universeDomain, err := httpGetMetadataUniverseDomain(ctx, client) if err == nil { return universeDomain, nil } @@ -211,3 +223,62 @@ func getMetadataUniverseDomain(ctx context.Context) (string, error) { } return "", err } + +// FormatIAMServiceAccountResource sets a service account name in an IAM resource +// name. +func FormatIAMServiceAccountResource(name string) string { + return fmt.Sprintf("projects/-/serviceAccounts/%s", name) +} + +// TrustBoundaryData represents the trust boundary data associated with a token. +// It contains information about the regions or environments where the token is valid. +type TrustBoundaryData struct { + // Locations is the list of locations that the token is allowed to be used in. + Locations []string + // EncodedLocations represents the locations in an encoded format. + EncodedLocations string +} + +// NewTrustBoundaryData returns a new TrustBoundaryData with the specified locations and encoded locations. +func NewTrustBoundaryData(locations []string, encodedLocations string) *TrustBoundaryData { + // Ensure consistency by treating a nil slice as an empty slice. + if locations == nil { + locations = []string{} + } + locationsCopy := make([]string, len(locations)) + copy(locationsCopy, locations) + return &TrustBoundaryData{ + Locations: locationsCopy, + EncodedLocations: encodedLocations, + } +} + +// NewNoOpTrustBoundaryData returns a new TrustBoundaryData with no restrictions. +func NewNoOpTrustBoundaryData() *TrustBoundaryData { + return &TrustBoundaryData{ + Locations: []string{}, + EncodedLocations: TrustBoundaryNoOp, + } +} + +// TrustBoundaryHeader returns the value for the x-allowed-locations header and a bool +// indicating if the header should be set. The return values are structured to +// handle three distinct states required by the backend: +// 1. Header not set: (value="", present=false) -> data is empty. +// 2. Header set to an empty string: (value="", present=true) -> data is a no-op. +// 3. Header set to a value: (value="...", present=true) -> data has locations. +func (t TrustBoundaryData) TrustBoundaryHeader() (value string, present bool) { + if t.EncodedLocations == "" { + // If the data is empty, the header should not be present. + return "", false + } + + // If data is not empty, the header should always be present. + present = true + value = "" + if t.EncodedLocations != TrustBoundaryNoOp { + value = t.EncodedLocations + } + // For a no-op, the backend requires an empty string. + return value, present +} diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go index dc28b3c3..9bd55f51 100644 --- a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go +++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go @@ -111,7 +111,7 @@ func (c *Claims) encode() (string, error) { } // EncodeJWS encodes the data using the provided key as a JSON web signature. 
-func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { +func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) { head, err := header.encode() if err != nil { return "", err @@ -123,7 +123,7 @@ func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { ss := fmt.Sprintf("%s.%s", head, claims) h := sha256.New() h.Write([]byte(ss)) - sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256) if err != nil { return "", err } diff --git a/vendor/cloud.google.com/go/auth/internal/retry/retry.go b/vendor/cloud.google.com/go/auth/internal/retry/retry.go new file mode 100644 index 00000000..276cc4a3 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/retry/retry.go @@ -0,0 +1,117 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package retry + +import ( + "context" + "io" + "math/rand" + "net/http" + "time" +) + +const ( + maxRetryAttempts = 5 +) + +var ( + syscallRetryable = func(error) bool { return false } +) + +// defaultBackoff is basically equivalent to gax.Backoff without the need for +// the dependency. +type defaultBackoff struct { + max time.Duration + mul float64 + cur time.Duration +} + +func (b *defaultBackoff) Pause() time.Duration { + d := time.Duration(1 + rand.Int63n(int64(b.cur))) + b.cur = time.Duration(float64(b.cur) * b.mul) + if b.cur > b.max { + b.cur = b.max + } + return d +} + +// Sleep is the equivalent of gax.Sleep without the need for the dependency. +func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +// New returns a new Retryer with the default backoff strategy. +func New() *Retryer { + return &Retryer{bo: &defaultBackoff{ + cur: 100 * time.Millisecond, + max: 30 * time.Second, + mul: 2, + }} +} + +type backoff interface { + Pause() time.Duration +} + +// Retryer is a retryer for HTTP requests. +type Retryer struct { + bo backoff + attempts int +} + +// Retry determines if a request should be retried. +func (r *Retryer) Retry(status int, err error) (time.Duration, bool) { + if status == http.StatusOK { + return 0, false + } + retryOk := shouldRetry(status, err) + if !retryOk { + return 0, false + } + if r.attempts == maxRetryAttempts { + return 0, false + } + r.attempts++ + return r.bo.Pause(), true +} + +func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + // Transient network errors should be retried. 
+ if syscallRetryable(err) { + return true + } + if err, ok := err.(interface{ Temporary() bool }); ok { + if err.Temporary() { + return true + } + } + if err, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, err.Unwrap()) + } + return false +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index 26e037c1..14bca966 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -20,6 +20,7 @@ import ( "crypto/x509" "errors" "log" + "log/slog" "net" "net/http" "net/url" @@ -30,7 +31,6 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport/cert" "github.com/google/s2a-go" - "github.com/google/s2a-go/fallback" "google.golang.org/grpc/credentials" ) @@ -51,8 +51,14 @@ const ( mtlsMDSKey = "/run/google-mds-mtls/client.key" ) -var ( - errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") +// Type represents the type of transport used. +type Type int + +const ( + // TransportTypeUnknown represents an unknown transport type and is the default option. + TransportTypeUnknown Type = iota + // TransportTypeMTLSS2A represents the mTLS transport type using S2A. + TransportTypeMTLSS2A ) // Options is a struct that is duplicated information from the individual @@ -60,13 +66,14 @@ var ( // fields on httptransport.Options and grpctransport.Options. type Options struct { Endpoint string - DefaultMTLSEndpoint string DefaultEndpointTemplate string + DefaultMTLSEndpoint string ClientCertProvider cert.Provider Client *http.Client UniverseDomain string EnableDirectPath bool EnableDirectPathXds bool + Logger *slog.Logger } // getUniverseDomain returns the default service domain for a given Cloud @@ -94,6 +101,16 @@ func (o *Options) defaultEndpoint() string { return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1) } +// defaultMTLSEndpoint returns the DefaultMTLSEndpointTemplate merged with the +// universe domain if the DefaultMTLSEndpointTemplate is set, otherwise returns an +// empty string. +func (o *Options) defaultMTLSEndpoint() string { + if o.DefaultMTLSEndpoint == "" { + return "" + } + return strings.Replace(o.DefaultMTLSEndpoint, universeDomainPlaceholder, o.getUniverseDomain(), 1) +} + // mergedEndpoint merges a user-provided Endpoint of format host[:port] with the // default endpoint. func (o *Options) mergedEndpoint() (string, error) { @@ -112,13 +129,20 @@ func fixScheme(baseURL string) string { return baseURL } +// GRPCTransportCredentials embeds interface TransportCredentials with additional data. +type GRPCTransportCredentials struct { + credentials.TransportCredentials + Endpoint string + TransportType Type +} + // GetGRPCTransportCredsAndEndpoint returns an instance of // [google.golang.org/grpc/credentials.TransportCredentials], and the -// corresponding endpoint to use for GRPC client. -func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) { +// corresponding endpoint and transport type to use for GRPC client. 
+func GetGRPCTransportCredsAndEndpoint(opts *Options) (*GRPCTransportCredentials, error) { config, err := getTransportConfig(opts) if err != nil { - return nil, "", err + return nil, err } defaultTransportCreds := credentials.NewTLS(&tls.Config{ @@ -133,32 +157,27 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) if err != nil { log.Printf("Loading MTLS MDS credentials failed: %v", err) - return defaultTransportCreds, config.endpoint, nil + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil + } } } else if config.s2aAddress != "" { s2aAddr = config.s2aAddress } else { - return defaultTransportCreds, config.endpoint, nil - } - - var fallbackOpts *s2a.FallbackOptions - // In case of S2A failure, fall back to the endpoint that would've been used without S2A. - if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { - fallbackOpts = &s2a.FallbackOptions{ - FallbackClientHandshakeFunc: fallbackHandshake, - } + return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil } s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ S2AAddress: s2aAddr, TransportCreds: transportCredsForS2A, - FallbackOpts: fallbackOpts, }) if err != nil { // Use default if we cannot initialize S2A client transport credentials. - return defaultTransportCreds, config.endpoint, nil + return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil } - return s2aTransportCreds, config.s2aMTLSEndpoint, nil + return &GRPCTransportCredentials{s2aTransportCreds, config.s2aMTLSEndpoint, TransportTypeMTLSS2A}, nil } // GetHTTPTransportConfig returns a client certificate source and a function for @@ -177,7 +196,11 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) if err != nil { log.Printf("Loading MTLS MDS credentials failed: %v", err) - return config.clientCertSource, nil, nil + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return config.clientCertSource, nil, nil + } } } else if config.s2aAddress != "" { s2aAddr = config.s2aAddress @@ -185,23 +208,9 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, return config.clientCertSource, nil, nil } - var fallbackOpts *s2a.FallbackOptions - // In case of S2A failure, fall back to the endpoint that would've been used without S2A. 
- if fallbackURL, err := url.Parse(config.endpoint); err == nil { - if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { - fallbackOpts = &s2a.FallbackOptions{ - FallbackDialer: &s2a.FallbackDialer{ - Dialer: fallbackDialer, - ServerAddr: fallbackServerAddr, - }, - } - } - } - dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ S2AAddress: s2aAddr, TransportCreds: transportCredsForS2A, - FallbackOpts: fallbackOpts, }) return nil, dialTLSContextFunc, nil } @@ -248,12 +257,9 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { if !shouldUseS2A(clientCertSource, opts) { return &defaultTransportConfig, nil } - if !opts.isUniverseDomainGDU() { - return nil, errUniverseNotSupportedMTLS - } - s2aAddress := GetS2AAddress() - mtlsS2AAddress := GetMTLSS2AAddress() + s2aAddress := GetS2AAddress(opts.Logger) + mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger) if s2aAddress == "" && mtlsS2AAddress == "" { return &defaultTransportConfig, nil } @@ -262,7 +268,7 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { endpoint: endpoint, s2aAddress: s2aAddress, mtlsS2AAddress: mtlsS2AAddress, - s2aMTLSEndpoint: opts.DefaultMTLSEndpoint, + s2aMTLSEndpoint: opts.defaultMTLSEndpoint(), }, nil } @@ -308,24 +314,23 @@ type transportConfig struct { // getEndpoint returns the endpoint for the service, taking into account the // user-provided endpoint override "settings.Endpoint". // -// If no endpoint override is specified, we will either return the default endpoint or -// the default mTLS endpoint if a client certificate is available. +// If no endpoint override is specified, we will either return the default +// endpoint or the default mTLS endpoint if a client certificate is available. // -// You can override the default endpoint choice (mtls vs. regular) by setting the -// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// You can override the default endpoint choice (mTLS vs. regular) by setting +// the GOOGLE_API_USE_MTLS_ENDPOINT environment variable. // // If the endpoint override is an address (host:port) rather than full base // URL (ex. https://...), then the user-provided address will be merged into // the default endpoint. For example, WithEndpoint("myhost:8000") and -// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8080/bar/baz" +// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return +// "https://myhost:8000/bar/baz". Note that this does not apply to the mTLS +// endpoint.
func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) { if opts.Endpoint == "" { mtlsMode := getMTLSMode() if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { - if !opts.isUniverseDomainGDU() { - return "", errUniverseNotSupportedMTLS - } - return opts.DefaultMTLSEndpoint, nil + return opts.defaultMTLSEndpoint(), nil } return opts.defaultEndpoint(), nil } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go index 36651591..6c954ae1 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go @@ -16,7 +16,6 @@ package cert import ( "crypto/tls" - "errors" "github.com/googleapis/enterprise-certificate-proxy/client" ) @@ -37,10 +36,9 @@ type ecpSource struct { func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) { key, err := client.Cred(configFilePath) if err != nil { - if errors.Is(err, client.ErrCredUnavailable) { - return nil, errSourceUnavailable - } - return nil, err + // TODO(codyoss): once this is fixed upstream can handle this error a + // little better here. But be safe for now and assume unavailable. + return nil, errSourceUnavailable } return (&ecpSource{ diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go index 3227aba2..738cb216 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go @@ -62,11 +62,11 @@ func NewSecureConnectProvider(configFilePath string) (Provider, error) { file, err := os.ReadFile(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Secure Connect is not supported. - return nil, errSourceUnavailable - } - return nil, err + // Config file missing means Secure Connect is not supported. + // There are non-os.ErrNotExist errors that may be returned. + // (e.g. if the home directory is /dev/null, *nix systems will + // return ENOTDIR instead of ENOENT) + return nil, errSourceUnavailable } var metadata secureConnectMetadata diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go index e8675bf8..b2a3be23 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -37,6 +37,36 @@ type certificateConfig struct { CertConfigs certConfigs `json:"cert_configs"` } +// getconfigFilePath determines the path to the certificate configuration file. +// It first checks for the presence of an environment variable that specifies +// the file path. If the environment variable is not set, it falls back to +// a default configuration file path. +func getconfigFilePath() string { + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + return envFilePath + } + return util.GetDefaultConfigFilePath() + +} + +// GetCertificatePath retrieves the certificate file path from the provided +// configuration file. If the configFilePath is empty, it attempts to load +// the configuration from a well-known gcloud location. 
+// This function is exposed to allow other packages, such as the +// externalaccount package, to retrieve the certificate path without needing +// to load the entire certificate configuration. +func GetCertificatePath(configFilePath string) (string, error) { + if configFilePath == "" { + configFilePath = getconfigFilePath() + } + certFile, _, err := getCertAndKeyFiles(configFilePath) + if err != nil { + return "", err + } + return certFile, nil +} + // NewWorkloadX509CertProvider creates a certificate source // that reads a certificate and private key file from the local file system. // This is intended to be used for workload identity federation. @@ -47,14 +77,8 @@ type certificateConfig struct { // a well-known gcloud location. func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) { if configFilePath == "" { - envFilePath := util.GetConfigFilePathFromEnv() - if envFilePath != "" { - configFilePath = envFilePath - } else { - configFilePath = util.GetDefaultConfigFilePath() - } + configFilePath = getconfigFilePath() } - certFile, keyFile, err := getCertAndKeyFiles(configFilePath) if err != nil { return nil, err @@ -82,10 +106,7 @@ func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) func getCertAndKeyFiles(configFilePath string) (string, string, error) { jsonFile, err := os.Open(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - return "", "", errSourceUnavailable - } - return "", "", err + return "", "", errSourceUnavailable } byteValue, err := io.ReadAll(jsonFile) diff --git a/vendor/cloud.google.com/go/auth/internal/transport/headers/headers.go b/vendor/cloud.google.com/go/auth/internal/transport/headers/headers.go new file mode 100644 index 00000000..5483a763 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/headers/headers.go @@ -0,0 +1,61 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package headers + +import ( + "net/http" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" +) + +// SetAuthHeader uses the provided token to set the Authorization and trust +// boundary headers on a request. If the token.Type is empty, the type is +// assumed to be Bearer. +func SetAuthHeader(token *auth.Token, req *http.Request) { + typ := token.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + req.Header.Set("Authorization", typ+" "+token.Value) + + if headerVal, setHeader := getTrustBoundaryHeader(token); setHeader { + req.Header.Set("x-allowed-locations", headerVal) + } +} + +// SetAuthMetadata uses the provided token to set the Authorization and trust +// boundary metadata. If the token.Type is empty, the type is assumed to be +// Bearer. 
+func SetAuthMetadata(token *auth.Token, m map[string]string) { + typ := token.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + m["authorization"] = typ + " " + token.Value + + if headerVal, setHeader := getTrustBoundaryHeader(token); setHeader { + m["x-allowed-locations"] = headerVal + } +} + +func getTrustBoundaryHeader(token *auth.Token) (val string, present bool) { + if data, ok := token.Metadata[internal.TrustBoundaryDataKey]; ok { + if tbd, ok := data.(internal.TrustBoundaryData); ok { + return tbd.TrustBoundaryHeader() + } + } + return "", false +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go index 4df73edc..a6330995 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -15,9 +15,11 @@ package transport import ( + "context" "encoding/json" "fmt" "log" + "log/slog" "os" "strconv" "sync" @@ -38,8 +40,8 @@ var ( // GetS2AAddress returns the S2A address to be reached via plaintext connection. // Returns empty string if not set or invalid. -func GetS2AAddress() string { - getMetadataMTLSAutoConfig() +func GetS2AAddress(logger *slog.Logger) string { + getMetadataMTLSAutoConfig(logger) if !mtlsConfiguration.valid() { return "" } @@ -48,8 +50,8 @@ func GetS2AAddress() string { // GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection. // Returns empty string if not set or invalid. -func GetMTLSS2AAddress() string { - getMetadataMTLSAutoConfig() +func GetMTLSS2AAddress(logger *slog.Logger) string { + getMetadataMTLSAutoConfig(logger) if !mtlsConfiguration.valid() { return "" } @@ -73,22 +75,25 @@ type s2aAddresses struct { MTLSAddress string `json:"mtls_address"` } -func getMetadataMTLSAutoConfig() { +func getMetadataMTLSAutoConfig(logger *slog.Logger) { var err error mtlsOnce.Do(func() { - mtlsConfiguration, err = queryConfig() + mtlsConfiguration, err = queryConfig(logger) if err != nil { log.Printf("Getting MTLS config failed: %v", err) } }) } -var httpGetMetadataMTLSConfig = func() (string, error) { - return metadata.Get(configEndpointSuffix) +var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) { + metadataClient := metadata.NewWithOptions(&metadata.Options{ + Logger: logger, + }) + return metadataClient.GetWithContext(context.Background(), configEndpointSuffix) } -func queryConfig() (*mtlsConfig, error) { - resp, err := httpGetMetadataMTLSConfig() +func queryConfig(logger *slog.Logger) (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig(logger) if err != nil { return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index 718a6b17..5c8721ef 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -37,6 +37,7 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt } newDo := &credentials.DetectOptions{ // Simple types + TokenBindingType: oldDo.TokenBindingType, Audience: oldDo.Audience, Subject: oldDo.Subject, EarlyTokenRefresh: oldDo.EarlyTokenRefresh, @@ -46,9 +47,10 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt UseSelfSignedJWT: oldDo.UseSelfSignedJWT, UniverseDomain: oldDo.UniverseDomain, - // These fields are are pointer types 
that we just want to use exactly - // as the user set, copy the ref + // These fields are pointer types that we just want to use exactly as + // the user set, copy the ref Client: oldDo.Client, + Logger: oldDo.Logger, AuthHandlerOptions: oldDo.AuthHandlerOptions, } @@ -81,12 +83,14 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri // DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS. func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { - trans := baseTransport() + trans := BaseTransport() trans.TLSClientConfig = tlsConfig return &http.Client{Transport: trans} } -func baseTransport() *http.Transport { +// BaseTransport returns a default [http.Transport] which can be used if +// [http.DefaultTransport] has been overwritten. +func BaseTransport() *http.Transport { return &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ diff --git a/vendor/cloud.google.com/go/auth/internal/trustboundary/external_accounts_config_providers.go b/vendor/cloud.google.com/go/auth/internal/trustboundary/external_accounts_config_providers.go new file mode 100644 index 00000000..8fa5600b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/trustboundary/external_accounts_config_providers.go @@ -0,0 +1,100 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trustboundary + +import ( + "context" + "fmt" + "regexp" +) + +const ( + workloadAllowedLocationsEndpoint = "https://iamcredentials.%s/v1/projects/%s/locations/global/workloadIdentityPools/%s/allowedLocations" + workforceAllowedLocationsEndpoint = "https://iamcredentials.%s/v1/locations/global/workforcePools/%s/allowedLocations" +) + +var ( + workforceAudiencePattern = regexp.MustCompile(`//iam\.([^/]+)/locations/global/workforcePools/([^/]+)`) + workloadAudiencePattern = regexp.MustCompile(`//iam\.([^/]+)/projects/([^/]+)/locations/global/workloadIdentityPools/([^/]+)`) +) + +// NewExternalAccountConfigProvider creates a new ConfigProvider for external accounts. 
+func NewExternalAccountConfigProvider(audience, inputUniverseDomain string) (ConfigProvider, error) { + var audienceDomain, projectNumber, poolID string + var isWorkload bool + + matches := workloadAudiencePattern.FindStringSubmatch(audience) + if len(matches) == 4 { // Expecting full match, domain, projectNumber, poolID + audienceDomain = matches[1] + projectNumber = matches[2] + poolID = matches[3] + isWorkload = true + } else { + matches = workforceAudiencePattern.FindStringSubmatch(audience) + if len(matches) == 3 { // Expecting full match, domain, poolID + audienceDomain = matches[1] + poolID = matches[2] + isWorkload = false + } else { + return nil, fmt.Errorf("trustboundary: unknown audience format: %q", audience) + } + } + + effectiveUniverseDomain := inputUniverseDomain + if effectiveUniverseDomain == "" { + effectiveUniverseDomain = audienceDomain + } else if audienceDomain != "" && effectiveUniverseDomain != audienceDomain { + return nil, fmt.Errorf("trustboundary: provided universe domain (%q) does not match domain in audience (%q)", inputUniverseDomain, audienceDomain) + } + + if isWorkload { + return &workloadIdentityPoolConfigProvider{ + projectNumber: projectNumber, + poolID: poolID, + universeDomain: effectiveUniverseDomain, + }, nil + } + return &workforcePoolConfigProvider{ + poolID: poolID, + universeDomain: effectiveUniverseDomain, + }, nil +} + +type workforcePoolConfigProvider struct { + poolID string + universeDomain string +} + +func (p *workforcePoolConfigProvider) GetTrustBoundaryEndpoint(ctx context.Context) (string, error) { + return fmt.Sprintf(workforceAllowedLocationsEndpoint, p.universeDomain, p.poolID), nil +} + +func (p *workforcePoolConfigProvider) GetUniverseDomain(ctx context.Context) (string, error) { + return p.universeDomain, nil +} + +type workloadIdentityPoolConfigProvider struct { + projectNumber string + poolID string + universeDomain string +} + +func (p *workloadIdentityPoolConfigProvider) GetTrustBoundaryEndpoint(ctx context.Context) (string, error) { + return fmt.Sprintf(workloadAllowedLocationsEndpoint, p.universeDomain, p.projectNumber, p.poolID), nil +} + +func (p *workloadIdentityPoolConfigProvider) GetUniverseDomain(ctx context.Context) (string, error) { + return p.universeDomain, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/trustboundary/trust_boundary.go b/vendor/cloud.google.com/go/auth/internal/trustboundary/trust_boundary.go new file mode 100644 index 00000000..bf898fff --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/trustboundary/trust_boundary.go @@ -0,0 +1,392 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trustboundary + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log/slog" + "net/http" + "os" + "strings" + "sync" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/retry" + "cloud.google.com/go/auth/internal/transport/headers" + "github.com/googleapis/gax-go/v2/internallog" +) + +const ( + // serviceAccountAllowedLocationsEndpoint is the URL for fetching allowed locations for a given service account email. + serviceAccountAllowedLocationsEndpoint = "https://iamcredentials.%s/v1/projects/-/serviceAccounts/%s/allowedLocations" +) + +// isEnabled wraps isTrustBoundaryEnabled with sync.OnceValues to ensure it's +// called only once. +var isEnabled = sync.OnceValues(isTrustBoundaryEnabled) + +// IsEnabled returns if the trust boundary feature is enabled and an error if +// the configuration is invalid. The underlying check is performed only once. +func IsEnabled() (bool, error) { + return isEnabled() +} + +// isTrustBoundaryEnabled checks if the trust boundary feature is enabled via +// GOOGLE_AUTH_TRUST_BOUNDARY_ENABLED environment variable. +// +// If the environment variable is not set, it is considered false. +// +// The environment variable is interpreted as a boolean with the following +// (case-insensitive) rules: +// - "true", "1" are considered true. +// - "false", "0" are considered false. +// +// Any other values will return an error. +func isTrustBoundaryEnabled() (bool, error) { + const envVar = "GOOGLE_AUTH_TRUST_BOUNDARY_ENABLED" + val, ok := os.LookupEnv(envVar) + if !ok { + return false, nil + } + val = strings.ToLower(val) + switch val { + case "true", "1": + return true, nil + case "false", "0": + return false, nil + default: + return false, fmt.Errorf(`invalid value for %s: %q. Must be one of "true", "false", "1", or "0"`, envVar, val) + } +} + +// ConfigProvider provides specific configuration for trust boundary lookups. +type ConfigProvider interface { + // GetTrustBoundaryEndpoint returns the endpoint URL for the trust boundary lookup. + GetTrustBoundaryEndpoint(ctx context.Context) (url string, err error) + // GetUniverseDomain returns the universe domain associated with the credential. + // It may return an error if the universe domain cannot be determined. + GetUniverseDomain(ctx context.Context) (string, error) +} + +// AllowedLocationsResponse is the structure of the response from the Trust Boundary API. +type AllowedLocationsResponse struct { + // Locations is the list of allowed locations. + Locations []string `json:"locations"` + // EncodedLocations is the encoded representation of the allowed locations. + EncodedLocations string `json:"encodedLocations"` +} + +// fetchTrustBoundaryData fetches the trust boundary data from the API. 
+func fetchTrustBoundaryData(ctx context.Context, client *http.Client, url string, token *auth.Token, logger *slog.Logger) (*internal.TrustBoundaryData, error) { + if logger == nil { + logger = slog.New(slog.NewTextHandler(io.Discard, nil)) + } + if client == nil { + return nil, errors.New("trustboundary: HTTP client is required") + } + + if url == "" { + return nil, errors.New("trustboundary: URL cannot be empty") + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("trustboundary: failed to create trust boundary request: %w", err) + } + + if token == nil || token.Value == "" { + return nil, errors.New("trustboundary: access token required for lookup API authentication") + } + headers.SetAuthHeader(token, req) + logger.DebugContext(ctx, "trust boundary request", "request", internallog.HTTPRequest(req, nil)) + + retryer := retry.New() + var response *http.Response + for { + response, err = client.Do(req) + + var statusCode int + if response != nil { + statusCode = response.StatusCode + } + pause, shouldRetry := retryer.Retry(statusCode, err) + + if !shouldRetry { + break + } + + if response != nil { + // Drain and close the body to reuse the connection + io.Copy(io.Discard, response.Body) + response.Body.Close() + } + + if err := retry.Sleep(ctx, pause); err != nil { + return nil, err + } + } + + if err != nil { + return nil, fmt.Errorf("trustboundary: failed to fetch trust boundary: %w", err) + } + defer response.Body.Close() + + body, err := io.ReadAll(response.Body) + if err != nil { + return nil, fmt.Errorf("trustboundary: failed to read trust boundary response: %w", err) + } + + logger.DebugContext(ctx, "trust boundary response", "response", internallog.HTTPResponse(response, body)) + + if response.StatusCode != http.StatusOK { + return nil, fmt.Errorf("trustboundary: trust boundary request failed with status: %s, body: %s", response.Status, string(body)) + } + + apiResponse := AllowedLocationsResponse{} + if err := json.Unmarshal(body, &apiResponse); err != nil { + return nil, fmt.Errorf("trustboundary: failed to unmarshal trust boundary response: %w", err) + } + + if apiResponse.EncodedLocations == "" { + return nil, errors.New("trustboundary: invalid API response: encodedLocations is empty") + } + + return internal.NewTrustBoundaryData(apiResponse.Locations, apiResponse.EncodedLocations), nil +} + +// serviceAccountConfig holds configuration for SA trust boundary lookups. +// It implements the ConfigProvider interface. +type serviceAccountConfig struct { + ServiceAccountEmail string + UniverseDomain string +} + +// NewServiceAccountConfigProvider creates a new config for service accounts. +func NewServiceAccountConfigProvider(saEmail, universeDomain string) ConfigProvider { + return &serviceAccountConfig{ + ServiceAccountEmail: saEmail, + UniverseDomain: universeDomain, + } +} + +// GetTrustBoundaryEndpoint returns the formatted URL for fetching allowed locations +// for the configured service account and universe domain. 
+func (sac *serviceAccountConfig) GetTrustBoundaryEndpoint(ctx context.Context) (url string, err error) { + if sac.ServiceAccountEmail == "" { + return "", errors.New("trustboundary: service account email cannot be empty for config") + } + ud := sac.UniverseDomain + if ud == "" { + ud = internal.DefaultUniverseDomain + } + return fmt.Sprintf(serviceAccountAllowedLocationsEndpoint, ud, sac.ServiceAccountEmail), nil +} + +// GetUniverseDomain returns the configured universe domain, defaulting to +// [internal.DefaultUniverseDomain] if not explicitly set. +func (sac *serviceAccountConfig) GetUniverseDomain(ctx context.Context) (string, error) { + if sac.UniverseDomain == "" { + return internal.DefaultUniverseDomain, nil + } + return sac.UniverseDomain, nil +} + +// DataProvider fetches and caches trust boundary Data. +// It implements the DataProvider interface and uses a ConfigProvider +// to get type-specific details for the lookup. +type DataProvider struct { + client *http.Client + configProvider ConfigProvider + data *internal.TrustBoundaryData + logger *slog.Logger + base auth.TokenProvider +} + +// NewProvider wraps the provided base [auth.TokenProvider] to create a new +// provider that injects tokens with trust boundary data. It uses the provided +// HTTP client and configProvider to fetch the data and attach it to the token's +// metadata. +func NewProvider(client *http.Client, configProvider ConfigProvider, logger *slog.Logger, base auth.TokenProvider) (*DataProvider, error) { + if client == nil { + return nil, errors.New("trustboundary: HTTP client cannot be nil for DataProvider") + } + if configProvider == nil { + return nil, errors.New("trustboundary: ConfigProvider cannot be nil for DataProvider") + } + p := &DataProvider{ + client: client, + configProvider: configProvider, + logger: internallog.New(logger), + base: base, + } + return p, nil +} + +// Token retrieves a token from the base provider and injects it with trust +// boundary data. +func (p *DataProvider) Token(ctx context.Context) (*auth.Token, error) { + // Get the original token. + token, err := p.base.Token(ctx) + if err != nil { + return nil, err + } + + tbData, err := p.GetTrustBoundaryData(ctx, token) + if err != nil { + return nil, fmt.Errorf("trustboundary: error fetching the trust boundary data: %w", err) + } + if tbData != nil { + if token.Metadata == nil { + token.Metadata = make(map[string]interface{}) + } + token.Metadata[internal.TrustBoundaryDataKey] = *tbData + } + return token, nil +} + +// GetTrustBoundaryData retrieves the trust boundary data. +// It first checks the universe domain: if it's non-default, a NoOp is returned. +// Otherwise, it checks a local cache. If the data is not cached as NoOp, +// it fetches new data from the endpoint provided by its ConfigProvider, +// using the given accessToken for authentication. Results are cached. +// If fetching fails, it returns previously cached data if available, otherwise the fetch error. +func (p *DataProvider) GetTrustBoundaryData(ctx context.Context, token *auth.Token) (*internal.TrustBoundaryData, error) { + // Check the universe domain. 
+ uniDomain, err := p.configProvider.GetUniverseDomain(ctx) + if err != nil { + return nil, fmt.Errorf("trustboundary: error getting universe domain: %w", err) + } + if uniDomain != "" && uniDomain != internal.DefaultUniverseDomain { + if p.data == nil || p.data.EncodedLocations != internal.TrustBoundaryNoOp { + p.data = internal.NewNoOpTrustBoundaryData() + } + return p.data, nil + } + + // Check cache for a no-op result from a previous API call. + cachedData := p.data + if cachedData != nil && cachedData.EncodedLocations == internal.TrustBoundaryNoOp { + return cachedData, nil + } + + // Get the endpoint + url, err := p.configProvider.GetTrustBoundaryEndpoint(ctx) + if err != nil { + return nil, fmt.Errorf("trustboundary: error getting the lookup endpoint: %w", err) + } + + // Proceed to fetch new data. + newData, fetchErr := fetchTrustBoundaryData(ctx, p.client, url, token, p.logger) + + if fetchErr != nil { + // Fetch failed. Fallback to cachedData if available. + if cachedData != nil { + return cachedData, nil // Successful fallback + } + // No cache to fallback to. + return nil, fmt.Errorf("trustboundary: failed to fetch trust boundary data for endpoint %s and no cache available: %w", url, fetchErr) + } + + // Fetch successful. Update cache. + p.data = newData + return newData, nil +} + +// GCEConfigProvider implements ConfigProvider for GCE environments. +// It lazily fetches and caches the necessary metadata (service account email, universe domain) +// from the GCE metadata server. +type GCEConfigProvider struct { + // universeDomainProvider provides the universe domain and underlying metadata client. + universeDomainProvider *internal.ComputeUniverseDomainProvider + + // Caching for service account email + saOnce sync.Once + saEmail string + saEmailErr error + + // Caching for universe domain + udOnce sync.Once + ud string + udErr error +} + +// NewGCEConfigProvider creates a new GCEConfigProvider +// which uses the provided gceUDP to interact with the GCE metadata server. +func NewGCEConfigProvider(gceUDP *internal.ComputeUniverseDomainProvider) *GCEConfigProvider { + // The validity of gceUDP and its internal MetadataClient will be checked + // within the GetTrustBoundaryEndpoint and GetUniverseDomain methods. 
+ return &GCEConfigProvider{ + universeDomainProvider: gceUDP, + } +} + +func (g *GCEConfigProvider) fetchSA(ctx context.Context) { + if g.universeDomainProvider == nil || g.universeDomainProvider.MetadataClient == nil { + g.saEmailErr = errors.New("trustboundary: GCEConfigProvider not properly initialized (missing ComputeUniverseDomainProvider or MetadataClient)") + return + } + mdClient := g.universeDomainProvider.MetadataClient + saEmail, err := mdClient.EmailWithContext(ctx, "default") + if err != nil { + g.saEmailErr = fmt.Errorf("trustboundary: GCE config: failed to get service account email: %w", err) + return + } + g.saEmail = saEmail +} + +func (g *GCEConfigProvider) fetchUD(ctx context.Context) { + if g.universeDomainProvider == nil || g.universeDomainProvider.MetadataClient == nil { + g.udErr = errors.New("trustboundary: GCEConfigProvider not properly initialized (missing ComputeUniverseDomainProvider or MetadataClient)") + return + } + ud, err := g.universeDomainProvider.GetProperty(ctx) + if err != nil { + g.udErr = fmt.Errorf("trustboundary: GCE config: failed to get universe domain: %w", err) + return + } + if ud == "" { + ud = internal.DefaultUniverseDomain + } + g.ud = ud +} + +// GetTrustBoundaryEndpoint constructs the trust boundary lookup URL for a GCE environment. +// It uses cached metadata (service account email, universe domain) after the first call. +func (g *GCEConfigProvider) GetTrustBoundaryEndpoint(ctx context.Context) (string, error) { + g.saOnce.Do(func() { g.fetchSA(ctx) }) + if g.saEmailErr != nil { + return "", g.saEmailErr + } + g.udOnce.Do(func() { g.fetchUD(ctx) }) + if g.udErr != nil { + return "", g.udErr + } + return fmt.Sprintf(serviceAccountAllowedLocationsEndpoint, g.ud, g.saEmail), nil +} + +// GetUniverseDomain retrieves the universe domain from the GCE metadata server. +// It uses a cached value after the first call. +func (g *GCEConfigProvider) GetUniverseDomain(ctx context.Context) (string, error) { + g.udOnce.Do(func() { g.fetchUD(ctx) }) + if g.udErr != nil { + return "", g.udErr + } + return g.ud, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/version.go b/vendor/cloud.google.com/go/auth/internal/version.go new file mode 100644 index 00000000..e2f56cf4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/version.go @@ -0,0 +1,20 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapicgen. DO NOT EDIT. + +package internal + +// Version is the current tagged release of the library. 
+const Version = "0.17.0" diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index 7faf6e0c..42716752 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,5 +1,33 @@ # Changelog +## [0.2.8](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.7...auth/oauth2adapt/v0.2.8) (2025-03-17) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd)) + +## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) + +## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21) + + +### Bug Fixes + +* **auth/oauth2adapt:** Copy map in tokenSourceAdapter.Token ([#11164](https://github.com/googleapis/google-cloud-go/issues/11164)) ([8cb0cbc](https://github.com/googleapis/google-cloud-go/commit/8cb0cbccdc32886dfb3af49fee04012937d114d2)), refs [#11161](https://github.com/googleapis/google-cloud-go/issues/11161) + +## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30) + + +### Bug Fixes + +* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52)) + ## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08) diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go index 9835ac57..9cc33e5e 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go @@ -26,6 +26,13 @@ import ( "golang.org/x/oauth2/google" ) +const ( + oauth2TokenSourceKey = "oauth2.google.tokenSource" + oauth2ServiceAccountKey = "oauth2.google.serviceAccount" + authTokenSourceKey = "auth.google.tokenSource" + authServiceAccountKey = "auth.google.serviceAccount" +) + // TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource] // into a [cloud.google.com/go/auth.TokenProvider]. func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider { @@ -47,10 +54,21 @@ func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) { } return nil, err } + // Preserve compute token metadata, for both types of tokens. 
+ metadata := map[string]interface{}{} + if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok { + metadata[authTokenSourceKey] = val + metadata[oauth2TokenSourceKey] = val + } + if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok { + metadata[authServiceAccountKey] = val + metadata[oauth2ServiceAccountKey] = val + } return &auth.Token{ - Value: tok.AccessToken, - Type: tok.Type(), - Expiry: tok.Expiry, + Value: tok.AccessToken, + Type: tok.Type(), + Expiry: tok.Expiry, + Metadata: metadata, }, nil } @@ -76,11 +94,29 @@ func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) { } return nil, err } - return &oauth2.Token{ + tok2 := &oauth2.Token{ AccessToken: tok.Value, TokenType: tok.Type, Expiry: tok.Expiry, - }, nil + } + // Preserve token metadata. + m := tok.Metadata + if m != nil { + // Copy map to avoid concurrent map writes error (#11161). + metadata := make(map[string]interface{}, len(m)+2) + for k, v := range m { + metadata[k] = v + } + // Append compute token metadata in converted form. + if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" { + metadata[oauth2TokenSourceKey] = val + } + if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" { + metadata[oauth2ServiceAccountKey] = val + } + tok2 = tok2.WithExtra(metadata) + } + return tok2, nil } // AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials] diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go index 97a57f46..07804dc1 100644 --- a/vendor/cloud.google.com/go/auth/threelegged.go +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -20,6 +20,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "mime" "net/http" "net/url" @@ -28,6 +29,7 @@ import ( "time" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) // AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for @@ -69,6 +71,11 @@ type Options3LO struct { // AuthHandlerOpts provides a set of options for doing a // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional. AuthHandlerOpts *AuthorizationHandlerOptions + // Logger is used for debug logging. If provided, logging will be enabled + // at the logger's configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default + // logger will be used. Optional. + Logger *slog.Logger } func (o *Options3LO) validate() error { @@ -96,6 +103,10 @@ func (o *Options3LO) validate() error { return nil } +func (o *Options3LO) logger() *slog.Logger { + return internallog.New(o.Logger) +} + // PKCEOptions holds parameters to support PKCE. type PKCEOptions struct { // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier.
@@ -293,12 +304,15 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin if o.AuthStyle == StyleInHeader { req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret)) } + logger := o.logger() + logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) // Make request resp, body, err := internal.DoRequest(o.client(), req) if err != nil { return nil, refreshToken, err } + logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body)) failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 tokError := &Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 9594e1e2..e384683c 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,75 @@ # Changes +## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.8.4...compute/metadata/v0.9.0) (2025-09-24) + + +### Features + +* **compute/metadata:** Retry on HTTP 429 ([#12932](https://github.com/googleapis/google-cloud-go/issues/12932)) ([1e91f5c](https://github.com/googleapis/google-cloud-go/commit/1e91f5c07acacd38ecdd4ff3e83e092b745e0bc2)) + +## [0.8.4](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.8.3...compute/metadata/v0.8.4) (2025-09-18) + + +### Bug Fixes + +* **compute/metadata:** Set subClient for UseDefaultClient case ([#12911](https://github.com/googleapis/google-cloud-go/issues/12911)) ([9e2646b](https://github.com/googleapis/google-cloud-go/commit/9e2646b1821231183fd775bb107c062865eeaccd)) + +## [0.8.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.8.2...compute/metadata/v0.8.3) (2025-09-17) + + +### Bug Fixes + +* **compute/metadata:** Disable Client timeouts for subscription client ([#12910](https://github.com/googleapis/google-cloud-go/issues/12910)) ([187a58a](https://github.com/googleapis/google-cloud-go/commit/187a58a540494e1e8562b046325b8cad8cf7af4a)) + +## [0.8.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.8.1...compute/metadata/v0.8.2) (2025-09-17) + + +### Bug Fixes + +* **compute/metadata:** Racy test and uninitialized subClient ([#12892](https://github.com/googleapis/google-cloud-go/issues/12892)) ([4943ca2](https://github.com/googleapis/google-cloud-go/commit/4943ca2bf83908a23806247bc4252dfb440d09cc)), refs [#12888](https://github.com/googleapis/google-cloud-go/issues/12888) + +## [0.8.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.8.0...compute/metadata/v0.8.1) (2025-09-16) + + +### Bug Fixes + +* **compute/metadata:** Use separate client for subscribe methods ([#12885](https://github.com/googleapis/google-cloud-go/issues/12885)) ([76b80f8](https://github.com/googleapis/google-cloud-go/commit/76b80f8df9bf9339d175407e8c15936fe1ac1c9c)) + +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.7.0...compute/metadata/v0.8.0) (2025-08-06) + + +### Features + +* **compute/metadata:** Add Options.UseDefaultClient ([#12657](https://github.com/googleapis/google-cloud-go/issues/12657)) ([1a88209](https://github.com/googleapis/google-cloud-go/commit/1a8820900f20e038291c4bb2c5284a449196e81f)), refs [#11078](https://github.com/googleapis/google-cloud-go/issues/11078) + +## 
[0.7.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.6.0...compute/metadata/v0.7.0) (2025-05-13) + + +### Features + +* **compute/metadata:** Allow canceling GCE detection ([#11786](https://github.com/googleapis/google-cloud-go/issues/11786)) ([78100fe](https://github.com/googleapis/google-cloud-go/commit/78100fe7e28cd30f1e10b47191ac3c9839663b64)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13) + + +### Features + +* **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) + + +### Bug Fixes + +* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f)) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + ## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) diff --git a/vendor/cloud.google.com/go/compute/metadata/log.go b/vendor/cloud.google.com/go/compute/metadata/log.go new file mode 100644 index 00000000..8ec673b8 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/log.go @@ -0,0 +1,149 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "strings" +) + +// Code below this point is copied from github.com/googleapis/gax-go/v2/internallog +// to avoid the dependency. The compute/metadata module is used by too many +// non-client library modules that can't justify the dependency. + +// The handler returned if logging is not enabled. +type noOpHandler struct{} + +func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool { + return false +} + +func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error { + return nil +} + +func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler { + return h +} + +func (h noOpHandler) WithGroup(_ string) slog.Handler { + return h +} + +// httpRequest returns a lazily evaluated [slog.LogValuer] for a +// [http.Request] and the associated body. 
+func httpRequest(req *http.Request, body []byte) slog.LogValuer { + return &request{ + req: req, + payload: body, + } +} + +type request struct { + req *http.Request + payload []byte +} + +func (r *request) LogValue() slog.Value { + if r == nil || r.req == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method)) + groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String())) + + var headerAttr []slog.Attr + for k, val := range r.req.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) +} + +// httpResponse returns a lazily evaluated [slog.LogValuer] for a +// [http.Response] and the associated body. +func httpResponse(resp *http.Response, body []byte) slog.LogValuer { + return &response{ + resp: resp, + payload: body, + } +} + +type response struct { + resp *http.Response + payload []byte +} + +func (r *response) LogValue() slog.Value { + if r == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode))) + + var headerAttr []slog.Attr + for k, val := range r.resp.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) 
+} + +func processPayload(payload []byte) (slog.Attr, bool) { + peekChar := payload[0] + if peekChar == '{' { + // JSON object + var m map[string]any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else if peekChar == '[' { + // JSON array + var m []any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else { + // Everything else + buf := &bytes.Buffer{} + if err := json.Compact(buf, payload); err != nil { + // Write raw payload in case of error + buf.Write(payload) + } + return slog.String("payload", buf.String()), true + } + return slog.Attr{}, false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 345080b7..6bd18916 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -22,8 +22,10 @@ package metadata // import "cloud.google.com/go/compute/metadata" import ( "context" "encoding/json" + "errors" "fmt" "io" + "log/slog" "net" "net/http" "net/url" @@ -60,19 +62,27 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -var defaultClient = &Client{hc: newDefaultHTTPClient()} +var defaultClient = &Client{ + hc: newDefaultHTTPClient(true), + subClient: newDefaultHTTPClient(false), + logger: slog.New(noOpHandler{}), +} -func newDefaultHTTPClient() *http.Client { - return &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - IdleConnTimeout: 60 * time.Second, - }, - Timeout: 5 * time.Second, +func newDefaultHTTPClient(enableTimeouts bool) *http.Client { + transport := &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + } + c := &http.Client{ + Transport: transport, } + if enableTimeouts { + transport.IdleConnTimeout = 60 * time.Second + c.Timeout = 5 * time.Second + } + return c } // NotDefinedError is returned when requested metadata is not defined. @@ -113,80 +123,18 @@ var ( // NOTE: True returned from `OnGCE` does not guarantee that the metadata server // is accessible from this process and has all the metadata defined. func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE + return OnGCEWithContext(context.Background()) } -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. - if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. - // See https://github.com/googleapis/google-cloud-go/issues/194 - go func() { - req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) - req.Header.Set("User-Agent", userAgent) - res, err := newDefaultHTTPClient().Do(req.WithContext(ctx)) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it.
- return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. Worst case this should return when the - // metaClient's Transport.ResponseHeaderTimeout or - // Transport.Dial.Timeout fires (in two seconds). - return <-resc +// OnGCEWithContext reports whether this process is running on Google Compute Platforms. +// This function's return value is memoized for better performance. +// NOTE: True returned from `OnGCEWithContext` does not guarantee that the metadata server +// is accessible from this process and has all the metadata defined. +func OnGCEWithContext(ctx context.Context) bool { + onGCEOnce.Do(func() { + onGCE = defaultClient.OnGCEWithContext(ctx) + }) + return onGCE } // Subscribe calls Client.SubscribeWithContext on the default client. @@ -409,21 +357,160 @@ func strsContains(ss []string, s string) bool { // A Client provides metadata. type Client struct { hc *http.Client + // subClient by default is an HTTP Client that is only used for subscribe + // methods that should not specify a timeout. If the user specifies a client, + // this will be the same as 'hc'. + subClient *http.Client + logger *slog.Logger +} + +// Options for configuring a [Client]. +type Options struct { + // Client is the HTTP client used to make requests. Optional. + // If UseDefaultClient is true, this field is ignored. + // If this field is nil, a new default http.Client will be created. + Client *http.Client + // Logger is used to log information about HTTP requests and responses. + // If not provided, nothing will be logged. Optional. + Logger *slog.Logger + // UseDefaultClient specifies that the client should use the same default + // internal http.Client that is used in functions such as GetWithContext. + // This is useful for sharing a single TCP connection pool across requests. + // The difference vs GetWithContext is the ability to use this struct + // to provide a custom logger. If this field is true, the Client + // field is ignored. + UseDefaultClient bool } // NewClient returns a Client that can be used to fetch metadata. // Returns the client that uses the specified http.Client for HTTP requests. -// If nil is specified, returns the default client. +// If nil is specified, returns the default internal Client that is +// also used in functions such as GetWithContext. This is useful for sharing +// a single TCP connection pool across requests. func NewClient(c *http.Client) *Client { if c == nil { + // Preserve original behavior for nil argument. return defaultClient } - return &Client{hc: c} + // Return a new client with a no-op logger for backward compatibility. + return &Client{hc: c, subClient: c, logger: slog.New(noOpHandler{})} +} + +// NewWithOptions returns a Client that is configured with the provided Options.
+func NewWithOptions(opts *Options) *Client { + // Preserve original behavior for nil opts. + if opts == nil { + return defaultClient + } + + // Handle explicit request for the internal default http.Client. + if opts.UseDefaultClient { + logger := opts.Logger + if logger == nil { + logger = slog.New(noOpHandler{}) + } + return &Client{hc: defaultClient.hc, subClient: defaultClient.subClient, logger: logger} + } + + // Handle isolated client creation. + client := opts.Client + subClient := opts.Client + if client == nil { + client = newDefaultHTTPClient(true) + subClient = newDefaultHTTPClient(false) + } + logger := opts.Logger + if logger == nil { + logger = slog.New(noOpHandler{}) + } + return &Client{hc: client, subClient: subClient, logger: logger} +} + +// NOTE: metadataRequestStrategy is assigned to a variable for test stubbing purposes. +var metadataRequestStrategy = func(ctx context.Context, httpClient *http.Client, resc chan bool) { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := httpClient.Do(req.WithContext(ctx)) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" +} + +// NOTE: dnsRequestStrategy is assigned to a variable for test stubbing purposes. +var dnsRequestStrategy = func(ctx context.Context, resc chan bool) { + resolver := &net.Resolver{} + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) +} + +// OnGCEWithContext reports whether this process is running on Google Compute Platforms. +// NOTE: True returned from `OnGCEWithContext` does not guarantee that the metadata server +// is accessible from this process and has all the metadata defined. +func (c *Client) OnGCEWithContext(ctx context.Context) bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/googleapis/google-cloud-go/issues/194 + go metadataRequestStrategy(ctx, c.hc, resc) + go dnsRequestStrategy(ctx, resc) + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + // Ensure cancellations from the calling context are respected. + waitContext, cancelWait := context.WithTimeout(ctx, 5*time.Second) + defer cancelWait() + select { + case res = <-resc: + return res + case <-waitContext.Done(): + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds).
+ return <-resc } // getETag returns a value from the metadata service as well as the associated ETag. // This func is otherwise equivalent to Get. func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string, err error) { + return c.getETagWithSubClient(ctx, suffix, false) +} + +func (c *Client) getETagWithSubClient(ctx context.Context, suffix string, enableSubClient bool) (value, etag string, err error) { // Using a fixed IP makes it very difficult to spoof the metadata service in // a container, which is an important use-case for local testing of cloud // deployments. To enable spoofing of the metadata service, the environment @@ -448,14 +535,30 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string req.Header.Set("User-Agent", userAgent) var res *http.Response var reqErr error + var body []byte retryer := newRetryer() + hc := c.hc + if enableSubClient { + hc = c.subClient + } for { - res, reqErr = c.hc.Do(req) + c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil)) + res, reqErr = hc.Do(req) var code int if res != nil { code = res.StatusCode + body, err = io.ReadAll(res.Body) + if err != nil { + res.Body.Close() + return "", "", err + } + c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body)) + res.Body.Close() } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if res != nil && res.Body != nil { + res.Body.Close() + } if err := sleep(ctx, delay); err != nil { return "", "", err } @@ -466,18 +569,13 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string if reqErr != nil { return "", "", reqErr } - defer res.Body.Close() if res.StatusCode == http.StatusNotFound { return "", "", NotDefinedError(suffix) } - all, err := io.ReadAll(res.Body) - if err != nil { - return "", "", err - } if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} + return "", "", &Error{Code: res.StatusCode, Message: string(body)} } - return string(all), res.Header.Get("Etag"), nil + return string(body), res.Header.Get("Etag"), nil } // Get returns a value from the metadata service. @@ -791,7 +889,7 @@ func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn fun const failedSubscribeSleep = time.Second * 5 // First check to see if the metadata value exists at all. - val, lastETag, err := c.getETag(ctx, suffix) + val, lastETag, err := c.getETagWithSubClient(ctx, suffix, true) if err != nil { return err } @@ -807,8 +905,11 @@ func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn fun suffix += "?wait_for_change=true&last_etag=" } for { - val, etag, err := c.getETag(ctx, suffix+url.QueryEscape(lastETag)) + val, etag, err := c.getETagWithSubClient(ctx, suffix+url.QueryEscape(lastETag), true) if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return err + } if _, deleted := err.(NotDefinedError); !deleted { time.Sleep(failedSubscribeSleep) continue // Retry on other errors. 
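Taken together, the metadata.go hunks above add three consumer-visible behaviors: NewWithOptions (custom slog logging, optionally sharing the package's internal default HTTP clients), OnGCEWithContext (GCE detection bounded by a caller-supplied context), and SubscribeWithContext returning promptly on context cancellation instead of looping through the failed-subscribe sleep. A minimal sketch of how a caller might exercise the new surface; the logger setup, timeout, and metadata suffix here are illustrative choices, not taken from this diff:

```go
package main

import (
	"context"
	"log/slog"
	"os"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Reuse the package-level connection pool while injecting a debug logger,
	// per Options.UseDefaultClient above.
	c := metadata.NewWithOptions(&metadata.Options{
		Logger:           slog.New(slog.NewTextHandler(os.Stderr, nil)),
		UseDefaultClient: true,
	})

	// GCE detection now honors a caller-controlled deadline instead of only
	// the fixed probe window hard-coded in the old testOnGCE.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if !c.OnGCEWithContext(ctx) {
		return // not on GCE, or detection timed out
	}

	// SubscribeWithContext now propagates context cancellation as an error
	// instead of retrying indefinitely.
	_ = c.SubscribeWithContext(ctx, "instance/maintenance-event",
		func(ctx context.Context, v string, ok bool) error {
			slog.Info("maintenance event", "value", v, "exists", ok)
			return nil
		})
}
```

Note that only the package-level OnGCEWithContext wrapper memoizes its result via onGCEOnce; the Client method re-probes on every call, so callers needing repeated checks should cache the answer themselves.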
diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go index 3d4bc75d..d516f30f 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry.go @@ -95,6 +95,9 @@ func shouldRetry(status int, err error) bool { if 500 <= status && status <= 599 { return true } + if status == http.StatusTooManyRequests { + return true + } if err == io.ErrUnexpectedEOF { return true } diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go index bb412f89..2e53f012 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -17,10 +17,15 @@ package metadata -import "syscall" +import ( + "errors" + "syscall" +) func init() { // Initialize syscallRetryable to return true on transient socket-level // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } + syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck.go b/vendor/cloud.google.com/go/compute/metadata/syscheck.go index e0704fa6..d57ae1b2 100644 --- a/vendor/cloud.google.com/go/compute/metadata/syscheck.go +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck.go @@ -20,7 +20,9 @@ package metadata // doing network requests) suggests that we're running on GCE. If this // returns true, testOnGCE tries a bit harder to reach its metadata // server. -func systemInfoSuggestsGCE() bool { +// +// NOTE: systemInfoSuggestsGCE is assigned to a variable for test stubbing purposes. +var systemInfoSuggestsGCE = func() bool { // We don't currently have checks for other GOOS return false } diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go index 74689acb..17ba5a3a 100644 --- a/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go @@ -21,8 +21,10 @@ import ( "strings" ) -func systemInfoSuggestsGCE() bool { +// NOTE: systemInfoSuggestsGCE is assigned to a variable for test stubbing purposes. +var systemInfoSuggestsGCE = func() bool { b, _ := os.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(b)) return name == "Google" || name == "Google Compute Engine" } diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go index c0ce6278..f57a5b14 100644 --- a/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go @@ -22,7 +22,8 @@ import ( "golang.org/x/sys/windows/registry" ) -func systemInfoSuggestsGCE() bool { +// NOTE: systemInfoSuggestsGCE is assigned to a variable for test stubbing purposes.
+var systemInfoSuggestsGCE = func() bool { k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE) if err != nil { return false diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 63d8364f..7839f3b8 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,86 @@ # Changes +## [1.5.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.5.1...iam/v1.5.2) (2025-04-15) + + +### Bug Fixes + +* **iam:** Update google.golang.org/api to 0.229.0 ([3319672](https://github.com/googleapis/google-cloud-go/commit/3319672f3dba84a7150772ccb5433e02dab7e201)) + +## [1.5.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.5.0...iam/v1.5.1) (2025-04-15) + + +### Documentation + +* **iam:** Formatting update for ListPolicyBindingsRequest ([dfdf404](https://github.com/googleapis/google-cloud-go/commit/dfdf404138728724aa6305c5c465ecc6fe5b1264)) +* **iam:** Minor doc update for ListPrincipalAccessBoundaryPoliciesResponse ([20f762c](https://github.com/googleapis/google-cloud-go/commit/20f762c528726a3f038d3e1f37e8a4952118badf)) +* **iam:** Minor doc update for ListPrincipalAccessBoundaryPoliciesResponse ([20f762c](https://github.com/googleapis/google-cloud-go/commit/20f762c528726a3f038d3e1f37e8a4952118badf)) + +## [1.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.2...iam/v1.5.0) (2025-03-31) + + +### Features + +* **iam:** New client(s) ([#11933](https://github.com/googleapis/google-cloud-go/issues/11933)) ([d5cb2e5](https://github.com/googleapis/google-cloud-go/commit/d5cb2e58334c6963cc46885f565fe3b19c52cb63)) + +## [1.4.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.1...iam/v1.4.2) (2025-03-13) + + +### Bug Fixes + +* **iam:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd)) + +## [1.4.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.0...iam/v1.4.1) (2025-03-06) + + +### Bug Fixes + +* **iam:** Fix out-of-sync version.go ([28f0030](https://github.com/googleapis/google-cloud-go/commit/28f00304ebb13abfd0da2f45b9b79de093cca1ec)) + +## [1.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.3.1...iam/v1.4.0) (2025-02-12) + + +### Features + +* **iam/admin:** Regenerate client ([#11570](https://github.com/googleapis/google-cloud-go/issues/11570)) ([eab87d7](https://github.com/googleapis/google-cloud-go/commit/eab87d73bea884c636ec88f03b9aa90102a2833f)), refs [#8219](https://github.com/googleapis/google-cloud-go/issues/8219) + +## [1.3.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.3.0...iam/v1.3.1) (2025-01-02) + + +### Bug Fixes + +* **iam:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) + +## [1.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.2...iam/v1.3.0) (2024-12-04) + + +### Features + +* **iam:** Add ResourcePolicyMember to google/iam/v1 ([8dedb87](https://github.com/googleapis/google-cloud-go/commit/8dedb878c070cc1e92d62bb9b32358425e3ceffb)) + +## [1.2.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.1...iam/v1.2.2) (2024-10-23) + + +### Bug Fixes + +* **iam:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e)) +* **iam:** WARNING: 
On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e)) + +## [1.2.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.0...iam/v1.2.1) (2024-09-12) + + +### Bug Fixes + +* **iam:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) + +## [1.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.13...iam/v1.2.0) (2024-08-20) + + +### Features + +* **iam:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) + ## [1.1.13](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.12...iam/v1.1.13) (2024-08-08) diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 619b4c4f..2b57ae3b 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc v4.25.3 // source: google/iam/v1/iam_policy.proto @@ -65,11 +65,9 @@ type SetIamPolicyRequest struct { func (x *SetIamPolicyRequest) Reset() { *x = SetIamPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SetIamPolicyRequest) String() string { @@ -80,7 +78,7 @@ func (*SetIamPolicyRequest) ProtoMessage() {} func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -132,11 +130,9 @@ type GetIamPolicyRequest struct { func (x *GetIamPolicyRequest) Reset() { *x = GetIamPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetIamPolicyRequest) String() string { @@ -147,7 +143,7 @@ func (*GetIamPolicyRequest) ProtoMessage() {} func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -194,11 +190,9 @@ type TestIamPermissionsRequest 
struct { func (x *TestIamPermissionsRequest) Reset() { *x = TestIamPermissionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestIamPermissionsRequest) String() string { @@ -209,7 +203,7 @@ func (*TestIamPermissionsRequest) ProtoMessage() {} func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -251,11 +245,9 @@ type TestIamPermissionsResponse struct { func (x *TestIamPermissionsResponse) Reset() { *x = TestIamPermissionsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestIamPermissionsResponse) String() string { @@ -266,7 +258,7 @@ func (*TestIamPermissionsResponse) ProtoMessage() {} func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -363,16 +355,15 @@ var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, 0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7f, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7c, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, - 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, - 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, + 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, + 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -421,56 +412,6 @@ func file_google_iam_v1_iam_policy_proto_init() { } 
file_google_iam_v1_options_proto_init() file_google_iam_v1_policy_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*SetIamPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*GetIamPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*TestIamPermissionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*TestIamPermissionsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index f1c1c084..745de05b 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc v4.25.3 // source: google/iam/v1/options.proto @@ -64,11 +64,9 @@ type GetPolicyOptions struct { func (x *GetPolicyOptions) Reset() { *x = GetPolicyOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_options_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_options_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetPolicyOptions) String() string { @@ -79,7 +77,7 @@ func (*GetPolicyOptions) ProtoMessage() {} func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_options_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -152,20 +150,6 @@ func file_google_iam_v1_options_proto_init() { if File_google_iam_v1_options_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GetPolicyOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index 4dda5d6d..0eba1508 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc v4.25.3 // source: google/iam/v1/policy.proto @@ -337,11 +337,9 @@ type Policy struct { func (x *Policy) Reset() { *x = Policy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Policy) String() string { @@ -352,7 +350,7 @@ func (*Policy) ProtoMessage() {} func (x *Policy) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_policy_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -462,11 +460,9 @@ type Binding struct { func (x *Binding) Reset() { *x = Binding{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Binding) String() string { @@ -477,7 +473,7 @@ func (*Binding) ProtoMessage() {} func (x *Binding) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_policy_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -579,11 +575,9 @@ type AuditConfig struct { func (x *AuditConfig) Reset() { *x = AuditConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AuditConfig) String() string { @@ -594,7 +588,7 @@ func (*AuditConfig) ProtoMessage() {} func (x *AuditConfig) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_policy_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -658,11 +652,9 @@ type AuditLogConfig struct { func (x *AuditLogConfig) Reset() { *x = AuditLogConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AuditLogConfig) String() string { @@ -673,7 +665,7 @@ func (*AuditLogConfig) ProtoMessage() {} func (x *AuditLogConfig) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_policy_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -716,11 +708,9 @@ type PolicyDelta struct { func (x *PolicyDelta) Reset() { *x = PolicyDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_policy_proto_msgTypes[4] 
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PolicyDelta) String() string { @@ -731,7 +721,7 @@ func (*PolicyDelta) ProtoMessage() {} func (x *PolicyDelta) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_policy_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -784,11 +774,9 @@ type BindingDelta struct { func (x *BindingDelta) Reset() { *x = BindingDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_policy_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BindingDelta) String() string { @@ -799,7 +787,7 @@ func (*BindingDelta) ProtoMessage() {} func (x *BindingDelta) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_policy_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -869,11 +857,9 @@ type AuditConfigDelta struct { func (x *AuditConfigDelta) Reset() { *x = AuditConfigDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_iam_v1_policy_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AuditConfigDelta) String() string { @@ -884,7 +870,7 @@ func (*AuditConfigDelta) ProtoMessage() {} func (x *AuditConfigDelta) ProtoReflect() protoreflect.Message { mi := &file_google_iam_v1_policy_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1072,92 +1058,6 @@ func file_google_iam_v1_policy_proto_init() { if File_google_iam_v1_policy_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Policy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Binding); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*AuditConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*AuditLogConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*PolicyDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := 
v.(*BindingDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*AuditConfigDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go new file mode 100644 index 00000000..c3339e26 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go @@ -0,0 +1,185 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.2 +// protoc v4.25.3 +// source: google/iam/v1/resource_policy_member.proto + +package iampb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Output-only policy member strings of a Google Cloud resource's built-in +// identity. +type ResourcePolicyMember struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // IAM policy binding member referring to a Google Cloud resource by + // user-assigned name (https://google.aip.dev/122). If a resource is deleted + // and recreated with the same name, the binding will be applicable to the new + // resource. + // + // Example: + // `principal://parametermanager.googleapis.com/projects/12345/name/locations/us-central1-a/parameters/my-parameter` + IamPolicyNamePrincipal string `protobuf:"bytes,1,opt,name=iam_policy_name_principal,json=iamPolicyNamePrincipal,proto3" json:"iam_policy_name_principal,omitempty"` + // IAM policy binding member referring to a Google Cloud resource by + // system-assigned unique identifier (https://google.aip.dev/148#uid). 
If a + // resource is deleted and recreated with the same name, the binding will not + // be applicable to the new resource + // + // Example: + // `principal://parametermanager.googleapis.com/projects/12345/uid/locations/us-central1-a/parameters/a918fed5` + IamPolicyUidPrincipal string `protobuf:"bytes,2,opt,name=iam_policy_uid_principal,json=iamPolicyUidPrincipal,proto3" json:"iam_policy_uid_principal,omitempty"` +} + +func (x *ResourcePolicyMember) Reset() { + *x = ResourcePolicyMember{} + mi := &file_google_iam_v1_resource_policy_member_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourcePolicyMember) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourcePolicyMember) ProtoMessage() {} + +func (x *ResourcePolicyMember) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_resource_policy_member_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourcePolicyMember.ProtoReflect.Descriptor instead. +func (*ResourcePolicyMember) Descriptor() ([]byte, []int) { + return file_google_iam_v1_resource_policy_member_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourcePolicyMember) GetIamPolicyNamePrincipal() string { + if x != nil { + return x.IamPolicyNamePrincipal + } + return "" +} + +func (x *ResourcePolicyMember) GetIamPolicyUidPrincipal() string { + if x != nil { + return x.IamPolicyUidPrincipal + } + return "" +} + +var File_google_iam_v1_resource_policy_member_proto protoreflect.FileDescriptor + +var file_google_iam_v1_resource_policy_member_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, + 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, + 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, + 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x19, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, + 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x69, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x69, 0x6e, + 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x3c, 0x0a, 0x18, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x5f, 0x75, 0x69, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x15, 0x69, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x69, 0x64, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, + 0x70, 0x61, 0x6c, 0x42, 0x87, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x19, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x50, + 0x72, 0x6f, 
0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, + 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, + 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_iam_v1_resource_policy_member_proto_rawDescOnce sync.Once + file_google_iam_v1_resource_policy_member_proto_rawDescData = file_google_iam_v1_resource_policy_member_proto_rawDesc +) + +func file_google_iam_v1_resource_policy_member_proto_rawDescGZIP() []byte { + file_google_iam_v1_resource_policy_member_proto_rawDescOnce.Do(func() { + file_google_iam_v1_resource_policy_member_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_resource_policy_member_proto_rawDescData) + }) + return file_google_iam_v1_resource_policy_member_proto_rawDescData +} + +var file_google_iam_v1_resource_policy_member_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_iam_v1_resource_policy_member_proto_goTypes = []any{ + (*ResourcePolicyMember)(nil), // 0: google.iam.v1.ResourcePolicyMember +} +var file_google_iam_v1_resource_policy_member_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_iam_v1_resource_policy_member_proto_init() } +func file_google_iam_v1_resource_policy_member_proto_init() { + if File_google_iam_v1_resource_policy_member_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_resource_policy_member_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_iam_v1_resource_policy_member_proto_goTypes, + DependencyIndexes: file_google_iam_v1_resource_policy_member_proto_depIdxs, + MessageInfos: file_google_iam_v1_resource_policy_member_proto_msgTypes, + }.Build() + File_google_iam_v1_resource_policy_member_proto = out.File + file_google_iam_v1_resource_policy_member_proto_rawDesc = nil + file_google_iam_v1_resource_policy_member_proto_goTypes = nil + file_google_iam_v1_resource_policy_member_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/pubsub/CHANGES.md b/vendor/cloud.google.com/go/pubsub/CHANGES.md index 64aeb66a..398481a2 100644 --- a/vendor/cloud.google.com/go/pubsub/CHANGES.md +++ b/vendor/cloud.google.com/go/pubsub/CHANGES.md @@ -1,5 +1,161 @@ # Changes +## [1.50.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.50.0...pubsub/v1.50.1) (2025-09-04) + + +### Bug Fixes + +* **pubsub/v2:** Update flowcontrol metrics even when disabled ([#12590](https://github.com/googleapis/google-cloud-go/issues/12590)) ([c153495](https://github.com/googleapis/google-cloud-go/commit/c1534952c4a6c3a52dd9e3aab295d27d4107016c)) + + +### Documentation + +* **pubsub:** Update migration docs with seek ([#12642](https://github.com/googleapis/google-cloud-go/issues/12642)) 
([40538c3](https://github.com/googleapis/google-cloud-go/commit/40538c3a8cbbd9a54deb6cdb204809d487aef21b)) + +## [1.50.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.49.0...pubsub/v1.50.0) (2025-07-28) + + +### Features + +* **pubsub/v2:** Add new v2 library ([#12218](https://github.com/googleapis/google-cloud-go/issues/12218)) ([c798f62](https://github.com/googleapis/google-cloud-go/commit/c798f62f908140686b8e2a365cccf9608fb5ab95)) + + +### Bug Fixes + +* **pubsub:** Update google.golang.org/api to 0.229.0 ([3319672](https://github.com/googleapis/google-cloud-go/commit/3319672f3dba84a7150772ccb5433e02dab7e201)) + + +### Documentation + +* **pubsub:** Add docs comment to MaxOutstandingBytes ([#12601](https://github.com/googleapis/google-cloud-go/issues/12601)) ([76ddb34](https://github.com/googleapis/google-cloud-go/commit/76ddb3498f2c986b156abaeaf7efeab1c9490725)) + +## [1.49.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.48.1...pubsub/v1.49.0) (2025-04-07) + + +### Features + +* **pubsub:** Support message transforms ([#11957](https://github.com/googleapis/google-cloud-go/issues/11957)) ([84bf25b](https://github.com/googleapis/google-cloud-go/commit/84bf25ba1c0e01b2f19167d4d3c914c56f87b924)) + +## [1.48.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.48.0...pubsub/v1.48.1) (2025-04-01) + + +### Bug Fixes + +* **pubsub/pstest:** Message ordering issue ([#11603](https://github.com/googleapis/google-cloud-go/issues/11603)) ([1d6ffc0](https://github.com/googleapis/google-cloud-go/commit/1d6ffc02cd211368eabbc8e4f02392952c603703)) +* **pubsub:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd)) + + +### Documentation + +* **pubsub:** Update documentation for JavaScriptUDF to indicate that the `message_id` metadata field is optional instead of required ([f437f08](https://github.com/googleapis/google-cloud-go/commit/f437f0871a88abbeb918ce7364d0299a513cc311)) + +## [1.48.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.47.0...pubsub/v1.48.0) (2025-03-12) + + +### Features + +* **pubsub/pstest:** Support listening on custom address ([#11606](https://github.com/googleapis/google-cloud-go/issues/11606)) ([63865a2](https://github.com/googleapis/google-cloud-go/commit/63865a202b95fe0ab283b032b15b198f10188861)) +* **pubsub:** Add support for message transforms to Topic and Subscription ([59fe58a](https://github.com/googleapis/google-cloud-go/commit/59fe58aba61abf69bfb7549c0a03b21bdb4b8b2f)) +* **pubsub:** Deprecate `enabled` field for message transforms and add `disabled` field ([dd0d1d7](https://github.com/googleapis/google-cloud-go/commit/dd0d1d7b41884c9fc9b5fe808139cccd29e1e486)) + + +### Documentation + +* **pubsub:** A comment for field `code` in message `.google.pubsub.v1.JavaScriptUDF` is changed ([#11553](https://github.com/googleapis/google-cloud-go/issues/11553)) ([678944b](https://github.com/googleapis/google-cloud-go/commit/678944b30e389781687209caf3e3b9d35739a6f0)) +* **pubsub:** Deprecate `enabled` field for message transforms and add `disabled` field ([dd0d1d7](https://github.com/googleapis/google-cloud-go/commit/dd0d1d7b41884c9fc9b5fe808139cccd29e1e486)) +* **pubsub:** Fix link for AnalyticsHubSubscriptionInfo ([59fe58a](https://github.com/googleapis/google-cloud-go/commit/59fe58aba61abf69bfb7549c0a03b21bdb4b8b2f)) + +## [1.47.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.46.0...pubsub/v1.47.0) 
(2025-01-31) + + +### Features + +* **pubsub:** Support new forms of topic ingestion ([#11537](https://github.com/googleapis/google-cloud-go/issues/11537)) ([46d6ed4](https://github.com/googleapis/google-cloud-go/commit/46d6ed475e6ae6b96f3e11e17496fd75fd8ea7c4)) + +## [1.46.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.45.3...pubsub/v1.46.0) (2025-01-24) + + +### Features + +* **pubsub:** Add Kafka-based sources to IngestionDataSourceSettings proto and IngestionFailureEvent proto ([e4e1a49](https://github.com/googleapis/google-cloud-go/commit/e4e1a498f21b0792e0a7662f82f9e062e5aa0fe9)) + + +### Bug Fixes + +* **pubsub:** Fix defer call in for loop ([#11175](https://github.com/googleapis/google-cloud-go/issues/11175)) ([7aec711](https://github.com/googleapis/google-cloud-go/commit/7aec711200d8e79686370f5d40915e21df7651a4)) +* **pubsub:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) + +## [1.45.3](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.45.2...pubsub/v1.45.3) (2024-12-04) + + +### Bug Fixes + +* **pubsub:** Convert stream ack deadline seconds from duration ([#11214](https://github.com/googleapis/google-cloud-go/issues/11214)) ([b2b05e4](https://github.com/googleapis/google-cloud-go/commit/b2b05e4515d8e59d7b3bf39432c6356b4450a17b)) + +## [1.45.2](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.45.1...pubsub/v1.45.2) (2024-12-03) + + +### Bug Fixes + +* **pubsub/pstest:** Make invalid filter return error instead of panic ([#11087](https://github.com/googleapis/google-cloud-go/issues/11087)) ([45e1ce7](https://github.com/googleapis/google-cloud-go/commit/45e1ce70e2757b78b868768b93e05da8858bab85)) +* **pubsub:** Only init batch span if trace enabled ([#11193](https://github.com/googleapis/google-cloud-go/issues/11193)) ([f843d50](https://github.com/googleapis/google-cloud-go/commit/f843d50f849c5014eba33d923085f0add41365a6)) +* **pubsub:** Use official semconv variable whenever possible ([#10904](https://github.com/googleapis/google-cloud-go/issues/10904)) ([1ce4b6d](https://github.com/googleapis/google-cloud-go/commit/1ce4b6dc31653ca6f28c50d5149d74b827caaeaa)) + + +### Documentation + +* **pubsub:** MinExtensionPeriod defaults to 60 seconds ([#10791](https://github.com/googleapis/google-cloud-go/issues/10791)) ([cc88fe1](https://github.com/googleapis/google-cloud-go/commit/cc88fe1c8d834903e2b5f8d3a7cc9bbd65c70b29)) + +## [1.45.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.45.0...pubsub/v1.45.1) (2024-10-24) + + +### Bug Fixes + +* **pubsub:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e)) +* **pubsub:** WARNING: On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. 
([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e)) + + +### Documentation + +* **pubsub:** Add doc links to top level package doc ([#11029](https://github.com/googleapis/google-cloud-go/issues/11029)) ([fe2ec56](https://github.com/googleapis/google-cloud-go/commit/fe2ec569029d2052885063b6fca90e1a27424b4e)) + +## [1.45.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.44.0...pubsub/v1.45.0) (2024-10-22) + + +### Features + +* **pubsub:** Add IngestionFailureEvent to the external proto ([f0b05e2](https://github.com/googleapis/google-cloud-go/commit/f0b05e260435d5e8889b9a0ca0ab215fcde169ab)) +* **pubsub:** Add support for ingestion platform logging settings ([#10969](https://github.com/googleapis/google-cloud-go/issues/10969)) ([c60241f](https://github.com/googleapis/google-cloud-go/commit/c60241f46db2b021d799f621851a352f2baec96e)) + +## [1.44.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.43.0...pubsub/v1.44.0) (2024-10-08) + + +### Features + +* **pubsub:** Add ingestion Cloud Storage fields and Platform Logging fields to Topic ([7250d71](https://github.com/googleapis/google-cloud-go/commit/7250d714a638dcd5df3fbe0e91c5f1250c3f80f9)) +* **pubsub:** Add support for cloud storage ingestion topics ([#10959](https://github.com/googleapis/google-cloud-go/issues/10959)) ([1a11675](https://github.com/googleapis/google-cloud-go/commit/1a116759ce0d25fdcb5776bf73c52408ae1ec985)) +* **pubsub:** Return listing information for subscriptions created via Analytics Hub ([fdb4ea9](https://github.com/googleapis/google-cloud-go/commit/fdb4ea99189657880e5f0e0dce16bef1c3aa0d2f)) + + +### Documentation + +* **pubsub:** Update documentation for 31 day subscription message retention ([#10845](https://github.com/googleapis/google-cloud-go/issues/10845)) ([9b4b2fa](https://github.com/googleapis/google-cloud-go/commit/9b4b2fa87864906aeae3a8fda460466f951bc6c9)) + +## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.42.0...pubsub/v1.43.0) (2024-09-09) + + +### Features + +* **pubsub:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) +* **pubsub:** Allow trace extraction from protobuf message ([#10827](https://github.com/googleapis/google-cloud-go/issues/10827)) ([caa826c](https://github.com/googleapis/google-cloud-go/commit/caa826cea826473ebf4c806b57b0c3b0a2f0f365)) + + +### Bug Fixes + +* **pubsub:** Add attributes before startSpan ([#10800](https://github.com/googleapis/google-cloud-go/issues/10800)) ([48addbf](https://github.com/googleapis/google-cloud-go/commit/48addbff725ee2bb226ce0ab926415c27fd4ffad)) +* **pubsub:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) +* **pubsub:** Close grpc streams on retry ([#10624](https://github.com/googleapis/google-cloud-go/issues/10624)) ([79a0e11](https://github.com/googleapis/google-cloud-go/commit/79a0e118c88190cbe1b56250a75b67bd98b0d7f2)) + ## [1.42.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.41.0...pubsub/v1.42.0) (2024-08-19) diff --git a/vendor/cloud.google.com/go/pubsub/MIGRATING.md b/vendor/cloud.google.com/go/pubsub/MIGRATING.md new file mode 100644 index 00000000..b5a34134 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/MIGRATING.md @@ -0,0 +1,546 @@ +# Migrating from Go PubSub v1 to v2 + +This guide shows how to migrate from the Go PubSub client library 
v1 version cloud.google.com/go/pubsub to the v2 version cloud.google.com/go/pubsub/v2.
+
+Note: The code snippets in this guide are meant to be a quick way of comparing the differences between the v1 and v2 packages and **don’t compile as-is**. For a list of all the samples, see the [updated samples](https://cloud.google.com/pubsub/docs/samples).
+
+In line with Google's [OSS Library Breaking Change Policy](https://opensource.google/documentation/policies/library-breaking-change), support for the Go PubSub client library v1 version will continue until July 31st, 2026. This includes continued bug fixes and security patches for the v1 version, but no new features will be introduced. We encourage all users to migrate to the Go PubSub client library v2 version before support expires for the earlier v1 version.
+
+## New imports
+
+There are two new packages:
+
+* [cloud.google.com/go/pubsub/v2](http://cloud.google.com/go/pubsub/v2): The new main v2 package.
+
+* [cloud.google.com/go/pubsub/v2/apiv1/pubsubpb](http://cloud.google.com/go/pubsub/v2/apiv1/pubsubpb): The auto-generated protobuf Go types that are used as arguments for admin operations.
+
+For other relevant packages, see the Relevant packages section at the end of this guide.
+
+## Overview of the migration process
+
+The following is an overview of the migration process. You can find more details about each step in the later parts of this document.
+
+1. Import the new [cloud.google.com/go/pubsub/v2](http://cloud.google.com/go/pubsub/v2) package.
+
+2. Migrate admin operations such as `CreateTopic` and `DeleteTopic` to the v2 version admin API.
+
+3. Replace all instances of `Topic()` and `Subscription()` calls with `Publisher()` and `Subscriber()`.
+
+4. Change the data plane client instantiation method. If you previously called `CreateTopic` and used the returned `Topic` to call the `Publish` RPC, you must now instead instantiate a `Publisher` client, and then use that to call `Publish`.
+
+5. Change the subscriber settings that are renamed in the v2 version.
+
+6. Remove references to the deprecated settings `Synchronous`, `BufferedByteLimit`, and `UseLegacyFlowControl`.
+
+7. Rename the migrated error type `ErrTopicStopped` to `ErrPublisherStopped`.
+
+## Admin operations
+
+The Pub/Sub admin plane is used to manage Pub/Sub resources like topics, subscriptions, and schemas. These admin operations include `Create`, `Get`, `Update`, `List`, and `Delete`. For subscriptions, seek and snapshots are also part of this layer.
+
+One of the key differences between the v1 and v2 versions is the change to the admin API. Two new clients called `TopicAdminClient` and `SubscriptionAdminClient` are added to handle the admin operations for topics and subscriptions respectively.
+
+For topics and subscriptions, you can access these admin clients as fields of the main client: `pubsub.Client.TopicAdminClient` and `pubsub.Client.SubscriptionAdminClient`. These clients are pre-initialized when calling `pubsub.NewClient`, and take in the same `ClientOptions` passed to `NewClient`.
+
+There is a mostly one-to-one mapping of existing admin methods to the new admin methods, with some exceptions noted below.
+
+### General RPCs
+
+The new gRPC-based admin client generally takes in Go protobuf types and returns protobuf response types. If you have used other Google Cloud Go libraries like Compute Engine or Secret Manager, the process is similar.
+
+Here is an example comparing a topic creation method in the v1 and v2 libraries.
+In this case, [CreateTopic](https://pkg.go.dev/cloud.google.com/go/pubsub/v2/apiv1#TopicAdminClient.CreateTopic) takes in a generated protobuf type, [pubsubpb.Topic](https://pkg.go.dev/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb#Topic), that is based on the topic defined in [pubsub.proto](https://github.com/googleapis/googleapis/blob/3808680f22d715ef59493e67a6fe82e5ae3e00dd/google/pubsub/v1/pubsub.proto#L678). A key difference here is that the `Name` field of the proto type is the **fully qualified name** for the topic (e.g. `projects/my-project/topics/my-topic`), rather than just the resource ID (e.g. `my-topic`). In addition, this name is specified as part of the [Topic](https://pkg.go.dev/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb#Topic) struct rather than as an argument to `CreateTopic`.
+
+```go
+// v1 way to create a topic
+
+import (
+	pubsub "cloud.google.com/go/pubsub"
+)
+...
+projectID := "my-project"
+topicID := "my-topic"
+client, err := pubsub.NewClient(ctx, projectID)
+
+topic, err := client.CreateTopic(ctx, topicID)
+```
+
+```go
+// v2 way to create a topic
+import (
+	"cloud.google.com/go/pubsub/v2"
+	"cloud.google.com/go/pubsub/v2/apiv1/pubsubpb"
+)
+...
+projectID := "my-project"
+topicID := "my-topic"
+client, err := pubsub.NewClient(ctx, projectID)
+
+topicpb := &pubsubpb.Topic{
+	Name: fmt.Sprintf("projects/%s/topics/%s", projectID, topicID),
+}
+topic, err := client.TopicAdminClient.CreateTopic(ctx, topicpb)
+```
+
+The v1 library's `CreateTopicWithConfig` is fully removed. You can specify topic configurations by passing the fields into [pubsubpb.Topic](https://pkg.go.dev/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb#Topic) while calling `TopicAdminClient.CreateTopic`.
+
+```go
+// v1 way to create a topic with settings
+
+import (
+	pubsub "cloud.google.com/go/pubsub"
+)
+...
+projectID := "my-project"
+topicID := "my-topic"
+client, err := pubsub.NewClient(ctx, projectID)
+
+// Create a new topic with the given name and config.
+topicConfig := &pubsub.TopicConfig{
+	RetentionDuration: 24 * time.Hour,
+	MessageStoragePolicy: pubsub.MessageStoragePolicy{
+		AllowedPersistenceRegions: []string{"us-east1"},
+	},
+}
+topic, err := client.CreateTopicWithConfig(ctx, topicID, topicConfig)
+```
+
+```go
+// v2 way to create a topic with settings
+import (
+	"cloud.google.com/go/pubsub/v2"
+	"cloud.google.com/go/pubsub/v2/apiv1/pubsubpb"
+)
+...
+projectID := "my-project"
+topicID := "my-topic"
+client, err := pubsub.NewClient(ctx, projectID)
+
+topicpb := &pubsubpb.Topic{
+	Name: fmt.Sprintf("projects/%s/topics/%s", projectID, topicID),
+	MessageRetentionDuration: durationpb.New(24 * time.Hour),
+	MessageStoragePolicy: &pubsubpb.MessageStoragePolicy{
+		AllowedPersistenceRegions: []string{"us-east1"},
+	},
+}
+topic, err := client.TopicAdminClient.CreateTopic(ctx, topicpb)
+```
+
+For code that creates a subscription, the migration process is similar to the topic creation method. Use the `pubsubpb.Subscription` type and the `SubscriptionAdminClient.CreateSubscription` method.
+
+```go
+s := &pubsubpb.Subscription{
+	Name: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, subID),
+}
+sub, err := client.SubscriptionAdminClient.CreateSubscription(ctx, s)
+```
+
+The [new proto types](https://pkg.go.dev/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb) and their fields might differ slightly from the current v1 version types. The new types are based on the Pub/Sub proto.
+Here are some of those differences:
+
+* In the `CreateTopic` example shown in an earlier part of this guide, the message retention duration is defined as `RetentionDuration` (a Go duration) in the v1 version, but in the v2 version it is `MessageRetentionDuration` of type [durationpb.Duration](https://pkg.go.dev/google.golang.org/protobuf/types/known/durationpb#hdr-Conversion_from_a_Go_Duration).
+
+* Generated protobuf code doesn't follow Go styling guides for initialisms. For example, `KMSKeyName` is defined as `KmsKeyName` in the v2 version.
+
+* The v1 version uses custom optional types for certain duration and boolean fields. In the v2 version, duration fields use the protobuf-specific [durationpb.Duration](https://pkg.go.dev/google.golang.org/protobuf/types/known/durationpb) type, and optional booleans use Go boolean values directly.
+
+```go
+// v2 way of initializing a subscription with configuration.
+s := &pubsubpb.Subscription{
+	Name: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, subID),
+	MessageRetentionDuration: durationpb.New(1 * time.Hour),
+	EnableExactlyOnceDelivery: true,
+}
+sub, err := client.SubscriptionAdminClient.CreateSubscription(ctx, s)
+```
+
+For more information, see the method calls and arguments defined by the [new clients](https://pkg.go.dev/cloud.google.com/go/pubsub/v2/apiv1) and [Go protobuf types](https://pkg.go.dev/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb).
+
+### Delete RPCs
+
+Let’s look at the differences for another operation: `DeleteTopic`.
+
+```go
+// v1 way to delete a topic
+import (
+	pubsub "cloud.google.com/go/pubsub"
+)
+...
+projectID := "my-project"
+topicID := "my-topic"
+client, err := pubsub.NewClient(ctx, projectID)
+
+topic := client.Topic(topicID)
+topic.Delete(ctx)
+```
+
+```go
+// v2 way to delete a topic
+import (
+	"cloud.google.com/go/pubsub/v2"
+	"cloud.google.com/go/pubsub/v2/apiv1/pubsubpb"
+)
+...
+projectID := "my-project"
+topicID := "my-topic"
+client, err := pubsub.NewClient(ctx, projectID)
+
+req := &pubsubpb.DeleteTopicRequest{
+	Topic: fmt.Sprintf("projects/%s/topics/%s", projectID, topicID),
+}
+client.TopicAdminClient.DeleteTopic(ctx, req)
+```
+
+In this case, you have to instantiate a `DeleteTopicRequest` struct and pass that into the `DeleteTopic` call. This includes specifying the **full path** of the topic, which includes the project ID, instead of just the topic ID.
+
+### Update RPCs
+
+To update a resource, create a proto object describing the resource you are modifying and explicitly set the fields you want to change.
+
+You may need to specify a [FieldMask protobuf type](https://pkg.go.dev/google.golang.org/protobuf/types/known/fieldmaskpb) along with the resource you are modifying if you only want to edit specific fields and leave the others the same. The strings to pass into the update field mask must be the names of the fields of the resource you are editing, written in `snake_case` (such as `enable_exactly_once_delivery` or `message_storage_policy`). These must match the field names in the [resource message definition in proto](https://github.com/googleapis/googleapis/blob/master/google/pubsub/v1/pubsub.proto).
+
+If a field mask is not present on update, the operation applies to all fields (as if a field mask of all fields had been specified) and overwrites the entire resource.
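+Mask paths are plain strings, so a misspelled path is easy to ship. As an optional safeguard (not part of the migration steps themselves), `fieldmaskpb.New` builds a mask while validating each path against the message descriptor. A minimal sketch, assuming the same `pb` import alias as the v2 example below:
+
+```go
+// fieldmaskpb.New returns an error if a path does not name a field of
+// pb.Subscription, catching typos before the request reaches the server.
+mask, err := fieldmaskpb.New(&pb.Subscription{}, "enable_exactly_once_delivery")
+if err != nil {
+	return err // e.g. a misspelled path such as "enable_exactly_once_deliver"
+}
+updateReq := &pb.UpdateSubscriptionRequest{
+	Subscription: &pb.Subscription{
+		Name: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, subID),
+		EnableExactlyOnceDelivery: true,
+	},
+	UpdateMask: mask,
+}
+```
+
+The v1 and v2 update flows are compared below.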
+
+```go
+// v1 way to update subscriptions
+projectID := "my-project"
+subID := "my-subscription"
+client, err := pubsub.NewClient(ctx, projectID)
+
+cfg := pubsub.SubscriptionConfigToUpdate{EnableExactlyOnceDelivery: true}
+subConfig, err := client.Subscription(subID).Update(ctx, cfg)
+```
+
+```go
+// v2 way to update subscriptions
+import (
+	"cloud.google.com/go/pubsub/v2"
+	pb "cloud.google.com/go/pubsub/v2/apiv1/pubsubpb"
+	"google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+projectID := "my-project"
+subID := "my-subscription"
+client, err := pubsub.NewClient(ctx, projectID)
+updateReq := &pb.UpdateSubscriptionRequest{
+	Subscription: &pb.Subscription{
+		Name: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, subID),
+		EnableExactlyOnceDelivery: true,
+	},
+	UpdateMask: &fieldmaskpb.FieldMask{
+		Paths: []string{"enable_exactly_once_delivery"},
+	},
+}
+sub, err := client.SubscriptionAdminClient.UpdateSubscription(ctx, updateReq)
+```
+
+### Exists method removed
+
+The `Exists` methods for topic, subscription, and schema are removed in the v2 version. You can check if a resource exists by performing a Get call (e.g. `GetTopic`).
+
+For publishing and subscribing, we recommend following the pattern of [optimistically expecting a resource to exist](https://cloud.google.com/pubsub/docs/samples/pubsub-optimistic-subscribe#pubsub_optimistic_subscribe-go) and then handling the `NOT_FOUND` error, which saves a network call if the resource does exist.
+
+### RPCs involving one-of fields
+
+RPCs that include one-of fields require instantiating the specific generated Go protobuf structs that satisfy the interface type. This may involve instantiating structs whose names look duplicated. This is because in the generated code, the outer struct is a wrapper that satisfies the one-of interface, while the inner struct contains the actual fields of the one-of.
+
+Let’s look at an example:
+
+```go
+// v1 way to create topic ingestion from kinesis
+
+import (
+	"cloud.google.com/go/pubsub"
+)
+...
+cfg := &pubsub.TopicConfig{
+	IngestionDataSourceSettings: &pubsub.IngestionDataSourceSettings{
+		Source: &pubsub.IngestionDataSourceAWSKinesis{
+			StreamARN:         streamARN,
+			ConsumerARN:       consumerARN,
+			AWSRoleARN:        awsRoleARN,
+			GCPServiceAccount: gcpServiceAccount,
+		},
+	},
+}
+
+topic, err := client.CreateTopicWithConfig(ctx, topicID, cfg)
+```
+
+```go
+// v2 way to create topic ingestion from kinesis
+
+import (
+	"cloud.google.com/go/pubsub/v2"
+	pb "cloud.google.com/go/pubsub/v2/apiv1/pubsubpb"
+)
+...
+topicpb := &pb.Topic{
+	IngestionDataSourceSettings: &pb.IngestionDataSourceSettings{
+		Source: &pb.IngestionDataSourceSettings_AwsKinesis_{
+			AwsKinesis: &pb.IngestionDataSourceSettings_AwsKinesis{
+				StreamArn:         streamARN,
+				ConsumerArn:       consumerARN,
+				AwsRoleArn:        awsRoleARN,
+				GcpServiceAccount: gcpServiceAccount,
+			},
+		},
+	},
+}
+
+topic, err := client.TopicAdminClient.CreateTopic(ctx, topicpb)
+```
+
+In the above example, `IngestionDataSourceSettings_AwsKinesis_` is a wrapper struct around `IngestionDataSourceSettings_AwsKinesis`. The former satisfies the interface type of being an ingestion data source, while the latter contains the actual fields of the settings.
+
+Another example of an instantiation is with [Single Message Transforms](https://cloud.google.com/pubsub/docs/smts/smts-overview).
+
+```go
+import (
+	"cloud.google.com/go/pubsub"
+)
+...
+projectID := "my-project"
+topicID := "my-topic"
+client, err := pubsub.NewClient(ctx, projectID)
+...
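+// FunctionName must match the name of a function defined in the JavaScript
+// source held in Code; redactSSN here is purely illustrative.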
+
+code := `function redactSSN(message, metadata) {...}`
+transform := pubsub.MessageTransform{
+	Transform: pubsub.JavaScriptUDF{
+		FunctionName: "redactSSN",
+		Code:         code,
+	},
+}
+cfg := &pubsub.TopicConfig{
+	MessageTransforms: []pubsub.MessageTransform{transform},
+}
+t, err := client.CreateTopicWithConfig(ctx, topicID, cfg)
+```
+
+```go
+import (
+	"cloud.google.com/go/pubsub/v2"
+	pb "cloud.google.com/go/pubsub/v2/apiv1/pubsubpb"
+)
+...
+projectID := "my-project"
+topicID := "my-topic"
+client, err := pubsub.NewClient(ctx, projectID)
+
+code := `function redactSSN(message, metadata) {...}`
+transform := &pb.MessageTransform{
+	Transform: &pb.MessageTransform_JavascriptUdf{
+		JavascriptUdf: &pb.JavaScriptUDF{
+			FunctionName: "redactSSN",
+			Code:         code,
+		},
+	},
+}
+
+topicpb := &pb.Topic{
+	Name:              fmt.Sprintf("projects/%s/topics/%s", projectID, topicID),
+	MessageTransforms: []*pb.MessageTransform{transform},
+}
+topic, err := client.TopicAdminClient.CreateTopic(ctx, topicpb)
+```
+
+In this case, `MessageTransform_JavascriptUdf` satisfies the interface, while `JavaScriptUDF` holds the actual strings relevant for the message transform.
+
+### Seek / snapshots
+
+Seek and snapshot RPCs are also part of the admin layer. Use the [SubscriptionAdminClient](https://pkg.go.dev/cloud.google.com/go/pubsub/v2/apiv1#SubscriptionAdminClient) to seek to a specific time or snapshot.
+
+```go
+// v2 way to call seek on a subscription
+
+import (
+	"cloud.google.com/go/pubsub/v2"
+	"google.golang.org/protobuf/types/known/timestamppb"
+	pb "cloud.google.com/go/pubsub/v2/apiv1/pubsubpb"
+)
+...
+projectID := "my-project-id"
+subID := "my-subscription-id"
+
+now := time.Now()
+
+client, err := pubsub.NewClient(ctx, projectID)
+...
+client.SubscriptionAdminClient.Seek(ctx, &pb.SeekRequest{
+	Subscription: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, subID),
+	Target: &pb.SeekRequest_Time{
+		Time: timestamppb.New(now),
+	},
+})
+```
+
+### Call Options (retries and timeouts)
+
+In the v2 version, [pubsub.NewClientWithConfig](https://pkg.go.dev/cloud.google.com/go/pubsub/v2#NewClientWithConfig) is still the correct method to invoke to add RPC-specific retries and timeouts. However, the helper struct is renamed from `ClientConfig.PublisherCallOptions` to `TopicAdminCallOptions`. The same is true for subscription calls, whose options struct is now named `SubscriptionAdminCallOptions`.
+
+```go
+// Simplified v2 code
+import (
+	"cloud.google.com/go/pubsub/v2"
+	opts "cloud.google.com/go/pubsub/v2/apiv1"
+	gax "github.com/googleapis/gax-go/v2"
+	"google.golang.org/grpc/codes"
+)
+
+tco := &opts.TopicAdminCallOptions{
+	CreateTopic: []gax.CallOption{
+		gax.WithRetry(func() gax.Retryer {
+			return gax.OnCodes([]codes.Code{
+				codes.Unavailable,
+			}, gax.Backoff{
+				Initial:    200 * time.Millisecond,
+				Max:        30000 * time.Millisecond,
+				Multiplier: 1.25,
+			})
+		}),
+	},
+}
+
+client, err := pubsub.NewClientWithConfig(ctx, "my-project", &pubsub.ClientConfig{
+	TopicAdminCallOptions: tco,
+})
+defer client.Close()
+```
+
+## Schemas
+
+The existing `Schema` client is replaced by a new `SchemaClient`, which behaves similarly to the topic and subscription admin clients in the new v2 version. Since schemas are less commonly used than publishing and subscribing, the Pub/Sub client does not preinitialize this client for you. Instead, you must call the `NewSchemaClient` method in [cloud.google.com/go/pubsub/v2/apiv1](http://cloud.google.com/go/pubsub/v2/apiv1).
+
+```go
+// Simplified v2 code
+import (
+	pubsub "cloud.google.com/go/pubsub/v2/apiv1"
+	"cloud.google.com/go/pubsub/v2/apiv1/pubsubpb"
+)
+...
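+// NewSchemaClient is not bound to a project at construction time; the
+// schema below is addressed by its fully qualified resource name instead.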
+
+projectID := "my-project-id"
+schemaID := "my-schema"
+ctx := context.Background()
+client, err := pubsub.NewSchemaClient(ctx)
+if err != nil {
+	return fmt.Errorf("pubsub.NewSchemaClient: %w", err)
+}
+defer client.Close()
+
+req := &pubsubpb.GetSchemaRequest{
+	Name: fmt.Sprintf("projects/%s/schemas/%s", projectID, schemaID),
+	View: pubsubpb.SchemaView_FULL,
+}
+s, err := client.GetSchema(ctx, req)
+```
+
+The main difference with the new auto-generated schema client is that you cannot pass in a project ID at client instantiation. Instead, all references to schemas are made by their fully qualified resource names (such as `projects/my-project/schemas/my-schema`).
+
+## Data plane operations
+
+In contrast with admin operations that deal with resource management, the data plane deals with **publishing** and **receiving** messages.
+
+In the current v1 version, the data plane clients are intermixed with the admin plane structs: [Topic](https://pkg.go.dev/cloud.google.com/go/pubsub#Topic) and [Subscription](https://pkg.go.dev/cloud.google.com/go/pubsub#Subscription). For example, the `Topic` struct has the [Publish](https://pkg.go.dev/cloud.google.com/go/pubsub#Topic.Publish) method.
+
+```go
+// Simplified v1 code
+client, err := pubsub.NewClient(ctx, projectID)
+...
+topic := client.Topic("my-topic")
+topic.Publish(ctx, &pubsub.Message{Data: []byte("message")})
+```
+
+In the v2 version, replace `Topic` with `Publisher` to publish messages.
+
+```go
+// Simplified v2 code
+client, err := pubsub.NewClient(ctx, projectID)
+...
+publisher := client.Publisher("my-topic")
+publisher.Publish(ctx, &pubsub.Message{Data: []byte("message")})
+```
+
+Similarly, the v1 version `Subscription` has [Receive](https://pkg.go.dev/cloud.google.com/go/pubsub#Subscription.Receive) for pulling messages. Replace `Subscription` with `Subscriber` to pull messages.
+
+```go
+// Simplified v2 code
+client, err := pubsub.NewClient(ctx, projectID)
+...
+subscriber := client.Subscriber("my-subscription")
+subscriber.Receive(ctx, ...)
+```
+
+### Instantiation from admin
+
+In the v1 version, it is possible to call `CreateTopic` to create a topic and then call `Publish` on the returned topic. Since the v2 version's `CreateTopic` returns a generated protobuf [topic](https://pkg.go.dev/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb#Topic) that doesn’t have a `Publish` method, you must instantiate your own `Publisher` client to publish messages.
+
+```go
+// Simplified v2 code
+client, err := pubsub.NewClient(ctx, projectID)
+...
+
+topicpb := &pb.Topic{
+	Name: fmt.Sprintf("projects/%s/topics/%s", projectID, topicID),
+}
+topic, err := client.TopicAdminClient.CreateTopic(ctx, topicpb)
+
+// Instantiate the publisher from the topic name.
+publisher := client.Publisher(topic.GetName())
+publisher.Publish(ctx, &pubsub.Message{Data: []byte("message")})
+```
+
+### TopicInProject and SubscriptionInProject removed
+
+To make this transition easier, the `Publisher` and `Subscriber` methods can take in either the resource ID (such as `my-topic`) or a fully qualified name (such as `projects/p/topics/topic`) as arguments. This makes it easier to use the fully qualified topic name (accessible through `topic.GetName()`) rather than needing to parse out just the resource ID. If you use the resource ID, the publisher and subscriber clients assume you are referring to the project ID defined when instantiating the base pubsub client.
+
+The previous `TopicInProject` and `SubscriptionInProject` methods are removed from the v2 version.
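+A minimal sketch of the replacement pattern, with placeholder project and topic IDs:
+
+```go
+// The fully qualified name selects the project explicitly, so this
+// publisher can target a topic outside the client's default project.
+publisher := client.Publisher("projects/other-project/topics/their-topic")
+publisher.Publish(ctx, &pubsub.Message{Data: []byte("cross-project message")})
+```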
+As the sketch above shows, to create a publisher or subscriber in a different project, use the fully qualified name.
+
+### Renamed settings
+
+Two subscriber settings are renamed:
+
+* `MinExtensionPeriod` → `MinDurationPerAckExtension`
+
+* `MaxExtensionPeriod` → `MaxDurationPerAckExtension`
+
+### Default settings changes
+
+To align with other client libraries, the default value for `ReceiveSettings.NumGoroutines` is changed to 1. This is a better default for most users, as each stream can handle 10 MB/s, and it reduces the number of idle streams for lower-throughput applications.
+
+### Removed settings
+
+`PublishSettings.BufferedByteLimit` is removed. It was already superseded by the existing `PublishSettings.MaxOutstandingBytes`.
+
+`ReceiveSettings.Synchronous` is also removed. It made the library receive messages with the synchronous `Pull` API, but the v2 version supports only the `StreamingPull` API.
+
+Lastly, `ReceiveSettings.UseLegacyFlowControl` is removed, since server-side flow control is now a mature feature and should be relied on for managing flow control.
+
+### Renamed error type
+
+Because of the change to the data plane clients (now named `Publisher` and `Subscriber`), we renamed one error type to match: `ErrTopicStopped` is now `ErrPublisherStopped`.
+
+## Relevant packages
+
+* [cloud.google.com/go/pubsub/v2](http://cloud.google.com/go/pubsub/v2) is the base v2 package.
+
+* [cloud.google.com/go/pubsub/v2/apiv1](http://cloud.google.com/go/pubsub/v2/apiv1) is used for initializing the `SchemaClient`.
+
+* [cloud.google.com/go/pubsub/v2/apiv1/pubsubpb](http://cloud.google.com/go/pubsub/v2/apiv1/pubsubpb) is used for creating admin protobuf requests.
+
+* [cloud.google.com/go/iam/apiv1/iampb](http://cloud.google.com/go/iam/apiv1/iampb) is used for IAM requests.
+
+* [google.golang.org/protobuf/types/known/durationpb](http://google.golang.org/protobuf/types/known/durationpb) is used for the proto duration type in place of Go durations.
+
+* [google.golang.org/protobuf/types/known/fieldmaskpb](http://google.golang.org/protobuf/types/known/fieldmaskpb) is used for masking which fields are updated in update calls.
+
+## FAQ
+
+**Q: Why does the new admin API package mention both v2 and apiv1?**
+
+The new Pub/Sub v2 package is `cloud.google.com/go/pubsub/v2`. All of the new v2 code lives in the v2 directory. The apiv1 part denotes that the Pub/Sub server API is still under v1 and is **not** changing.
+
+**Q: Why are you changing the admin API surface?**
+
+One goal we had for this new Pub/Sub package is to reduce confusion between the data and admin plane surfaces. Particularly, the way that this package references topics and subscriptions was inconsistent with Pub/Sub libraries in other languages. For example, creating a topic does not automatically create a publisher client in the Java or Python client libraries. Instead, we want it to be clear that creating a topic is a server-side operation and creating a publisher client is a client-side operation.
+
+In the past, we have seen users confused about why setting `topic.PublishSettings` doesn't persist the settings across applications. This is because it actually sets the ephemeral `PublishSettings` of the client, which aren't saved to the server.
+
+Another goal is to improve development velocity by leveraging the auto-generation tools that already exist for other Go products.
+With this change, features that affect only the admin plane (including recent additions such as topic ingestion settings and export subscriptions) can be released sooner.
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary.go b/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary.go index 3a9bcffd..6a4cd19e 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -41,7 +41,7 @@ type SchemaIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Schema, nextPageToken string, err error) } -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. func (it *SchemaIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } @@ -88,7 +88,7 @@ type SnapshotIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Snapshot, nextPageToken string, err error) } -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. func (it *SnapshotIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } @@ -135,7 +135,7 @@ type StringIterator struct { InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) } -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. func (it *StringIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } @@ -182,7 +182,7 @@ type SubscriptionIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Subscription, nextPageToken string, err error) } -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. func (it *SubscriptionIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } @@ -229,7 +229,7 @@ type TopicIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Topic, nextPageToken string, err error) } -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. func (it *TopicIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go b/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go new file mode 100644 index 00000000..d449e7be --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go @@ -0,0 +1,56 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package pubsub + +import ( + "iter" + + pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb" + "github.com/googleapis/gax-go/v2/iterator" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SchemaIterator) All() iter.Seq2[*pubsubpb.Schema, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SnapshotIterator) All() iter.Seq2[*pubsubpb.Snapshot, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *StringIterator) All() iter.Seq2[string, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SubscriptionIterator) All() iter.Seq2[*pubsubpb.Subscription, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TopicIterator) All() iter.Seq2[*pubsubpb.Topic, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go index 650d2334..d3eaec33 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -36,6 +36,7 @@ // // To get started with this package, create a client. // +// // go get cloud.google.com/go/pubsub/apiv1@latest // ctx := context.Background() // // This snippet has been automatically generated and should be regarded as a code template only. // // It will require modifications to work: @@ -54,19 +55,7 @@ // // # Using the Client // -// The following is an example of making an API call with the newly created client. -// -// ctx := context.Background() -// // This snippet has been automatically generated and should be regarded as a code template only. -// // It will require modifications to work: -// // - It may require correct/in-range values for request initialization. -// // - It may require specifying regional endpoints when creating the service client as shown in: -// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options -// c, err := pubsub.NewSchemaClient(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// defer c.Close() +// The following is an example of making an API call with the newly created client, mentioned above. // // req := &pubsubpb.CommitSchemaRequest{ // // TODO: Fill request struct fields. 
@@ -93,31 +82,3 @@ // [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging // [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors package pubsub // import "cloud.google.com/go/pubsub/apiv1" - -import ( - "context" - - "google.golang.org/api/option" -) - -// For more information on implementing a client constructor hook, see -// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. -type clientHookParams struct{} -type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) - -var versionClient string - -func getVersionClient() string { - if versionClient == "" { - return "UNKNOWN" - } - return versionClient -} - -// DefaultAuthScopes reports the default set of authentication scopes to use with this package. -func DefaultAuthScopes() []string { - return []string{ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub", - } -} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/helpers.go b/vendor/cloud.google.com/go/pubsub/apiv1/helpers.go new file mode 100644 index 00000000..fccce7b5 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/helpers.go @@ -0,0 +1,102 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package pubsub + +import ( + "context" + "io" + "log/slog" + "net/http" + + "github.com/googleapis/gax-go/v2/internallog" + "github.com/googleapis/gax-go/v2/internallog/grpclog" + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/protobuf/proto" +) + +const serviceName = "pubsub.googleapis.com" + +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub", + } +} + +func executeHTTPRequestWithResponse(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpc string) ([]byte, *http.Response, error) { + logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", internallog.HTTPRequest(req, body)) + resp, err := client.Do(req) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + buf, err := io.ReadAll(resp.Body) + if err != nil { + return nil, nil, err + } + logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", internallog.HTTPResponse(resp, buf)) + if err = googleapi.CheckResponseWithBody(resp, buf); err != nil { + return nil, nil, err + } + return buf, resp, nil +} + +func executeHTTPRequest(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpc string) ([]byte, error) { + buf, _, err := executeHTTPRequestWithResponse(ctx, client, req, logger, body, rpc) + return buf, err +} + +func executeStreamingHTTPRequest(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpc string) (*http.Response, error) { + logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", internallog.HTTPRequest(req, body)) + resp, err := client.Do(req) + if err != nil { + return nil, err + } + logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", internallog.HTTPResponse(resp, nil)) + if err = googleapi.CheckResponse(resp); err != nil { + return nil, err + } + return resp, nil +} + +func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) { + var zero O + logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", grpclog.ProtoMessageRequest(ctx, req)) + resp, err := fn(ctx, req, opts...) + if err != nil { + return zero, err + } + logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", grpclog.ProtoMessageResponse(resp)) + return resp, err +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go index 03ac865c..ac986410 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -20,7 +20,7 @@ import ( "bytes" "context" "fmt" - "io" + "log/slog" "math" "net/http" "net/url" @@ -29,7 +29,6 @@ import ( iampb "cloud.google.com/go/iam/apiv1/iampb" pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb" gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/googleapi" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" @@ -68,6 +67,7 @@ func defaultPublisherGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -481,6 +481,8 @@ type publisherGRPCClient struct { // The x-goog-* metadata to be sent with each request. xGoogHeaders []string + + logger *slog.Logger } // NewPublisherClient creates a new publisher client based on gRPC. @@ -508,6 +510,7 @@ func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*Publ connPool: connPool, publisherClient: pubsubpb.NewPublisherClient(connPool), CallOptions: &client.CallOptions, + logger: internaloption.GetLogger(opts), iamPolicyClient: iampb.NewIAMPolicyClient(connPool), } c.setGoogleClientInfo() @@ -555,6 +558,8 @@ type publisherRESTClient struct { // Points back to the CallOptions field of the containing PublisherClient CallOptions **PublisherCallOptions + + logger *slog.Logger } // NewPublisherRESTClient creates a new publisher rest client. @@ -573,6 +578,7 @@ func NewPublisherRESTClient(ctx context.Context, opts ...option.ClientOption) (* endpoint: endpoint, httpClient: httpClient, CallOptions: &callOpts, + logger: internaloption.GetLogger(opts), } c.setGoogleClientInfo() @@ -587,6 +593,7 @@ func defaultPublisherRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -624,7 +631,7 @@ func (c *publisherGRPCClient) CreateTopic(ctx context.Context, req *pubsubpb.Top var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.publisherClient.CreateTopic(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.publisherClient.CreateTopic, req, settings.GRPC, c.logger, "CreateTopic") return err }, opts...) if err != nil { @@ -642,7 +649,7 @@ func (c *publisherGRPCClient) UpdateTopic(ctx context.Context, req *pubsubpb.Upd var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.publisherClient.UpdateTopic(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.publisherClient.UpdateTopic, req, settings.GRPC, c.logger, "UpdateTopic") return err }, opts...) if err != nil { @@ -660,7 +667,7 @@ func (c *publisherGRPCClient) Publish(ctx context.Context, req *pubsubpb.Publish var resp *pubsubpb.PublishResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.publisherClient.Publish(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.publisherClient.Publish, req, settings.GRPC, c.logger, "Publish") return err }, opts...) 
if err != nil { @@ -678,7 +685,7 @@ func (c *publisherGRPCClient) GetTopic(ctx context.Context, req *pubsubpb.GetTop var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.publisherClient.GetTopic(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.publisherClient.GetTopic, req, settings.GRPC, c.logger, "GetTopic") return err }, opts...) if err != nil { @@ -707,7 +714,7 @@ func (c *publisherGRPCClient) ListTopics(ctx context.Context, req *pubsubpb.List } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.publisherClient.ListTopics(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.publisherClient.ListTopics, req, settings.GRPC, c.logger, "ListTopics") return err }, opts...) if err != nil { @@ -753,7 +760,7 @@ func (c *publisherGRPCClient) ListTopicSubscriptions(ctx context.Context, req *p } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.publisherClient.ListTopicSubscriptions(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.publisherClient.ListTopicSubscriptions, req, settings.GRPC, c.logger, "ListTopicSubscriptions") return err }, opts...) if err != nil { @@ -799,7 +806,7 @@ func (c *publisherGRPCClient) ListTopicSnapshots(ctx context.Context, req *pubsu } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.publisherClient.ListTopicSnapshots(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.publisherClient.ListTopicSnapshots, req, settings.GRPC, c.logger, "ListTopicSnapshots") return err }, opts...) if err != nil { @@ -833,7 +840,7 @@ func (c *publisherGRPCClient) DeleteTopic(ctx context.Context, req *pubsubpb.Del opts = append((*c.CallOptions).DeleteTopic[0:len((*c.CallOptions).DeleteTopic):len((*c.CallOptions).DeleteTopic)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...) + _, err = executeRPC(ctx, c.publisherClient.DeleteTopic, req, settings.GRPC, c.logger, "DeleteTopic") return err }, opts...) return err @@ -848,7 +855,7 @@ func (c *publisherGRPCClient) DetachSubscription(ctx context.Context, req *pubsu var resp *pubsubpb.DetachSubscriptionResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.publisherClient.DetachSubscription(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.publisherClient.DetachSubscription, req, settings.GRPC, c.logger, "DetachSubscription") return err }, opts...) if err != nil { @@ -866,7 +873,7 @@ func (c *publisherGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIa var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy") return err }, opts...) if err != nil { @@ -884,7 +891,7 @@ func (c *publisherGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIa var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) 
+ resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy") return err }, opts...) if err != nil { @@ -902,7 +909,7 @@ func (c *publisherGRPCClient) TestIamPermissions(ctx context.Context, req *iampb var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions") return err }, opts...) if err != nil { @@ -951,17 +958,7 @@ func (c *publisherRESTClient) CreateTopic(ctx context.Context, req *pubsubpb.Top httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateTopic") if err != nil { return err } @@ -1018,17 +1015,7 @@ func (c *publisherRESTClient) UpdateTopic(ctx context.Context, req *pubsubpb.Upd httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateTopic") if err != nil { return err } @@ -1085,17 +1072,7 @@ func (c *publisherRESTClient) Publish(ctx context.Context, req *pubsubpb.Publish httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "Publish") if err != nil { return err } @@ -1145,17 +1122,7 @@ func (c *publisherRESTClient) GetTopic(ctx context.Context, req *pubsubpb.GetTop httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetTopic") if err != nil { return err } @@ -1217,21 +1184,10 @@ func (c *publisherRESTClient) ListTopics(ctx context.Context, req *pubsubpb.List } httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListTopics") if err != nil { return err } - if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -1306,21 +1262,10 @@ func (c *publisherRESTClient) ListTopicSubscriptions(ctx context.Context, req *p } httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListTopicSubscriptions") if err != nil { return err } - defer httpRsp.Body.Close() - - if err = 
googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -1399,21 +1344,10 @@ func (c *publisherRESTClient) ListTopicSnapshots(ctx context.Context, req *pubsu } httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListTopicSnapshots") if err != nil { return err } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -1477,15 +1411,8 @@ func (c *publisherRESTClient) DeleteTopic(ctx context.Context, req *pubsubpb.Del httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - // Returns nil if there is no error, otherwise wraps - // the response code and body into a non-nil error - return googleapi.CheckResponse(httpRsp) + _, err = executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "DeleteTopic") + return err }, opts...) } @@ -1525,17 +1452,7 @@ func (c *publisherRESTClient) DetachSubscription(ctx context.Context, req *pubsu httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "DetachSubscription") if err != nil { return err } @@ -1589,17 +1506,7 @@ func (c *publisherRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIa httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy") if err != nil { return err } @@ -1659,17 +1566,7 @@ func (c *publisherRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIa httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy") if err != nil { return err } @@ -1731,17 +1628,7 @@ func (c *publisherRESTClient) TestIamPermissions(ctx context.Context, req *iampb httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions") if err != nil { return err } diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/alias.go b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/alias.go new file mode 100644 index 00000000..464ff991 --- /dev/null +++ 
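Every REST method in the hunks above undergoes the same mechanical change: the inline Do / CheckResponse / ReadAll sequence is collapsed into one executeHTTPRequest call, and the gRPC IAM methods get the analogous executeRPC wrapper. The helpers themselves are defined elsewhere in the vendored client and do not appear in this excerpt, so the sketch below is a reconstruction from the inline code they replace; the parameter order is taken from the call sites, but the logging behavior attributed to the logger and RPC-name parameters is an assumption.

// Sketch only: reconstructed from the inline sequence this diff removes.
// The real helper is internal to the generated client; the logging details
// are assumptions, not the upstream implementation.
package pubsub

import (
	"context"
	"io"
	"log/slog"
	"net/http"

	"google.golang.org/api/googleapi"
)

func executeHTTPRequest(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpcName string) ([]byte, error) {
	if logger != nil {
		// Assumed: the logger and RPC name exist to emit debug logs around the call.
		logger.DebugContext(ctx, "api request", "rpc", rpcName, "request", string(body))
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Same check the inline code performed: wrap a non-2xx status into an error.
	if err := googleapi.CheckResponse(resp); err != nil {
		return nil, err
	}
	// Callers unmarshal the returned bytes, so the body is read eagerly here.
	return io.ReadAll(resp.Body)
}

executeRPC presumably plays the same role on the gRPC side: invoking the passed method value (for example c.iamPolicyClient.SetIamPolicy) with settings.GRPC while logging under the given RPC name.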
b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/alias.go @@ -0,0 +1,801 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package pubsub aliases all exported identifiers in package +// "cloud.google.com/go/pubsub/v2/apiv1/pubsubpb". +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb. +package pubsubpb + +import ( + src "cloud.google.com/go/pubsub/v2/apiv1/pubsubpb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use consts in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +const ( + BigQueryConfig_ACTIVE = src.BigQueryConfig_ACTIVE + BigQueryConfig_IN_TRANSIT_LOCATION_RESTRICTION = src.BigQueryConfig_IN_TRANSIT_LOCATION_RESTRICTION + BigQueryConfig_NOT_FOUND = src.BigQueryConfig_NOT_FOUND + BigQueryConfig_PERMISSION_DENIED = src.BigQueryConfig_PERMISSION_DENIED + BigQueryConfig_SCHEMA_MISMATCH = src.BigQueryConfig_SCHEMA_MISMATCH + BigQueryConfig_STATE_UNSPECIFIED = src.BigQueryConfig_STATE_UNSPECIFIED + CloudStorageConfig_ACTIVE = src.CloudStorageConfig_ACTIVE + CloudStorageConfig_IN_TRANSIT_LOCATION_RESTRICTION = src.CloudStorageConfig_IN_TRANSIT_LOCATION_RESTRICTION + CloudStorageConfig_NOT_FOUND = src.CloudStorageConfig_NOT_FOUND + CloudStorageConfig_PERMISSION_DENIED = src.CloudStorageConfig_PERMISSION_DENIED + CloudStorageConfig_SCHEMA_MISMATCH = src.CloudStorageConfig_SCHEMA_MISMATCH + CloudStorageConfig_STATE_UNSPECIFIED = src.CloudStorageConfig_STATE_UNSPECIFIED + Encoding_BINARY = src.Encoding_BINARY + Encoding_ENCODING_UNSPECIFIED = src.Encoding_ENCODING_UNSPECIFIED + Encoding_JSON = src.Encoding_JSON + IngestionDataSourceSettings_AwsKinesis_ACTIVE = src.IngestionDataSourceSettings_AwsKinesis_ACTIVE + IngestionDataSourceSettings_AwsKinesis_CONSUMER_NOT_FOUND = src.IngestionDataSourceSettings_AwsKinesis_CONSUMER_NOT_FOUND + IngestionDataSourceSettings_AwsKinesis_KINESIS_PERMISSION_DENIED = src.IngestionDataSourceSettings_AwsKinesis_KINESIS_PERMISSION_DENIED + IngestionDataSourceSettings_AwsKinesis_PUBLISH_PERMISSION_DENIED = src.IngestionDataSourceSettings_AwsKinesis_PUBLISH_PERMISSION_DENIED + IngestionDataSourceSettings_AwsKinesis_STATE_UNSPECIFIED = src.IngestionDataSourceSettings_AwsKinesis_STATE_UNSPECIFIED + IngestionDataSourceSettings_AwsKinesis_STREAM_NOT_FOUND = src.IngestionDataSourceSettings_AwsKinesis_STREAM_NOT_FOUND + IngestionDataSourceSettings_AwsMsk_ACTIVE = src.IngestionDataSourceSettings_AwsMsk_ACTIVE + IngestionDataSourceSettings_AwsMsk_CLUSTER_NOT_FOUND = src.IngestionDataSourceSettings_AwsMsk_CLUSTER_NOT_FOUND + IngestionDataSourceSettings_AwsMsk_MSK_PERMISSION_DENIED = src.IngestionDataSourceSettings_AwsMsk_MSK_PERMISSION_DENIED + IngestionDataSourceSettings_AwsMsk_PUBLISH_PERMISSION_DENIED = src.IngestionDataSourceSettings_AwsMsk_PUBLISH_PERMISSION_DENIED + IngestionDataSourceSettings_AwsMsk_STATE_UNSPECIFIED = src.IngestionDataSourceSettings_AwsMsk_STATE_UNSPECIFIED + 
IngestionDataSourceSettings_AwsMsk_TOPIC_NOT_FOUND = src.IngestionDataSourceSettings_AwsMsk_TOPIC_NOT_FOUND + IngestionDataSourceSettings_AzureEventHubs_ACTIVE = src.IngestionDataSourceSettings_AzureEventHubs_ACTIVE + IngestionDataSourceSettings_AzureEventHubs_EVENT_HUBS_PERMISSION_DENIED = src.IngestionDataSourceSettings_AzureEventHubs_EVENT_HUBS_PERMISSION_DENIED + IngestionDataSourceSettings_AzureEventHubs_EVENT_HUB_NOT_FOUND = src.IngestionDataSourceSettings_AzureEventHubs_EVENT_HUB_NOT_FOUND + IngestionDataSourceSettings_AzureEventHubs_NAMESPACE_NOT_FOUND = src.IngestionDataSourceSettings_AzureEventHubs_NAMESPACE_NOT_FOUND + IngestionDataSourceSettings_AzureEventHubs_PUBLISH_PERMISSION_DENIED = src.IngestionDataSourceSettings_AzureEventHubs_PUBLISH_PERMISSION_DENIED + IngestionDataSourceSettings_AzureEventHubs_RESOURCE_GROUP_NOT_FOUND = src.IngestionDataSourceSettings_AzureEventHubs_RESOURCE_GROUP_NOT_FOUND + IngestionDataSourceSettings_AzureEventHubs_STATE_UNSPECIFIED = src.IngestionDataSourceSettings_AzureEventHubs_STATE_UNSPECIFIED + IngestionDataSourceSettings_AzureEventHubs_SUBSCRIPTION_NOT_FOUND = src.IngestionDataSourceSettings_AzureEventHubs_SUBSCRIPTION_NOT_FOUND + IngestionDataSourceSettings_CloudStorage_ACTIVE = src.IngestionDataSourceSettings_CloudStorage_ACTIVE + IngestionDataSourceSettings_CloudStorage_BUCKET_NOT_FOUND = src.IngestionDataSourceSettings_CloudStorage_BUCKET_NOT_FOUND + IngestionDataSourceSettings_CloudStorage_CLOUD_STORAGE_PERMISSION_DENIED = src.IngestionDataSourceSettings_CloudStorage_CLOUD_STORAGE_PERMISSION_DENIED + IngestionDataSourceSettings_CloudStorage_PUBLISH_PERMISSION_DENIED = src.IngestionDataSourceSettings_CloudStorage_PUBLISH_PERMISSION_DENIED + IngestionDataSourceSettings_CloudStorage_STATE_UNSPECIFIED = src.IngestionDataSourceSettings_CloudStorage_STATE_UNSPECIFIED + IngestionDataSourceSettings_CloudStorage_TOO_MANY_OBJECTS = src.IngestionDataSourceSettings_CloudStorage_TOO_MANY_OBJECTS + IngestionDataSourceSettings_ConfluentCloud_ACTIVE = src.IngestionDataSourceSettings_ConfluentCloud_ACTIVE + IngestionDataSourceSettings_ConfluentCloud_CLUSTER_NOT_FOUND = src.IngestionDataSourceSettings_ConfluentCloud_CLUSTER_NOT_FOUND + IngestionDataSourceSettings_ConfluentCloud_CONFLUENT_CLOUD_PERMISSION_DENIED = src.IngestionDataSourceSettings_ConfluentCloud_CONFLUENT_CLOUD_PERMISSION_DENIED + IngestionDataSourceSettings_ConfluentCloud_PUBLISH_PERMISSION_DENIED = src.IngestionDataSourceSettings_ConfluentCloud_PUBLISH_PERMISSION_DENIED + IngestionDataSourceSettings_ConfluentCloud_STATE_UNSPECIFIED = src.IngestionDataSourceSettings_ConfluentCloud_STATE_UNSPECIFIED + IngestionDataSourceSettings_ConfluentCloud_TOPIC_NOT_FOUND = src.IngestionDataSourceSettings_ConfluentCloud_TOPIC_NOT_FOUND + IngestionDataSourceSettings_ConfluentCloud_UNREACHABLE_BOOTSTRAP_SERVER = src.IngestionDataSourceSettings_ConfluentCloud_UNREACHABLE_BOOTSTRAP_SERVER + PlatformLogsSettings_DEBUG = src.PlatformLogsSettings_DEBUG + PlatformLogsSettings_DISABLED = src.PlatformLogsSettings_DISABLED + PlatformLogsSettings_ERROR = src.PlatformLogsSettings_ERROR + PlatformLogsSettings_INFO = src.PlatformLogsSettings_INFO + PlatformLogsSettings_SEVERITY_UNSPECIFIED = src.PlatformLogsSettings_SEVERITY_UNSPECIFIED + PlatformLogsSettings_WARNING = src.PlatformLogsSettings_WARNING + SchemaView_BASIC = src.SchemaView_BASIC + SchemaView_FULL = src.SchemaView_FULL + SchemaView_SCHEMA_VIEW_UNSPECIFIED = src.SchemaView_SCHEMA_VIEW_UNSPECIFIED + Schema_AVRO = src.Schema_AVRO + Schema_PROTOCOL_BUFFER = 
src.Schema_PROTOCOL_BUFFER + Schema_TYPE_UNSPECIFIED = src.Schema_TYPE_UNSPECIFIED + Subscription_ACTIVE = src.Subscription_ACTIVE + Subscription_RESOURCE_ERROR = src.Subscription_RESOURCE_ERROR + Subscription_STATE_UNSPECIFIED = src.Subscription_STATE_UNSPECIFIED + Topic_ACTIVE = src.Topic_ACTIVE + Topic_INGESTION_RESOURCE_ERROR = src.Topic_INGESTION_RESOURCE_ERROR + Topic_STATE_UNSPECIFIED = src.Topic_STATE_UNSPECIFIED +) + +// Deprecated: Please use vars in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +var ( + BigQueryConfig_State_name = src.BigQueryConfig_State_name + BigQueryConfig_State_value = src.BigQueryConfig_State_value + CloudStorageConfig_State_name = src.CloudStorageConfig_State_name + CloudStorageConfig_State_value = src.CloudStorageConfig_State_value + Encoding_name = src.Encoding_name + Encoding_value = src.Encoding_value + File_google_pubsub_v1_pubsub_proto = src.File_google_pubsub_v1_pubsub_proto + File_google_pubsub_v1_schema_proto = src.File_google_pubsub_v1_schema_proto + IngestionDataSourceSettings_AwsKinesis_State_name = src.IngestionDataSourceSettings_AwsKinesis_State_name + IngestionDataSourceSettings_AwsKinesis_State_value = src.IngestionDataSourceSettings_AwsKinesis_State_value + IngestionDataSourceSettings_AwsMsk_State_name = src.IngestionDataSourceSettings_AwsMsk_State_name + IngestionDataSourceSettings_AwsMsk_State_value = src.IngestionDataSourceSettings_AwsMsk_State_value + IngestionDataSourceSettings_AzureEventHubs_State_name = src.IngestionDataSourceSettings_AzureEventHubs_State_name + IngestionDataSourceSettings_AzureEventHubs_State_value = src.IngestionDataSourceSettings_AzureEventHubs_State_value + IngestionDataSourceSettings_CloudStorage_State_name = src.IngestionDataSourceSettings_CloudStorage_State_name + IngestionDataSourceSettings_CloudStorage_State_value = src.IngestionDataSourceSettings_CloudStorage_State_value + IngestionDataSourceSettings_ConfluentCloud_State_name = src.IngestionDataSourceSettings_ConfluentCloud_State_name + IngestionDataSourceSettings_ConfluentCloud_State_value = src.IngestionDataSourceSettings_ConfluentCloud_State_value + PlatformLogsSettings_Severity_name = src.PlatformLogsSettings_Severity_name + PlatformLogsSettings_Severity_value = src.PlatformLogsSettings_Severity_value + SchemaView_name = src.SchemaView_name + SchemaView_value = src.SchemaView_value + Schema_Type_name = src.Schema_Type_name + Schema_Type_value = src.Schema_Type_value + Subscription_State_name = src.Subscription_State_name + Subscription_State_value = src.Subscription_State_value + Topic_State_name = src.Topic_State_name + Topic_State_value = src.Topic_State_value +) + +// Request for the Acknowledge method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type AcknowledgeRequest = src.AcknowledgeRequest + +// Configuration for a BigQuery subscription. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type BigQueryConfig = src.BigQueryConfig + +// Possible states for a BigQuery subscription. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type BigQueryConfig_State = src.BigQueryConfig_State + +// Configuration for a Cloud Storage subscription. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type CloudStorageConfig = src.CloudStorageConfig + +// Configuration for writing message data in Avro format. Message payloads and +// metadata will be written to files as an Avro binary. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type CloudStorageConfig_AvroConfig = src.CloudStorageConfig_AvroConfig +type CloudStorageConfig_AvroConfig_ = src.CloudStorageConfig_AvroConfig_ + +// Possible states for a Cloud Storage subscription. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type CloudStorageConfig_State = src.CloudStorageConfig_State + +// Configuration for writing message data in text format. Message payloads +// will be written to files as raw text, separated by a newline. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type CloudStorageConfig_TextConfig = src.CloudStorageConfig_TextConfig +type CloudStorageConfig_TextConfig_ = src.CloudStorageConfig_TextConfig_ + +// Request for CommitSchema method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type CommitSchemaRequest = src.CommitSchemaRequest + +// Request for the CreateSchema method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type CreateSchemaRequest = src.CreateSchemaRequest + +// Request for the `CreateSnapshot` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type CreateSnapshotRequest = src.CreateSnapshotRequest + +// Dead lettering is done on a best effort basis. The same message might be +// dead lettered multiple times. If validation on any of the fields fails at +// subscription creation/updation, the create/update subscription request will +// fail. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type DeadLetterPolicy = src.DeadLetterPolicy + +// Request for the `DeleteSchema` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type DeleteSchemaRequest = src.DeleteSchemaRequest + +// Request for the `DeleteSchemaRevision` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type DeleteSchemaRevisionRequest = src.DeleteSchemaRevisionRequest + +// Request for the `DeleteSnapshot` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type DeleteSnapshotRequest = src.DeleteSnapshotRequest + +// Request for the DeleteSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type DeleteSubscriptionRequest = src.DeleteSubscriptionRequest + +// Request for the `DeleteTopic` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type DeleteTopicRequest = src.DeleteTopicRequest + +// Request for the DetachSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type DetachSubscriptionRequest = src.DetachSubscriptionRequest + +// Response for the DetachSubscription method. Reserved for future use. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type DetachSubscriptionResponse = src.DetachSubscriptionResponse + +// Possible encoding types for messages. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type Encoding = src.Encoding + +// A policy that specifies the conditions for resource expiration (i.e., +// automatic resource deletion). 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ExpirationPolicy = src.ExpirationPolicy + +// Request for the GetSchema method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type GetSchemaRequest = src.GetSchemaRequest + +// Request for the GetSnapshot method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type GetSnapshotRequest = src.GetSnapshotRequest + +// Request for the GetSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type GetSubscriptionRequest = src.GetSubscriptionRequest + +// Request for the GetTopic method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type GetTopicRequest = src.GetTopicRequest + +// Settings for an ingestion data source on a topic. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings = src.IngestionDataSourceSettings + +// Ingestion settings for Amazon Kinesis Data Streams. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_AwsKinesis = src.IngestionDataSourceSettings_AwsKinesis +type IngestionDataSourceSettings_AwsKinesis_ = src.IngestionDataSourceSettings_AwsKinesis_ + +// Possible states for ingestion from Amazon Kinesis Data Streams. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_AwsKinesis_State = src.IngestionDataSourceSettings_AwsKinesis_State + +// Ingestion settings for Amazon MSK. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_AwsMsk = src.IngestionDataSourceSettings_AwsMsk +type IngestionDataSourceSettings_AwsMsk_ = src.IngestionDataSourceSettings_AwsMsk_ + +// Possible states for managed ingestion from Amazon MSK. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_AwsMsk_State = src.IngestionDataSourceSettings_AwsMsk_State + +// Ingestion settings for Azure Event Hubs. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_AzureEventHubs = src.IngestionDataSourceSettings_AzureEventHubs +type IngestionDataSourceSettings_AzureEventHubs_ = src.IngestionDataSourceSettings_AzureEventHubs_ + +// Possible states for managed ingestion from Event Hubs. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_AzureEventHubs_State = src.IngestionDataSourceSettings_AzureEventHubs_State + +// Ingestion settings for Cloud Storage. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_CloudStorage = src.IngestionDataSourceSettings_CloudStorage +type IngestionDataSourceSettings_CloudStorage_ = src.IngestionDataSourceSettings_CloudStorage_ + +// Configuration for reading Cloud Storage data in Avro binary format. The +// bytes of each object will be set to the `data` field of a Pub/Sub message. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_CloudStorage_AvroFormat = src.IngestionDataSourceSettings_CloudStorage_AvroFormat +type IngestionDataSourceSettings_CloudStorage_AvroFormat_ = src.IngestionDataSourceSettings_CloudStorage_AvroFormat_ + +// Configuration for reading Cloud Storage data written via [Cloud Storage +// subscriptions](https://cloud.google.com/pubsub/docs/cloudstorage). The data +// and attributes fields of the originally exported Pub/Sub message will be +// restored when publishing. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat = src.IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat +type IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat = src.IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat + +// Possible states for ingestion from Cloud Storage. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_CloudStorage_State = src.IngestionDataSourceSettings_CloudStorage_State + +// Configuration for reading Cloud Storage data in text format. Each line of +// text as specified by the delimiter will be set to the `data` field of a +// Pub/Sub message. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_CloudStorage_TextFormat = src.IngestionDataSourceSettings_CloudStorage_TextFormat +type IngestionDataSourceSettings_CloudStorage_TextFormat_ = src.IngestionDataSourceSettings_CloudStorage_TextFormat_ + +// Ingestion settings for Confluent Cloud. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_ConfluentCloud = src.IngestionDataSourceSettings_ConfluentCloud +type IngestionDataSourceSettings_ConfluentCloud_ = src.IngestionDataSourceSettings_ConfluentCloud_ + +// Possible states for managed ingestion from Confluent Cloud. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionDataSourceSettings_ConfluentCloud_State = src.IngestionDataSourceSettings_ConfluentCloud_State + +// Payload of the Platform Log entry sent when a failure is encountered while +// ingesting. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionFailureEvent = src.IngestionFailureEvent + +// Specifies the reason why some data may have been left out of the desired +// Pub/Sub message due to the API message limits +// (https://cloud.google.com/pubsub/quotas#resource_limits). For example, when +// the number of attributes is larger than 100, the number of attributes is +// truncated to 100 to respect the limit on the attribute count. Other +// attribute limits are treated similarly. When the size of the desired message +// would've been larger than 10MB, the message won't be published at all, and +// ingestion of the subsequent messages will proceed as normal. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionFailureEvent_ApiViolationReason = src.IngestionFailureEvent_ApiViolationReason + +// Set when an Avro file is unsupported or its format is not valid. When this +// occurs, one or more Avro objects won't be ingested. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionFailureEvent_AvroFailureReason = src.IngestionFailureEvent_AvroFailureReason +type IngestionFailureEvent_AwsMskFailure = src.IngestionFailureEvent_AwsMskFailure + +// Failure when ingesting from an Amazon MSK source. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionFailureEvent_AwsMskFailureReason = src.IngestionFailureEvent_AwsMskFailureReason +type IngestionFailureEvent_AwsMskFailureReason_ApiViolationReason = src.IngestionFailureEvent_AwsMskFailureReason_ApiViolationReason +type IngestionFailureEvent_AzureEventHubsFailure = src.IngestionFailureEvent_AzureEventHubsFailure + +// Failure when ingesting from an Azure Event Hubs source. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionFailureEvent_AzureEventHubsFailureReason = src.IngestionFailureEvent_AzureEventHubsFailureReason +type IngestionFailureEvent_AzureEventHubsFailureReason_ApiViolationReason = src.IngestionFailureEvent_AzureEventHubsFailureReason_ApiViolationReason + +// Failure when ingesting from a Cloud Storage source. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionFailureEvent_CloudStorageFailure = src.IngestionFailureEvent_CloudStorageFailure +type IngestionFailureEvent_CloudStorageFailure_ = src.IngestionFailureEvent_CloudStorageFailure_ +type IngestionFailureEvent_CloudStorageFailure_ApiViolationReason = src.IngestionFailureEvent_CloudStorageFailure_ApiViolationReason +type IngestionFailureEvent_CloudStorageFailure_AvroFailureReason = src.IngestionFailureEvent_CloudStorageFailure_AvroFailureReason +type IngestionFailureEvent_ConfluentCloudFailure = src.IngestionFailureEvent_ConfluentCloudFailure + +// Failure when ingesting from a Confluent Cloud source. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type IngestionFailureEvent_ConfluentCloudFailureReason = src.IngestionFailureEvent_ConfluentCloudFailureReason +type IngestionFailureEvent_ConfluentCloudFailureReason_ApiViolationReason = src.IngestionFailureEvent_ConfluentCloudFailureReason_ApiViolationReason + +// User-defined JavaScript function that can transform or filter a Pub/Sub +// message. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type JavaScriptUDF = src.JavaScriptUDF + +// Request for the `ListSchemaRevisions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListSchemaRevisionsRequest = src.ListSchemaRevisionsRequest + +// Response for the `ListSchemaRevisions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListSchemaRevisionsResponse = src.ListSchemaRevisionsResponse + +// Request for the `ListSchemas` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListSchemasRequest = src.ListSchemasRequest + +// Response for the `ListSchemas` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListSchemasResponse = src.ListSchemasResponse + +// Request for the `ListSnapshots` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListSnapshotsRequest = src.ListSnapshotsRequest + +// Response for the `ListSnapshots` method. 
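(The alias listing continues below.) Every entry in alias.go uses a Go type alias, written with '=', rather than a new type definition, so each deprecated apiv1 identifier and its v2 counterpart are the same type: existing callers recompile unchanged and no conversions are needed in either direction. A self-contained illustration of the technique, with local stand-in names rather than the real packages:

// Minimal demo of the alias-shim pattern (illustrative names only).
package main

import "fmt"

// newTopic stands in for a type that moved to a new package.
type newTopic struct{ Name string }

// Topic is an alias (note '='), not a new named type: it is identical to
// newTopic, so no conversion is ever needed in either direction.
//
// Deprecated: use newTopic. (This is how alias.go marks every identifier.)
type Topic = newTopic

func main() {
	old := Topic{Name: "projects/p/topics/t"}
	var moved newTopic = old // compiles: same type, zero-cost migration
	fmt.Println(moved.Name)
}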
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListSnapshotsResponse = src.ListSnapshotsResponse + +// Request for the `ListSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListSubscriptionsRequest = src.ListSubscriptionsRequest + +// Response for the `ListSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListSubscriptionsResponse = src.ListSubscriptionsResponse + +// Request for the `ListTopicSnapshots` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListTopicSnapshotsRequest = src.ListTopicSnapshotsRequest + +// Response for the `ListTopicSnapshots` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListTopicSnapshotsResponse = src.ListTopicSnapshotsResponse + +// Request for the `ListTopicSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListTopicSubscriptionsRequest = src.ListTopicSubscriptionsRequest + +// Response for the `ListTopicSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListTopicSubscriptionsResponse = src.ListTopicSubscriptionsResponse + +// Request for the `ListTopics` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListTopicsRequest = src.ListTopicsRequest + +// Response for the `ListTopics` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ListTopicsResponse = src.ListTopicsResponse + +// A policy constraining the storage of messages published to the topic. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type MessageStoragePolicy = src.MessageStoragePolicy + +// All supported message transforms types. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type MessageTransform = src.MessageTransform +type MessageTransform_JavascriptUdf = src.MessageTransform_JavascriptUdf + +// Request for the ModifyAckDeadline method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ModifyAckDeadlineRequest = src.ModifyAckDeadlineRequest + +// Request for the ModifyPushConfig method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ModifyPushConfigRequest = src.ModifyPushConfigRequest + +// Settings for Platform Logs produced by Pub/Sub. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PlatformLogsSettings = src.PlatformLogsSettings + +// Severity levels of Platform Logs. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PlatformLogsSettings_Severity = src.PlatformLogsSettings_Severity + +// Request for the Publish method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PublishRequest = src.PublishRequest + +// Response for the `Publish` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PublishResponse = src.PublishResponse + +// PublisherClient is the client API for Publisher service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PublisherClient = src.PublisherClient + +// PublisherServer is the server API for Publisher service. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PublisherServer = src.PublisherServer + +// A message that is published by publishers and consumed by subscribers. The +// message must contain either a non-empty data field or at least one +// attribute. Note that client libraries represent this object differently +// depending on the language. See the corresponding [client library +// documentation](https://cloud.google.com/pubsub/docs/reference/libraries) for +// more information. See [quotas and limits] +// (https://cloud.google.com/pubsub/quotas) for more information about message +// limits. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PubsubMessage = src.PubsubMessage + +// Request for the `Pull` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PullRequest = src.PullRequest + +// Response for the `Pull` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PullResponse = src.PullResponse + +// Configuration for a push delivery endpoint. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PushConfig = src.PushConfig + +// Sets the `data` field as the HTTP body for delivery. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PushConfig_NoWrapper = src.PushConfig_NoWrapper +type PushConfig_NoWrapper_ = src.PushConfig_NoWrapper_ + +// Contains information needed for generating an [OpenID Connect +// token](https://developers.google.com/identity/protocols/OpenIDConnect). +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PushConfig_OidcToken = src.PushConfig_OidcToken +type PushConfig_OidcToken_ = src.PushConfig_OidcToken_ + +// The payload to the push endpoint is in the form of the JSON representation +// of a PubsubMessage +// (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage). +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type PushConfig_PubsubWrapper = src.PushConfig_PubsubWrapper +type PushConfig_PubsubWrapper_ = src.PushConfig_PubsubWrapper_ + +// A message and its corresponding acknowledgment ID. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ReceivedMessage = src.ReceivedMessage + +// A policy that specifies how Pub/Sub retries message delivery. Retry delay +// will be exponential based on provided minimum and maximum backoffs. +// https://en.wikipedia.org/wiki/Exponential_backoff. RetryPolicy will be +// triggered on NACKs or acknowledgement deadline exceeded events for a given +// message. Retry Policy is implemented on a best effort basis. At times, the +// delay between consecutive deliveries may not match the configuration. That +// is, delay can be more or less than configured backoff. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type RetryPolicy = src.RetryPolicy + +// Request for the `RollbackSchema` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type RollbackSchemaRequest = src.RollbackSchemaRequest + +// A schema resource. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type Schema = src.Schema + +// SchemaServiceClient is the client API for SchemaService service. For +// semantics around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type SchemaServiceClient = src.SchemaServiceClient + +// SchemaServiceServer is the server API for SchemaService service. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type SchemaServiceServer = src.SchemaServiceServer + +// Settings for validating messages published against a schema. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type SchemaSettings = src.SchemaSettings + +// View of Schema object fields to be returned by GetSchema and ListSchemas. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type SchemaView = src.SchemaView + +// Possible schema definition types. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type Schema_Type = src.Schema_Type + +// Request for the `Seek` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type SeekRequest = src.SeekRequest +type SeekRequest_Snapshot = src.SeekRequest_Snapshot +type SeekRequest_Time = src.SeekRequest_Time + +// Response for the `Seek` method (this response is empty). +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type SeekResponse = src.SeekResponse + +// A snapshot resource. Snapshots are used in +// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, +// which allow you to manage message acknowledgments in bulk. That is, you can +// set the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type Snapshot = src.Snapshot + +// Request for the `StreamingPull` streaming RPC method. This request is used +// to establish the initial stream as well as to stream acknowledgements and +// ack deadline modifications from the client to the server. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type StreamingPullRequest = src.StreamingPullRequest + +// Response for the `StreamingPull` method. This response is used to stream +// messages from the server to the client. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type StreamingPullResponse = src.StreamingPullResponse + +// Acknowledgement IDs sent in one or more previous requests to acknowledge a +// previously received message. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type StreamingPullResponse_AcknowledgeConfirmation = src.StreamingPullResponse_AcknowledgeConfirmation + +// Acknowledgement IDs sent in one or more previous requests to modify the +// deadline for a specific message. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type StreamingPullResponse_ModifyAckDeadlineConfirmation = src.StreamingPullResponse_ModifyAckDeadlineConfirmation + +// Subscription properties sent as part of the response. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type StreamingPullResponse_SubscriptionProperties = src.StreamingPullResponse_SubscriptionProperties + +// SubscriberClient is the client API for Subscriber service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type SubscriberClient = src.SubscriberClient + +// SubscriberServer is the server API for Subscriber service. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type SubscriberServer = src.SubscriberServer +type Subscriber_StreamingPullClient = src.Subscriber_StreamingPullClient +type Subscriber_StreamingPullServer = src.Subscriber_StreamingPullServer + +// A subscription resource. If none of `push_config`, `bigquery_config`, or +// `cloud_storage_config` is set, then the subscriber will pull and ack +// messages using API methods. At most one of these fields may be set. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type Subscription = src.Subscription + +// Information about an associated [Analytics Hub +// subscription](https://cloud.google.com/bigquery/docs/analytics-hub-manage-subscriptions). +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type Subscription_AnalyticsHubSubscriptionInfo = src.Subscription_AnalyticsHubSubscriptionInfo + +// Possible states for a subscription. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type Subscription_State = src.Subscription_State + +// A topic resource. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type Topic = src.Topic + +// The state of the topic. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type Topic_State = src.Topic_State + +// UnimplementedPublisherServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type UnimplementedPublisherServer = src.UnimplementedPublisherServer + +// UnimplementedSchemaServiceServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type UnimplementedSchemaServiceServer = src.UnimplementedSchemaServiceServer + +// UnimplementedSubscriberServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type UnimplementedSubscriberServer = src.UnimplementedSubscriberServer + +// Request for the UpdateSnapshot method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type UpdateSnapshotRequest = src.UpdateSnapshotRequest + +// Request for the UpdateSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type UpdateSubscriptionRequest = src.UpdateSubscriptionRequest + +// Request for the UpdateTopic method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type UpdateTopicRequest = src.UpdateTopicRequest + +// Request for the `ValidateMessage` method. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ValidateMessageRequest = src.ValidateMessageRequest +type ValidateMessageRequest_Name = src.ValidateMessageRequest_Name +type ValidateMessageRequest_Schema = src.ValidateMessageRequest_Schema + +// Response for the `ValidateMessage` method. Empty for now. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ValidateMessageResponse = src.ValidateMessageResponse + +// Request for the `ValidateSchema` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ValidateSchemaRequest = src.ValidateSchemaRequest + +// Response for the `ValidateSchema` method. Empty for now. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +type ValidateSchemaResponse = src.ValidateSchemaResponse + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +func NewPublisherClient(cc grpc.ClientConnInterface) PublisherClient { + return src.NewPublisherClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +func NewSchemaServiceClient(cc grpc.ClientConnInterface) SchemaServiceClient { + return src.NewSchemaServiceClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +func NewSubscriberClient(cc grpc.ClientConnInterface) SubscriberClient { + return src.NewSubscriberClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +func RegisterPublisherServer(s *grpc.Server, srv PublisherServer) { + src.RegisterPublisherServer(s, srv) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +func RegisterSchemaServiceServer(s *grpc.Server, srv SchemaServiceServer) { + src.RegisterSchemaServiceServer(s, srv) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/v2/apiv1/pubsubpb +func RegisterSubscriberServer(s *grpc.Server, srv SubscriberServer) { + src.RegisterSubscriberServer(s, srv) +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go deleted file mode 100644 index b266b23b..00000000 --- a/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go +++ /dev/null @@ -1,7919 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
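One detail from the tail of alias.go, just above where the deleted pubsub.pb.go begins: Go has no function aliases, so NewPublisherClient, RegisterPublisherServer, and the other constructors are declared as one-line wrappers that forward to the v2 package; because their parameter and return types are themselves aliases, the wrappers stay drop-in compatible. A compressed, hypothetical illustration:

// Demo of forwarding a constructor that cannot be aliased (local stand-ins).
package main

import "fmt"

type client struct{ target string }

// newClientV2 stands in for the constructor that moved to the new package.
func newClientV2(target string) *client { return &client{target: target} }

// NewClient cannot be a "func alias" (Go has none), so the shim forwards.
//
// Deprecated: use newClientV2.
func NewClient(target string) *client { return newClientV2(target) }

func main() {
	fmt.Println(NewClient("pubsub.googleapis.com").target)
}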
-// versions: -// protoc-gen-go v1.34.2 -// protoc v4.25.3 -// source: google/pubsub/v1/pubsub.proto - -package pubsubpb - -import ( - context "context" - reflect "reflect" - sync "sync" - - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - emptypb "google.golang.org/protobuf/types/known/emptypb" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Possible states for ingestion from Amazon Kinesis Data Streams. -type IngestionDataSourceSettings_AwsKinesis_State int32 - -const ( - // Default value. This value is unused. - IngestionDataSourceSettings_AwsKinesis_STATE_UNSPECIFIED IngestionDataSourceSettings_AwsKinesis_State = 0 - // Ingestion is active. - IngestionDataSourceSettings_AwsKinesis_ACTIVE IngestionDataSourceSettings_AwsKinesis_State = 1 - // Permission denied encountered while consuming data from Kinesis. - // This can happen if: - // - The provided `aws_role_arn` does not exist or does not have the - // appropriate permissions attached. - // - The provided `aws_role_arn` is not set up properly for Identity - // Federation using `gcp_service_account`. - // - The Pub/Sub SA is not granted the - // `iam.serviceAccounts.getOpenIdToken` permission on - // `gcp_service_account`. - IngestionDataSourceSettings_AwsKinesis_KINESIS_PERMISSION_DENIED IngestionDataSourceSettings_AwsKinesis_State = 2 - // Permission denied encountered while publishing to the topic. This can - // happen if the Pub/Sub SA has not been granted the [appropriate publish - // permissions](https://cloud.google.com/pubsub/docs/access-control#pubsub.publisher) - IngestionDataSourceSettings_AwsKinesis_PUBLISH_PERMISSION_DENIED IngestionDataSourceSettings_AwsKinesis_State = 3 - // The Kinesis stream does not exist. - IngestionDataSourceSettings_AwsKinesis_STREAM_NOT_FOUND IngestionDataSourceSettings_AwsKinesis_State = 4 - // The Kinesis consumer does not exist. - IngestionDataSourceSettings_AwsKinesis_CONSUMER_NOT_FOUND IngestionDataSourceSettings_AwsKinesis_State = 5 -) - -// Enum value maps for IngestionDataSourceSettings_AwsKinesis_State. 
-var ( - IngestionDataSourceSettings_AwsKinesis_State_name = map[int32]string{ - 0: "STATE_UNSPECIFIED", - 1: "ACTIVE", - 2: "KINESIS_PERMISSION_DENIED", - 3: "PUBLISH_PERMISSION_DENIED", - 4: "STREAM_NOT_FOUND", - 5: "CONSUMER_NOT_FOUND", - } - IngestionDataSourceSettings_AwsKinesis_State_value = map[string]int32{ - "STATE_UNSPECIFIED": 0, - "ACTIVE": 1, - "KINESIS_PERMISSION_DENIED": 2, - "PUBLISH_PERMISSION_DENIED": 3, - "STREAM_NOT_FOUND": 4, - "CONSUMER_NOT_FOUND": 5, - } -) - -func (x IngestionDataSourceSettings_AwsKinesis_State) Enum() *IngestionDataSourceSettings_AwsKinesis_State { - p := new(IngestionDataSourceSettings_AwsKinesis_State) - *p = x - return p -} - -func (x IngestionDataSourceSettings_AwsKinesis_State) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (IngestionDataSourceSettings_AwsKinesis_State) Descriptor() protoreflect.EnumDescriptor { - return file_google_pubsub_v1_pubsub_proto_enumTypes[0].Descriptor() -} - -func (IngestionDataSourceSettings_AwsKinesis_State) Type() protoreflect.EnumType { - return &file_google_pubsub_v1_pubsub_proto_enumTypes[0] -} - -func (x IngestionDataSourceSettings_AwsKinesis_State) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use IngestionDataSourceSettings_AwsKinesis_State.Descriptor instead. -func (IngestionDataSourceSettings_AwsKinesis_State) EnumDescriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 0, 0} -} - -// The state of the topic. -type Topic_State int32 - -const ( - // Default value. This value is unused. - Topic_STATE_UNSPECIFIED Topic_State = 0 - // The topic does not have any persistent errors. - Topic_ACTIVE Topic_State = 1 - // Ingestion from the data source has encountered a permanent error. - // See the more detailed error state in the corresponding ingestion - // source configuration. - Topic_INGESTION_RESOURCE_ERROR Topic_State = 2 -) - -// Enum value maps for Topic_State. -var ( - Topic_State_name = map[int32]string{ - 0: "STATE_UNSPECIFIED", - 1: "ACTIVE", - 2: "INGESTION_RESOURCE_ERROR", - } - Topic_State_value = map[string]int32{ - "STATE_UNSPECIFIED": 0, - "ACTIVE": 1, - "INGESTION_RESOURCE_ERROR": 2, - } -) - -func (x Topic_State) Enum() *Topic_State { - p := new(Topic_State) - *p = x - return p -} - -func (x Topic_State) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Topic_State) Descriptor() protoreflect.EnumDescriptor { - return file_google_pubsub_v1_pubsub_proto_enumTypes[1].Descriptor() -} - -func (Topic_State) Type() protoreflect.EnumType { - return &file_google_pubsub_v1_pubsub_proto_enumTypes[1] -} - -func (x Topic_State) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Topic_State.Descriptor instead. -func (Topic_State) EnumDescriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3, 0} -} - -// Possible states for a subscription. -type Subscription_State int32 - -const ( - // Default value. This value is unused. - Subscription_STATE_UNSPECIFIED Subscription_State = 0 - // The subscription can actively receive messages - Subscription_ACTIVE Subscription_State = 1 - // The subscription cannot receive messages because of an error with the - // resource to which it pushes messages. See the more detailed error state - // in the corresponding configuration. 
- Subscription_RESOURCE_ERROR Subscription_State = 2 -) - -// Enum value maps for Subscription_State. -var ( - Subscription_State_name = map[int32]string{ - 0: "STATE_UNSPECIFIED", - 1: "ACTIVE", - 2: "RESOURCE_ERROR", - } - Subscription_State_value = map[string]int32{ - "STATE_UNSPECIFIED": 0, - "ACTIVE": 1, - "RESOURCE_ERROR": 2, - } -) - -func (x Subscription_State) Enum() *Subscription_State { - p := new(Subscription_State) - *p = x - return p -} - -func (x Subscription_State) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Subscription_State) Descriptor() protoreflect.EnumDescriptor { - return file_google_pubsub_v1_pubsub_proto_enumTypes[2].Descriptor() -} - -func (Subscription_State) Type() protoreflect.EnumType { - return &file_google_pubsub_v1_pubsub_proto_enumTypes[2] -} - -func (x Subscription_State) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Subscription_State.Descriptor instead. -func (Subscription_State) EnumDescriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{18, 0} -} - -// Possible states for a BigQuery subscription. -type BigQueryConfig_State int32 - -const ( - // Default value. This value is unused. - BigQueryConfig_STATE_UNSPECIFIED BigQueryConfig_State = 0 - // The subscription can actively send messages to BigQuery - BigQueryConfig_ACTIVE BigQueryConfig_State = 1 - // Cannot write to the BigQuery table because of permission denied errors. - // This can happen if - // - Pub/Sub SA has not been granted the [appropriate BigQuery IAM - // permissions](https://cloud.google.com/pubsub/docs/create-subscription#assign_bigquery_service_account) - // - bigquery.googleapis.com API is not enabled for the project - // ([instructions](https://cloud.google.com/service-usage/docs/enable-disable)) - BigQueryConfig_PERMISSION_DENIED BigQueryConfig_State = 2 - // Cannot write to the BigQuery table because it does not exist. - BigQueryConfig_NOT_FOUND BigQueryConfig_State = 3 - // Cannot write to the BigQuery table due to a schema mismatch. - BigQueryConfig_SCHEMA_MISMATCH BigQueryConfig_State = 4 - // Cannot write to the destination because enforce_in_transit is set to true - // and the destination locations are not in the allowed regions. - BigQueryConfig_IN_TRANSIT_LOCATION_RESTRICTION BigQueryConfig_State = 5 -) - -// Enum value maps for BigQueryConfig_State. 
-var ( - BigQueryConfig_State_name = map[int32]string{ - 0: "STATE_UNSPECIFIED", - 1: "ACTIVE", - 2: "PERMISSION_DENIED", - 3: "NOT_FOUND", - 4: "SCHEMA_MISMATCH", - 5: "IN_TRANSIT_LOCATION_RESTRICTION", - } - BigQueryConfig_State_value = map[string]int32{ - "STATE_UNSPECIFIED": 0, - "ACTIVE": 1, - "PERMISSION_DENIED": 2, - "NOT_FOUND": 3, - "SCHEMA_MISMATCH": 4, - "IN_TRANSIT_LOCATION_RESTRICTION": 5, - } -) - -func (x BigQueryConfig_State) Enum() *BigQueryConfig_State { - p := new(BigQueryConfig_State) - *p = x - return p -} - -func (x BigQueryConfig_State) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (BigQueryConfig_State) Descriptor() protoreflect.EnumDescriptor { - return file_google_pubsub_v1_pubsub_proto_enumTypes[3].Descriptor() -} - -func (BigQueryConfig_State) Type() protoreflect.EnumType { - return &file_google_pubsub_v1_pubsub_proto_enumTypes[3] -} - -func (x BigQueryConfig_State) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use BigQueryConfig_State.Descriptor instead. -func (BigQueryConfig_State) EnumDescriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23, 0} -} - -// Possible states for a Cloud Storage subscription. -type CloudStorageConfig_State int32 - -const ( - // Default value. This value is unused. - CloudStorageConfig_STATE_UNSPECIFIED CloudStorageConfig_State = 0 - // The subscription can actively send messages to Cloud Storage. - CloudStorageConfig_ACTIVE CloudStorageConfig_State = 1 - // Cannot write to the Cloud Storage bucket because of permission denied - // errors. - CloudStorageConfig_PERMISSION_DENIED CloudStorageConfig_State = 2 - // Cannot write to the Cloud Storage bucket because it does not exist. - CloudStorageConfig_NOT_FOUND CloudStorageConfig_State = 3 - // Cannot write to the destination because enforce_in_transit is set to true - // and the destination locations are not in the allowed regions. - CloudStorageConfig_IN_TRANSIT_LOCATION_RESTRICTION CloudStorageConfig_State = 4 - // Cannot write to the Cloud Storage bucket due to an incompatibility - // between the topic schema and subscription settings. - CloudStorageConfig_SCHEMA_MISMATCH CloudStorageConfig_State = 5 -) - -// Enum value maps for CloudStorageConfig_State. -var ( - CloudStorageConfig_State_name = map[int32]string{ - 0: "STATE_UNSPECIFIED", - 1: "ACTIVE", - 2: "PERMISSION_DENIED", - 3: "NOT_FOUND", - 4: "IN_TRANSIT_LOCATION_RESTRICTION", - 5: "SCHEMA_MISMATCH", - } - CloudStorageConfig_State_value = map[string]int32{ - "STATE_UNSPECIFIED": 0, - "ACTIVE": 1, - "PERMISSION_DENIED": 2, - "NOT_FOUND": 3, - "IN_TRANSIT_LOCATION_RESTRICTION": 4, - "SCHEMA_MISMATCH": 5, - } -) - -func (x CloudStorageConfig_State) Enum() *CloudStorageConfig_State { - p := new(CloudStorageConfig_State) - *p = x - return p -} - -func (x CloudStorageConfig_State) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (CloudStorageConfig_State) Descriptor() protoreflect.EnumDescriptor { - return file_google_pubsub_v1_pubsub_proto_enumTypes[4].Descriptor() -} - -func (CloudStorageConfig_State) Type() protoreflect.EnumType { - return &file_google_pubsub_v1_pubsub_proto_enumTypes[4] -} - -func (x CloudStorageConfig_State) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use CloudStorageConfig_State.Descriptor instead. 
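The deleted file shows the standard protoc-gen-go enum layout, now provided by the v2 package the aliases point at: an int32-backed named type plus parallel name/value maps for cheap round-tripping between wire numbers and symbolic names. A runnable sketch using the Topic_State shape declared above:

// Illustrative use of the generated enum maps (same shape as the deleted
// declarations; the real ones now live in the v2 pubsubpb package).
package main

import "fmt"

type Topic_State int32

const (
	Topic_STATE_UNSPECIFIED        Topic_State = 0
	Topic_ACTIVE                   Topic_State = 1
	Topic_INGESTION_RESOURCE_ERROR Topic_State = 2
)

// Parallel maps exactly as protoc-gen-go emits them.
var Topic_State_name = map[int32]string{
	0: "STATE_UNSPECIFIED",
	1: "ACTIVE",
	2: "INGESTION_RESOURCE_ERROR",
}
var Topic_State_value = map[string]int32{
	"STATE_UNSPECIFIED":        0,
	"ACTIVE":                   1,
	"INGESTION_RESOURCE_ERROR": 2,
}

func main() {
	s := Topic_ACTIVE
	fmt.Println(Topic_State_name[int32(s)])   // ACTIVE
	fmt.Println(Topic_State_value["ACTIVE"])  // 1
}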
-func (CloudStorageConfig_State) EnumDescriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 0} -} - -// A policy constraining the storage of messages published to the topic. -type MessageStoragePolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. A list of IDs of Google Cloud regions where messages that are - // published to the topic may be persisted in storage. Messages published by - // publishers running in non-allowed Google Cloud regions (or running outside - // of Google Cloud altogether) are routed for storage in one of the allowed - // regions. An empty list means that no regions are allowed, and is not a - // valid configuration. - AllowedPersistenceRegions []string `protobuf:"bytes,1,rep,name=allowed_persistence_regions,json=allowedPersistenceRegions,proto3" json:"allowed_persistence_regions,omitempty"` - // Optional. If true, `allowed_persistence_regions` is also used to enforce - // in-transit guarantees for messages. That is, Pub/Sub will fail - // Publish operations on this topic and subscribe operations - // on any subscription attached to this topic in any region that is - // not in `allowed_persistence_regions`. - EnforceInTransit bool `protobuf:"varint,2,opt,name=enforce_in_transit,json=enforceInTransit,proto3" json:"enforce_in_transit,omitempty"` -} - -func (x *MessageStoragePolicy) Reset() { - *x = MessageStoragePolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MessageStoragePolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MessageStoragePolicy) ProtoMessage() {} - -func (x *MessageStoragePolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MessageStoragePolicy.ProtoReflect.Descriptor instead. -func (*MessageStoragePolicy) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{0} -} - -func (x *MessageStoragePolicy) GetAllowedPersistenceRegions() []string { - if x != nil { - return x.AllowedPersistenceRegions - } - return nil -} - -func (x *MessageStoragePolicy) GetEnforceInTransit() bool { - if x != nil { - return x.EnforceInTransit - } - return false -} - -// Settings for validating messages published against a schema. -type SchemaSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the schema that messages published should be - // validated against. Format is `projects/{project}/schemas/{schema}`. The - // value of this field will be `_deleted-schema_` if the schema has been - // deleted. - Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` - // Optional. The encoding of messages validated against `schema`. - Encoding Encoding `protobuf:"varint,2,opt,name=encoding,proto3,enum=google.pubsub.v1.Encoding" json:"encoding,omitempty"` - // Optional. The minimum (inclusive) revision allowed for validating messages. 
- // If empty or not present, allow any revision to be validated against - // last_revision or any revision created before. - FirstRevisionId string `protobuf:"bytes,3,opt,name=first_revision_id,json=firstRevisionId,proto3" json:"first_revision_id,omitempty"` - // Optional. The maximum (inclusive) revision allowed for validating messages. - // If empty or not present, allow any revision to be validated against - // first_revision or any revision created after. - LastRevisionId string `protobuf:"bytes,4,opt,name=last_revision_id,json=lastRevisionId,proto3" json:"last_revision_id,omitempty"` -} - -func (x *SchemaSettings) Reset() { - *x = SchemaSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SchemaSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SchemaSettings) ProtoMessage() {} - -func (x *SchemaSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SchemaSettings.ProtoReflect.Descriptor instead. -func (*SchemaSettings) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{1} -} - -func (x *SchemaSettings) GetSchema() string { - if x != nil { - return x.Schema - } - return "" -} - -func (x *SchemaSettings) GetEncoding() Encoding { - if x != nil { - return x.Encoding - } - return Encoding_ENCODING_UNSPECIFIED -} - -func (x *SchemaSettings) GetFirstRevisionId() string { - if x != nil { - return x.FirstRevisionId - } - return "" -} - -func (x *SchemaSettings) GetLastRevisionId() string { - if x != nil { - return x.LastRevisionId - } - return "" -} - -// Settings for an ingestion data source on a topic. -type IngestionDataSourceSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Only one source type can have settings set. - // - // Types that are assignable to Source: - // - // *IngestionDataSourceSettings_AwsKinesis_ - Source isIngestionDataSourceSettings_Source `protobuf_oneof:"source"` -} - -func (x *IngestionDataSourceSettings) Reset() { - *x = IngestionDataSourceSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IngestionDataSourceSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IngestionDataSourceSettings) ProtoMessage() {} - -func (x *IngestionDataSourceSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use IngestionDataSourceSettings.ProtoReflect.Descriptor instead. 
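// MessageStoragePolicy and SchemaSettings are plain data carriers, so wiring
// them up is just struct literals. A sketch under assumed values (region
// names, the schema resource, and revision IDs are all illustrative):
package example

import (
	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

// topicPolicies pins message persistence to two EU regions, enforces the same
// list in transit, and requires published messages to validate as JSON
// against a bounded range of schema revisions.
func topicPolicies() (*pubsubpb.MessageStoragePolicy, *pubsubpb.SchemaSettings) {
	storage := &pubsubpb.MessageStoragePolicy{
		AllowedPersistenceRegions: []string{"europe-west1", "europe-west4"}, // empty would be invalid
		EnforceInTransit:          true,
	}
	schema := &pubsubpb.SchemaSettings{
		Schema:          "projects/my-project/schemas/my-schema",
		Encoding:        pubsubpb.Encoding_JSON,
		FirstRevisionId: "rev-aaaa", // hypothetical revision IDs
		LastRevisionId:  "rev-bbbb",
	}
	return storage, schema
}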
-func (*IngestionDataSourceSettings) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2} -} - -func (m *IngestionDataSourceSettings) GetSource() isIngestionDataSourceSettings_Source { - if m != nil { - return m.Source - } - return nil -} - -func (x *IngestionDataSourceSettings) GetAwsKinesis() *IngestionDataSourceSettings_AwsKinesis { - if x, ok := x.GetSource().(*IngestionDataSourceSettings_AwsKinesis_); ok { - return x.AwsKinesis - } - return nil -} - -type isIngestionDataSourceSettings_Source interface { - isIngestionDataSourceSettings_Source() -} - -type IngestionDataSourceSettings_AwsKinesis_ struct { - // Optional. Amazon Kinesis Data Streams. - AwsKinesis *IngestionDataSourceSettings_AwsKinesis `protobuf:"bytes,1,opt,name=aws_kinesis,json=awsKinesis,proto3,oneof"` -} - -func (*IngestionDataSourceSettings_AwsKinesis_) isIngestionDataSourceSettings_Source() {} - -// A topic resource. -type Topic struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the topic. It must have the format - // `"projects/{project}/topics/{topic}"`. `{topic}` must start with a letter, - // and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), - // underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent - // signs (`%`). It must be between 3 and 255 characters in length, and it - // must not start with `"goog"`. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Optional. See [Creating and managing labels] - // (https://cloud.google.com/pubsub/docs/labels). - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Optional. Policy constraining the set of Google Cloud Platform regions - // where messages published to the topic may be stored. If not present, then - // no constraints are in effect. - MessageStoragePolicy *MessageStoragePolicy `protobuf:"bytes,3,opt,name=message_storage_policy,json=messageStoragePolicy,proto3" json:"message_storage_policy,omitempty"` - // Optional. The resource name of the Cloud KMS CryptoKey to be used to - // protect access to messages published on this topic. - // - // The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`. - KmsKeyName string `protobuf:"bytes,5,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"` - // Optional. Settings for validating messages published against a schema. - SchemaSettings *SchemaSettings `protobuf:"bytes,6,opt,name=schema_settings,json=schemaSettings,proto3" json:"schema_settings,omitempty"` - // Optional. Reserved for future use. This field is set only in responses from - // the server; it is ignored if it is set in any requests. - SatisfiesPzs bool `protobuf:"varint,7,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"` - // Optional. Indicates the minimum duration to retain a message after it is - // published to the topic. If this field is set, messages published to the - // topic in the last `message_retention_duration` are always available to - // subscribers. For instance, it allows any attached subscription to [seek to - // a - // timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) - // that is up to `message_retention_duration` in the past. 
If this field is - // not set, message retention is controlled by settings on individual - // subscriptions. Cannot be more than 31 days or less than 10 minutes. - MessageRetentionDuration *durationpb.Duration `protobuf:"bytes,8,opt,name=message_retention_duration,json=messageRetentionDuration,proto3" json:"message_retention_duration,omitempty"` - // Output only. An output-only field indicating the state of the topic. - State Topic_State `protobuf:"varint,9,opt,name=state,proto3,enum=google.pubsub.v1.Topic_State" json:"state,omitempty"` - // Optional. Settings for ingestion from a data source into this topic. - IngestionDataSourceSettings *IngestionDataSourceSettings `protobuf:"bytes,10,opt,name=ingestion_data_source_settings,json=ingestionDataSourceSettings,proto3" json:"ingestion_data_source_settings,omitempty"` -} - -func (x *Topic) Reset() { - *x = Topic{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Topic) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Topic) ProtoMessage() {} - -func (x *Topic) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Topic.ProtoReflect.Descriptor instead. -func (*Topic) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3} -} - -func (x *Topic) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Topic) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -func (x *Topic) GetMessageStoragePolicy() *MessageStoragePolicy { - if x != nil { - return x.MessageStoragePolicy - } - return nil -} - -func (x *Topic) GetKmsKeyName() string { - if x != nil { - return x.KmsKeyName - } - return "" -} - -func (x *Topic) GetSchemaSettings() *SchemaSettings { - if x != nil { - return x.SchemaSettings - } - return nil -} - -func (x *Topic) GetSatisfiesPzs() bool { - if x != nil { - return x.SatisfiesPzs - } - return false -} - -func (x *Topic) GetMessageRetentionDuration() *durationpb.Duration { - if x != nil { - return x.MessageRetentionDuration - } - return nil -} - -func (x *Topic) GetState() Topic_State { - if x != nil { - return x.State - } - return Topic_STATE_UNSPECIFIED -} - -func (x *Topic) GetIngestionDataSourceSettings() *IngestionDataSourceSettings { - if x != nil { - return x.IngestionDataSourceSettings - } - return nil -} - -// A message that is published by publishers and consumed by subscribers. The -// message must contain either a non-empty data field or at least one attribute. -// Note that client libraries represent this object differently -// depending on the language. See the corresponding [client library -// documentation](https://cloud.google.com/pubsub/docs/reference/libraries) for -// more information. See [quotas and limits] -// (https://cloud.google.com/pubsub/quotas) for more information about message -// limits. -type PubsubMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The message data field. If this field is empty, the message must - // contain at least one attribute. 
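// Pulling the Topic fields above together: a sketch of a fully specified
// topic literal (the names and KMS key are illustrative). Retention must stay
// within the documented 10-minute-to-31-day window; 24h is arbitrary here.
package example

import (
	"time"

	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

// newTopic leaves State unset because it is output-only; the server ignores
// it on requests.
func newTopic() *pubsubpb.Topic {
	return &pubsubpb.Topic{
		Name:                     "projects/my-project/topics/my-topic", // must not start with "goog"
		Labels:                   map[string]string{"env": "prod"},
		KmsKeyName:               "projects/my-project/locations/global/keyRings/kr/cryptoKeys/k",
		MessageRetentionDuration: durationpb.New(24 * time.Hour),
	}
}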
- Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - // Optional. Attributes for this message. If this field is empty, the message - // must contain non-empty data. This can be used to filter messages on the - // subscription. - Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // ID of this message, assigned by the server when the message is published. - // Guaranteed to be unique within the topic. This value may be read by a - // subscriber that receives a `PubsubMessage` via a `Pull` call or a push - // delivery. It must not be populated by the publisher in a `Publish` call. - MessageId string `protobuf:"bytes,3,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` - // The time at which the message was published, populated by the server when - // it receives the `Publish` call. It must not be populated by the - // publisher in a `Publish` call. - PublishTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=publish_time,json=publishTime,proto3" json:"publish_time,omitempty"` - // Optional. If non-empty, identifies related messages for which publish order - // should be respected. If a `Subscription` has `enable_message_ordering` set - // to `true`, messages published with the same non-empty `ordering_key` value - // will be delivered to subscribers in the order in which they are received by - // the Pub/Sub system. All `PubsubMessage`s published in a given - // `PublishRequest` must specify the same `ordering_key` value. For more - // information, see [ordering - // messages](https://cloud.google.com/pubsub/docs/ordering). - OrderingKey string `protobuf:"bytes,5,opt,name=ordering_key,json=orderingKey,proto3" json:"ordering_key,omitempty"` -} - -func (x *PubsubMessage) Reset() { - *x = PubsubMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PubsubMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PubsubMessage) ProtoMessage() {} - -func (x *PubsubMessage) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PubsubMessage.ProtoReflect.Descriptor instead. -func (*PubsubMessage) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4} -} - -func (x *PubsubMessage) GetData() []byte { - if x != nil { - return x.Data - } - return nil -} - -func (x *PubsubMessage) GetAttributes() map[string]string { - if x != nil { - return x.Attributes - } - return nil -} - -func (x *PubsubMessage) GetMessageId() string { - if x != nil { - return x.MessageId - } - return "" -} - -func (x *PubsubMessage) GetPublishTime() *timestamppb.Timestamp { - if x != nil { - return x.PublishTime - } - return nil -} - -func (x *PubsubMessage) GetOrderingKey() string { - if x != nil { - return x.OrderingKey - } - return "" -} - -// Request for the GetTopic method. -type GetTopicRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the topic to get. 
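// The PubsubMessage contract above (non-empty data or at least one attribute,
// server-owned message_id and publish_time, one ordering key per
// PublishRequest) condenses to a small constructor. Field values are
// illustrative:
package example

import (
	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

// newMessage leaves MessageId and PublishTime zero on purpose: the server
// populates both, and publishers must not set them.
func newMessage(payload []byte, key string) *pubsubpb.PubsubMessage {
	return &pubsubpb.PubsubMessage{
		Data:        payload,
		Attributes:  map[string]string{"source": "webhook"}, // also usable in subscription filters
		OrderingKey: key,                                    // per-key FIFO once the subscription enables ordering
	}
}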
- // Format is `projects/{project}/topics/{topic}`. - Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` -} - -func (x *GetTopicRequest) Reset() { - *x = GetTopicRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetTopicRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTopicRequest) ProtoMessage() {} - -func (x *GetTopicRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTopicRequest.ProtoReflect.Descriptor instead. -func (*GetTopicRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{5} -} - -func (x *GetTopicRequest) GetTopic() string { - if x != nil { - return x.Topic - } - return "" -} - -// Request for the UpdateTopic method. -type UpdateTopicRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The updated topic object. - Topic *Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - // Required. Indicates which fields in the provided topic to update. Must be - // specified and non-empty. Note that if `update_mask` contains - // "message_storage_policy" but the `message_storage_policy` is not set in - // the `topic` provided above, then the updated value is determined by the - // policy configured at the project or organization level. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *UpdateTopicRequest) Reset() { - *x = UpdateTopicRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateTopicRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateTopicRequest) ProtoMessage() {} - -func (x *UpdateTopicRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateTopicRequest.ProtoReflect.Descriptor instead. -func (*UpdateTopicRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{6} -} - -func (x *UpdateTopicRequest) GetTopic() *Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *UpdateTopicRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// Request for the Publish method. -type PublishRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The messages in the request will be published on this topic. - // Format is `projects/{project}/topics/{topic}`. - Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - // Required. The messages to publish. 
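// The update_mask semantics above are easy to get wrong: only paths listed in
// the mask change, and listing "message_storage_policy" while leaving the
// field unset reverts it to the project- or organization-level policy. A
// sketch using the generated publisher client (import paths assumed):
package example

import (
	"context"

	pubsubv1 "cloud.google.com/go/pubsub/apiv1"
	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

// relabelTopic rewrites only the labels field; every other topic field is
// untouched because it is absent from the mask.
func relabelTopic(ctx context.Context, client *pubsubv1.PublisherClient, name string) (*pubsubpb.Topic, error) {
	return client.UpdateTopic(ctx, &pubsubpb.UpdateTopicRequest{
		Topic:      &pubsubpb.Topic{Name: name, Labels: map[string]string{"env": "prod"}},
		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"labels"}},
	})
}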
- Messages []*PubsubMessage `protobuf:"bytes,2,rep,name=messages,proto3" json:"messages,omitempty"` -} - -func (x *PublishRequest) Reset() { - *x = PublishRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PublishRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishRequest) ProtoMessage() {} - -func (x *PublishRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. -func (*PublishRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{7} -} - -func (x *PublishRequest) GetTopic() string { - if x != nil { - return x.Topic - } - return "" -} - -func (x *PublishRequest) GetMessages() []*PubsubMessage { - if x != nil { - return x.Messages - } - return nil -} - -// Response for the `Publish` method. -type PublishResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The server-assigned ID of each published message, in the same - // order as the messages in the request. IDs are guaranteed to be unique - // within the topic. - MessageIds []string `protobuf:"bytes,1,rep,name=message_ids,json=messageIds,proto3" json:"message_ids,omitempty"` -} - -func (x *PublishResponse) Reset() { - *x = PublishResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PublishResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishResponse) ProtoMessage() {} - -func (x *PublishResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishResponse.ProtoReflect.Descriptor instead. -func (*PublishResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{8} -} - -func (x *PublishResponse) GetMessageIds() []string { - if x != nil { - return x.MessageIds - } - return nil -} - -// Request for the `ListTopics` method. -type ListTopicsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the project in which to list topics. - // Format is `projects/{project-id}`. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // Optional. Maximum number of topics to return. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // Optional. The value returned by the last `ListTopicsResponse`; indicates - // that this is a continuation of a prior `ListTopics` call, and that the - // system should return the next page of data. 
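// The PublishRequest/PublishResponse round trip: message IDs come back in the
// same order as the request's messages. This sketch uses the low-level
// generated client; application code usually goes through the higher-level
// cloud.google.com/go/pubsub wrapper instead.
package example

import (
	"context"

	pubsubv1 "cloud.google.com/go/pubsub/apiv1"
	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

// publishOne sends one message and returns its server-assigned ID; the
// response carries exactly one ID per message in the request.
func publishOne(ctx context.Context, topic string, data []byte) (string, error) {
	client, err := pubsubv1.NewPublisherClient(ctx)
	if err != nil {
		return "", err
	}
	defer client.Close()

	resp, err := client.Publish(ctx, &pubsubpb.PublishRequest{
		Topic:    topic, // "projects/{project}/topics/{topic}"
		Messages: []*pubsubpb.PubsubMessage{{Data: data}},
	})
	if err != nil {
		return "", err
	}
	return resp.GetMessageIds()[0], nil
}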
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListTopicsRequest) Reset() { - *x = ListTopicsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTopicsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTopicsRequest) ProtoMessage() {} - -func (x *ListTopicsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTopicsRequest.ProtoReflect.Descriptor instead. -func (*ListTopicsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{9} -} - -func (x *ListTopicsRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *ListTopicsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListTopicsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// Response for the `ListTopics` method. -type ListTopicsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The resulting topics. - Topics []*Topic `protobuf:"bytes,1,rep,name=topics,proto3" json:"topics,omitempty"` - // Optional. If not empty, indicates that there may be more topics that match - // the request; this value should be passed in a new `ListTopicsRequest`. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListTopicsResponse) Reset() { - *x = ListTopicsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTopicsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTopicsResponse) ProtoMessage() {} - -func (x *ListTopicsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTopicsResponse.ProtoReflect.Descriptor instead. -func (*ListTopicsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{10} -} - -func (x *ListTopicsResponse) GetTopics() []*Topic { - if x != nil { - return x.Topics - } - return nil -} - -func (x *ListTopicsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request for the `ListTopicSubscriptions` method. -type ListTopicSubscriptionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the topic that subscriptions are attached to. - // Format is `projects/{project}/topics/{topic}`. - Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - // Optional. 
Maximum number of subscription names to return. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // Optional. The value returned by the last `ListTopicSubscriptionsResponse`; - // indicates that this is a continuation of a prior `ListTopicSubscriptions` - // call, and that the system should return the next page of data. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListTopicSubscriptionsRequest) Reset() { - *x = ListTopicSubscriptionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTopicSubscriptionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTopicSubscriptionsRequest) ProtoMessage() {} - -func (x *ListTopicSubscriptionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTopicSubscriptionsRequest.ProtoReflect.Descriptor instead. -func (*ListTopicSubscriptionsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{11} -} - -func (x *ListTopicSubscriptionsRequest) GetTopic() string { - if x != nil { - return x.Topic - } - return "" -} - -func (x *ListTopicSubscriptionsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListTopicSubscriptionsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// Response for the `ListTopicSubscriptions` method. -type ListTopicSubscriptionsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The names of subscriptions attached to the topic specified in the - // request. - Subscriptions []string `protobuf:"bytes,1,rep,name=subscriptions,proto3" json:"subscriptions,omitempty"` - // Optional. If not empty, indicates that there may be more subscriptions that - // match the request; this value should be passed in a new - // `ListTopicSubscriptionsRequest` to get more subscriptions. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListTopicSubscriptionsResponse) Reset() { - *x = ListTopicSubscriptionsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTopicSubscriptionsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTopicSubscriptionsResponse) ProtoMessage() {} - -func (x *ListTopicSubscriptionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTopicSubscriptionsResponse.ProtoReflect.Descriptor instead. 
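// All of the List* request/response pairs here follow the same
// page_token/next_page_token protocol, and the generated client wraps it in
// an iterator so callers never touch the tokens. One sketch covers the
// pattern for topics, subscriptions, and snapshots alike:
package example

import (
	"context"

	pubsubv1 "cloud.google.com/go/pubsub/apiv1"
	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/api/iterator"
)

// listTopicNames drains every page: under the hood the iterator re-sends the
// request with page_token set to the previous next_page_token until the
// server returns an empty token.
func listTopicNames(ctx context.Context, client *pubsubv1.PublisherClient, project string) ([]string, error) {
	var names []string
	it := client.ListTopics(ctx, &pubsubpb.ListTopicsRequest{
		Project: project, // "projects/{project-id}"
	})
	for {
		topic, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		names = append(names, topic.GetName())
	}
	return names, nil
}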
-func (*ListTopicSubscriptionsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{12} -} - -func (x *ListTopicSubscriptionsResponse) GetSubscriptions() []string { - if x != nil { - return x.Subscriptions - } - return nil -} - -func (x *ListTopicSubscriptionsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request for the `ListTopicSnapshots` method. -type ListTopicSnapshotsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the topic that snapshots are attached to. - // Format is `projects/{project}/topics/{topic}`. - Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - // Optional. Maximum number of snapshot names to return. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // Optional. The value returned by the last `ListTopicSnapshotsResponse`; - // indicates that this is a continuation of a prior `ListTopicSnapshots` call, - // and that the system should return the next page of data. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListTopicSnapshotsRequest) Reset() { - *x = ListTopicSnapshotsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTopicSnapshotsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTopicSnapshotsRequest) ProtoMessage() {} - -func (x *ListTopicSnapshotsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTopicSnapshotsRequest.ProtoReflect.Descriptor instead. -func (*ListTopicSnapshotsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{13} -} - -func (x *ListTopicSnapshotsRequest) GetTopic() string { - if x != nil { - return x.Topic - } - return "" -} - -func (x *ListTopicSnapshotsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListTopicSnapshotsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// Response for the `ListTopicSnapshots` method. -type ListTopicSnapshotsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The names of the snapshots that match the request. - Snapshots []string `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` - // Optional. If not empty, indicates that there may be more snapshots that - // match the request; this value should be passed in a new - // `ListTopicSnapshotsRequest` to get more snapshots. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListTopicSnapshotsResponse) Reset() { - *x = ListTopicSnapshotsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTopicSnapshotsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTopicSnapshotsResponse) ProtoMessage() {} - -func (x *ListTopicSnapshotsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTopicSnapshotsResponse.ProtoReflect.Descriptor instead. -func (*ListTopicSnapshotsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{14} -} - -func (x *ListTopicSnapshotsResponse) GetSnapshots() []string { - if x != nil { - return x.Snapshots - } - return nil -} - -func (x *ListTopicSnapshotsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request for the `DeleteTopic` method. -type DeleteTopicRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of the topic to delete. - // Format is `projects/{project}/topics/{topic}`. - Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` -} - -func (x *DeleteTopicRequest) Reset() { - *x = DeleteTopicRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteTopicRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteTopicRequest) ProtoMessage() {} - -func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteTopicRequest.ProtoReflect.Descriptor instead. -func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{15} -} - -func (x *DeleteTopicRequest) GetTopic() string { - if x != nil { - return x.Topic - } - return "" -} - -// Request for the DetachSubscription method. -type DetachSubscriptionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The subscription to detach. - // Format is `projects/{project}/subscriptions/{subscription}`. 
- Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` -} - -func (x *DetachSubscriptionRequest) Reset() { - *x = DetachSubscriptionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DetachSubscriptionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DetachSubscriptionRequest) ProtoMessage() {} - -func (x *DetachSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DetachSubscriptionRequest.ProtoReflect.Descriptor instead. -func (*DetachSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{16} -} - -func (x *DetachSubscriptionRequest) GetSubscription() string { - if x != nil { - return x.Subscription - } - return "" -} - -// Response for the DetachSubscription method. -// Reserved for future use. -type DetachSubscriptionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *DetachSubscriptionResponse) Reset() { - *x = DetachSubscriptionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DetachSubscriptionResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DetachSubscriptionResponse) ProtoMessage() {} - -func (x *DetachSubscriptionResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DetachSubscriptionResponse.ProtoReflect.Descriptor instead. -func (*DetachSubscriptionResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{17} -} - -// A subscription resource. If none of `push_config`, `bigquery_config`, or -// `cloud_storage_config` is set, then the subscriber will pull and ack messages -// using API methods. At most one of these fields may be set. -type Subscription struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the subscription. It must have the format - // `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must - // start with a letter, and contain only letters (`[A-Za-z]`), numbers - // (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), - // plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters - // in length, and it must not start with `"goog"`. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. The name of the topic from which this subscription is receiving - // messages. Format is `projects/{project}/topics/{topic}`. The value of this - // field will be `_deleted-topic_` if the topic has been deleted. 
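// DetachSubscriptionRequest/Response are a plain RPC envelope; the response
// is intentionally empty today. A sketch of the call on the generated
// publisher client (detaching is a Publisher-service method):
package example

import (
	"context"

	pubsubv1 "cloud.google.com/go/pubsub/apiv1"
	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

// detach severs a subscription from its topic; afterwards Pull and
// StreamingPull fail with FAILED_PRECONDITION and the backlog is dropped.
func detach(ctx context.Context, client *pubsubv1.PublisherClient, sub string) error {
	_, err := client.DetachSubscription(ctx, &pubsubpb.DetachSubscriptionRequest{
		Subscription: sub, // "projects/{project}/subscriptions/{subscription}"
	})
	return err
}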
- Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` - // Optional. If push delivery is used with this subscription, this field is - // used to configure it. - PushConfig *PushConfig `protobuf:"bytes,4,opt,name=push_config,json=pushConfig,proto3" json:"push_config,omitempty"` - // Optional. If delivery to BigQuery is used with this subscription, this - // field is used to configure it. - BigqueryConfig *BigQueryConfig `protobuf:"bytes,18,opt,name=bigquery_config,json=bigqueryConfig,proto3" json:"bigquery_config,omitempty"` - // Optional. If delivery to Google Cloud Storage is used with this - // subscription, this field is used to configure it. - CloudStorageConfig *CloudStorageConfig `protobuf:"bytes,22,opt,name=cloud_storage_config,json=cloudStorageConfig,proto3" json:"cloud_storage_config,omitempty"` - // Optional. The approximate amount of time (on a best-effort basis) Pub/Sub - // waits for the subscriber to acknowledge receipt before resending the - // message. In the interval after the message is delivered and before it is - // acknowledged, it is considered to be _outstanding_. During that time - // period, the message will not be redelivered (on a best-effort basis). - // - // For pull subscriptions, this value is used as the initial value for the ack - // deadline. To override this value for a given message, call - // `ModifyAckDeadline` with the corresponding `ack_id` if using - // non-streaming pull or send the `ack_id` in a - // `StreamingModifyAckDeadlineRequest` if using streaming pull. - // The minimum custom deadline you can specify is 10 seconds. - // The maximum custom deadline you can specify is 600 seconds (10 minutes). - // If this parameter is 0, a default value of 10 seconds is used. - // - // For push delivery, this value is also used to set the request timeout for - // the call to the push endpoint. - // - // If the subscriber never acknowledges the message, the Pub/Sub - // system will eventually redeliver the message. - AckDeadlineSeconds int32 `protobuf:"varint,5,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds,proto3" json:"ack_deadline_seconds,omitempty"` - // Optional. Indicates whether to retain acknowledged messages. If true, then - // messages are not expunged from the subscription's backlog, even if they are - // acknowledged, until they fall out of the `message_retention_duration` - // window. This must be true if you would like to [`Seek` to a timestamp] - // (https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) in - // the past to replay previously-acknowledged messages. - RetainAckedMessages bool `protobuf:"varint,7,opt,name=retain_acked_messages,json=retainAckedMessages,proto3" json:"retain_acked_messages,omitempty"` - // Optional. How long to retain unacknowledged messages in the subscription's - // backlog, from the moment a message is published. If `retain_acked_messages` - // is true, then this also configures the retention of acknowledged messages, - // and thus configures how far back in time a `Seek` can be done. Defaults to - // 7 days. Cannot be more than 7 days or less than 10 minutes. - MessageRetentionDuration *durationpb.Duration `protobuf:"bytes,8,opt,name=message_retention_duration,json=messageRetentionDuration,proto3" json:"message_retention_duration,omitempty"` - // Optional. See [Creating and managing - // labels](https://cloud.google.com/pubsub/docs/labels). 
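// A pull subscription within the bounds documented above (ack deadline
// 10-600s, retention between 10 minutes and 7 days). Leaving PushConfig,
// BigqueryConfig, and CloudStorageConfig all nil is what selects pull
// delivery, since at most one of the three may be set. Names are
// illustrative:
package example

import (
	"time"

	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

// newPullSubscription retains acked messages so a Seek to a past timestamp
// can replay them.
func newPullSubscription() *pubsubpb.Subscription {
	return &pubsubpb.Subscription{
		Name:                     "projects/my-project/subscriptions/my-sub",
		Topic:                    "projects/my-project/topics/my-topic",
		AckDeadlineSeconds:       60, // 0 would fall back to the 10s default
		RetainAckedMessages:      true,
		MessageRetentionDuration: durationpb.New(3 * 24 * time.Hour),
	}
}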
- Labels map[string]string `protobuf:"bytes,9,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Optional. If true, messages published with the same `ordering_key` in - // `PubsubMessage` will be delivered to the subscribers in the order in which - // they are received by the Pub/Sub system. Otherwise, they may be delivered - // in any order. - EnableMessageOrdering bool `protobuf:"varint,10,opt,name=enable_message_ordering,json=enableMessageOrdering,proto3" json:"enable_message_ordering,omitempty"` - // Optional. A policy that specifies the conditions for this subscription's - // expiration. A subscription is considered active as long as any connected - // subscriber is successfully consuming messages from the subscription or is - // issuing operations on the subscription. If `expiration_policy` is not set, - // a *default policy* with `ttl` of 31 days will be used. The minimum allowed - // value for `expiration_policy.ttl` is 1 day. If `expiration_policy` is set, - // but `expiration_policy.ttl` is not set, the subscription never expires. - ExpirationPolicy *ExpirationPolicy `protobuf:"bytes,11,opt,name=expiration_policy,json=expirationPolicy,proto3" json:"expiration_policy,omitempty"` - // Optional. An expression written in the Pub/Sub [filter - // language](https://cloud.google.com/pubsub/docs/filtering). If non-empty, - // then only `PubsubMessage`s whose `attributes` field matches the filter are - // delivered on this subscription. If empty, then no messages are filtered - // out. - Filter string `protobuf:"bytes,12,opt,name=filter,proto3" json:"filter,omitempty"` - // Optional. A policy that specifies the conditions for dead lettering - // messages in this subscription. If dead_letter_policy is not set, dead - // lettering is disabled. - // - // The Pub/Sub service account associated with this subscriptions's - // parent project (i.e., - // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have - // permission to Acknowledge() messages on this subscription. - DeadLetterPolicy *DeadLetterPolicy `protobuf:"bytes,13,opt,name=dead_letter_policy,json=deadLetterPolicy,proto3" json:"dead_letter_policy,omitempty"` - // Optional. A policy that specifies how Pub/Sub retries message delivery for - // this subscription. - // - // If not set, the default retry policy is applied. This generally implies - // that messages will be retried as soon as possible for healthy subscribers. - // RetryPolicy will be triggered on NACKs or acknowledgement deadline - // exceeded events for a given message. - RetryPolicy *RetryPolicy `protobuf:"bytes,14,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` - // Optional. Indicates whether the subscription is detached from its topic. - // Detached subscriptions don't receive messages from their topic and don't - // retain any backlog. `Pull` and `StreamingPull` requests will return - // FAILED_PRECONDITION. If the subscription is a push subscription, pushes to - // the endpoint will not be made. - Detached bool `protobuf:"varint,15,opt,name=detached,proto3" json:"detached,omitempty"` - // Optional. If true, Pub/Sub provides the following guarantees for the - // delivery of a message with a given value of `message_id` on this - // subscription: - // - // * The message sent to a subscriber is guaranteed not to be resent - // before the message's acknowledgement deadline expires. 
- // * An acknowledged message will not be resent to a subscriber. - // - // Note that subscribers may still receive multiple copies of a message - // when `enable_exactly_once_delivery` is true if the message was published - // multiple times by a publisher client. These copies are considered distinct - // by Pub/Sub and have distinct `message_id` values. - EnableExactlyOnceDelivery bool `protobuf:"varint,16,opt,name=enable_exactly_once_delivery,json=enableExactlyOnceDelivery,proto3" json:"enable_exactly_once_delivery,omitempty"` - // Output only. Indicates the minimum duration for which a message is retained - // after it is published to the subscription's topic. If this field is set, - // messages published to the subscription's topic in the last - // `topic_message_retention_duration` are always available to subscribers. See - // the `message_retention_duration` field in `Topic`. This field is set only - // in responses from the server; it is ignored if it is set in any requests. - TopicMessageRetentionDuration *durationpb.Duration `protobuf:"bytes,17,opt,name=topic_message_retention_duration,json=topicMessageRetentionDuration,proto3" json:"topic_message_retention_duration,omitempty"` - // Output only. An output-only field indicating whether or not the - // subscription can receive messages. - State Subscription_State `protobuf:"varint,19,opt,name=state,proto3,enum=google.pubsub.v1.Subscription_State" json:"state,omitempty"` -} - -func (x *Subscription) Reset() { - *x = Subscription{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Subscription) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Subscription) ProtoMessage() {} - -func (x *Subscription) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Subscription.ProtoReflect.Descriptor instead. 
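// The "if x != nil" bodies in every getter below are what make chained field
// access safe on nil messages, which matters for optional sub-messages like
// RetryPolicy. A two-line illustration:
package example

import (
	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

// minBackoff is safe even when sub is nil: GetRetryPolicy returns a nil
// *RetryPolicy, whose GetMinimumBackoff in turn returns a nil Duration, so
// no nil checks are needed at the call site.
func minBackoff(sub *pubsubpb.Subscription) *durationpb.Duration {
	return sub.GetRetryPolicy().GetMinimumBackoff()
}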
-func (*Subscription) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{18} -} - -func (x *Subscription) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Subscription) GetTopic() string { - if x != nil { - return x.Topic - } - return "" -} - -func (x *Subscription) GetPushConfig() *PushConfig { - if x != nil { - return x.PushConfig - } - return nil -} - -func (x *Subscription) GetBigqueryConfig() *BigQueryConfig { - if x != nil { - return x.BigqueryConfig - } - return nil -} - -func (x *Subscription) GetCloudStorageConfig() *CloudStorageConfig { - if x != nil { - return x.CloudStorageConfig - } - return nil -} - -func (x *Subscription) GetAckDeadlineSeconds() int32 { - if x != nil { - return x.AckDeadlineSeconds - } - return 0 -} - -func (x *Subscription) GetRetainAckedMessages() bool { - if x != nil { - return x.RetainAckedMessages - } - return false -} - -func (x *Subscription) GetMessageRetentionDuration() *durationpb.Duration { - if x != nil { - return x.MessageRetentionDuration - } - return nil -} - -func (x *Subscription) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -func (x *Subscription) GetEnableMessageOrdering() bool { - if x != nil { - return x.EnableMessageOrdering - } - return false -} - -func (x *Subscription) GetExpirationPolicy() *ExpirationPolicy { - if x != nil { - return x.ExpirationPolicy - } - return nil -} - -func (x *Subscription) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *Subscription) GetDeadLetterPolicy() *DeadLetterPolicy { - if x != nil { - return x.DeadLetterPolicy - } - return nil -} - -func (x *Subscription) GetRetryPolicy() *RetryPolicy { - if x != nil { - return x.RetryPolicy - } - return nil -} - -func (x *Subscription) GetDetached() bool { - if x != nil { - return x.Detached - } - return false -} - -func (x *Subscription) GetEnableExactlyOnceDelivery() bool { - if x != nil { - return x.EnableExactlyOnceDelivery - } - return false -} - -func (x *Subscription) GetTopicMessageRetentionDuration() *durationpb.Duration { - if x != nil { - return x.TopicMessageRetentionDuration - } - return nil -} - -func (x *Subscription) GetState() Subscription_State { - if x != nil { - return x.State - } - return Subscription_STATE_UNSPECIFIED -} - -// A policy that specifies how Pub/Sub retries message delivery. -// -// Retry delay will be exponential based on provided minimum and maximum -// backoffs. https://en.wikipedia.org/wiki/Exponential_backoff. -// -// RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded -// events for a given message. -// -// Retry Policy is implemented on a best effort basis. At times, the delay -// between consecutive deliveries may not match the configuration. That is, -// delay can be more or less than configured backoff. -type RetryPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The minimum delay between consecutive deliveries of a given - // message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. - MinimumBackoff *durationpb.Duration `protobuf:"bytes,1,opt,name=minimum_backoff,json=minimumBackoff,proto3" json:"minimum_backoff,omitempty"` - // Optional. The maximum delay between consecutive deliveries of a given - // message. Value should be between 0 and 600 seconds. Defaults to 600 - // seconds. 
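// RetryPolicy in practice: redeliveries back off exponentially between the
// two bounds, both of which must fall in the documented 0-600s range. The
// chosen values are illustrative:
package example

import (
	"time"

	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

// newRetryPolicy starts redelivery 10s after a NACK or expired ack deadline
// and caps the exponential delay at 5 minutes.
func newRetryPolicy() *pubsubpb.RetryPolicy {
	return &pubsubpb.RetryPolicy{
		MinimumBackoff: durationpb.New(10 * time.Second),
		MaximumBackoff: durationpb.New(5 * time.Minute),
	}
}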
- MaximumBackoff *durationpb.Duration `protobuf:"bytes,2,opt,name=maximum_backoff,json=maximumBackoff,proto3" json:"maximum_backoff,omitempty"` -} - -func (x *RetryPolicy) Reset() { - *x = RetryPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RetryPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RetryPolicy) ProtoMessage() {} - -func (x *RetryPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead. -func (*RetryPolicy) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{19} -} - -func (x *RetryPolicy) GetMinimumBackoff() *durationpb.Duration { - if x != nil { - return x.MinimumBackoff - } - return nil -} - -func (x *RetryPolicy) GetMaximumBackoff() *durationpb.Duration { - if x != nil { - return x.MaximumBackoff - } - return nil -} - -// Dead lettering is done on a best effort basis. The same message might be -// dead lettered multiple times. -// -// If validation on any of the fields fails at subscription creation/updation, -// the create/update subscription request will fail. -type DeadLetterPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The name of the topic to which dead letter messages should be - // published. Format is `projects/{project}/topics/{topic}`.The Pub/Sub - // service account associated with the enclosing subscription's parent project - // (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must - // have permission to Publish() to this topic. - // - // The operation will fail if the topic does not exist. - // Users should ensure that there is a subscription attached to this topic - // since messages published to a topic with no subscriptions are lost. - DeadLetterTopic string `protobuf:"bytes,1,opt,name=dead_letter_topic,json=deadLetterTopic,proto3" json:"dead_letter_topic,omitempty"` - // Optional. The maximum number of delivery attempts for any message. The - // value must be between 5 and 100. - // - // The number of delivery attempts is defined as 1 + (the sum of number of - // NACKs and number of times the acknowledgement deadline has been exceeded - // for the message). - // - // A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that - // client libraries may automatically extend ack_deadlines. - // - // This field will be honored on a best effort basis. - // - // If this parameter is 0, a default value of 5 is used. 
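// A DeadLetterPolicy matching the constraints above: attempts are counted as
// 1 + NACKs + expired ack deadlines, and the limit must be between 5 and 100
// (0 selects the default of 5). The topic name is illustrative; it needs its
// own subscription, and the project's Pub/Sub service agent must be able to
// Publish to it and Acknowledge on the source subscription.
package example

import (
	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

// newDeadLetterPolicy forwards a message after its fifth failed delivery.
func newDeadLetterPolicy() *pubsubpb.DeadLetterPolicy {
	return &pubsubpb.DeadLetterPolicy{
		DeadLetterTopic:     "projects/my-project/topics/dead-letter",
		MaxDeliveryAttempts: 5,
	}
}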
- MaxDeliveryAttempts int32 `protobuf:"varint,2,opt,name=max_delivery_attempts,json=maxDeliveryAttempts,proto3" json:"max_delivery_attempts,omitempty"` -} - -func (x *DeadLetterPolicy) Reset() { - *x = DeadLetterPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeadLetterPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeadLetterPolicy) ProtoMessage() {} - -func (x *DeadLetterPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeadLetterPolicy.ProtoReflect.Descriptor instead. -func (*DeadLetterPolicy) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20} -} - -func (x *DeadLetterPolicy) GetDeadLetterTopic() string { - if x != nil { - return x.DeadLetterTopic - } - return "" -} - -func (x *DeadLetterPolicy) GetMaxDeliveryAttempts() int32 { - if x != nil { - return x.MaxDeliveryAttempts - } - return 0 -} - -// A policy that specifies the conditions for resource expiration (i.e., -// automatic resource deletion). -type ExpirationPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. Specifies the "time-to-live" duration for an associated resource. - // The resource expires if it is not active for a period of `ttl`. The - // definition of "activity" depends on the type of the associated resource. - // The minimum and maximum allowed values for `ttl` depend on the type of the - // associated resource, as well. If `ttl` is not set, the associated resource - // never expires. - Ttl *durationpb.Duration `protobuf:"bytes,1,opt,name=ttl,proto3" json:"ttl,omitempty"` -} - -func (x *ExpirationPolicy) Reset() { - *x = ExpirationPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExpirationPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExpirationPolicy) ProtoMessage() {} - -func (x *ExpirationPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExpirationPolicy.ProtoReflect.Descriptor instead. -func (*ExpirationPolicy) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{21} -} - -func (x *ExpirationPolicy) GetTtl() *durationpb.Duration { - if x != nil { - return x.Ttl - } - return nil -} - -// Configuration for a push delivery endpoint. -type PushConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. A URL locating the endpoint to which messages should be pushed. - // For example, a Webhook endpoint might use `https://example.com/push`. 
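// Expiration has three distinct shapes per the ttl comment above, and two of
// them are easy to confuse: a nil policy means the 31-day default, while a
// non-nil policy with ttl unset means the resource never expires. A sketch:
package example

import (
	"time"

	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

// neverExpires sets the policy but leaves Ttl nil: no expiration at all.
func neverExpires() *pubsubpb.ExpirationPolicy {
	return &pubsubpb.ExpirationPolicy{}
}

// expiresAfter deletes the resource once it has been inactive for d
// (at least one day for subscriptions).
func expiresAfter(d time.Duration) *pubsubpb.ExpirationPolicy {
	return &pubsubpb.ExpirationPolicy{Ttl: durationpb.New(d)}
}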
- PushEndpoint string `protobuf:"bytes,1,opt,name=push_endpoint,json=pushEndpoint,proto3" json:"push_endpoint,omitempty"` - // Optional. Endpoint configuration attributes that can be used to control - // different aspects of the message delivery. - // - // The only currently supported attribute is `x-goog-version`, which you can - // use to change the format of the pushed message. This attribute - // indicates the version of the data expected by the endpoint. This - // controls the shape of the pushed message (i.e., its fields and metadata). - // - // If not present during the `CreateSubscription` call, it will default to - // the version of the Pub/Sub API used to make such call. If not present in a - // `ModifyPushConfig` call, its value will not be changed. `GetSubscription` - // calls will always return a valid version, even if the subscription was - // created without this attribute. - // - // The only supported values for the `x-goog-version` attribute are: - // - // * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. - // * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API. - // - // For example: - // `attributes { "x-goog-version": "v1" }` - Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // An authentication method used by push endpoints to verify the source of - // push requests. This can be used with push endpoints that are private by - // default to allow requests only from the Pub/Sub system, for example. - // This field is optional and should be set only by users interested in - // authenticated push. - // - // Types that are assignable to AuthenticationMethod: - // - // *PushConfig_OidcToken_ - AuthenticationMethod isPushConfig_AuthenticationMethod `protobuf_oneof:"authentication_method"` - // The format of the delivered message to the push endpoint is defined by - // the chosen wrapper. When unset, `PubsubWrapper` is used. - // - // Types that are assignable to Wrapper: - // - // *PushConfig_PubsubWrapper_ - // *PushConfig_NoWrapper_ - Wrapper isPushConfig_Wrapper `protobuf_oneof:"wrapper"` -} - -func (x *PushConfig) Reset() { - *x = PushConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PushConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PushConfig) ProtoMessage() {} - -func (x *PushConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PushConfig.ProtoReflect.Descriptor instead. 
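// The two oneofs above are populated through their generated wrapper structs.
// A sketch of a push endpoint that authenticates with an OIDC token and
// receives the raw message body instead of the JSON PubsubMessage envelope
// (the endpoint and service account are illustrative; NoWrapper's
// write_metadata field is assumed from the same generated file):
package example

import (
	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

// newPushConfig selects the oidc_token authentication case and the
// no_wrapper payload case.
func newPushConfig() *pubsubpb.PushConfig {
	return &pubsubpb.PushConfig{
		PushEndpoint: "https://example.com/push",
		AuthenticationMethod: &pubsubpb.PushConfig_OidcToken_{
			OidcToken: &pubsubpb.PushConfig_OidcToken{
				ServiceAccountEmail: "pusher@my-project.iam.gserviceaccount.com",
			},
		},
		Wrapper: &pubsubpb.PushConfig_NoWrapper_{
			NoWrapper: &pubsubpb.PushConfig_NoWrapper{WriteMetadata: true},
		},
	}
}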
-func (*PushConfig) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22} -} - -func (x *PushConfig) GetPushEndpoint() string { - if x != nil { - return x.PushEndpoint - } - return "" -} - -func (x *PushConfig) GetAttributes() map[string]string { - if x != nil { - return x.Attributes - } - return nil -} - -func (m *PushConfig) GetAuthenticationMethod() isPushConfig_AuthenticationMethod { - if m != nil { - return m.AuthenticationMethod - } - return nil -} - -func (x *PushConfig) GetOidcToken() *PushConfig_OidcToken { - if x, ok := x.GetAuthenticationMethod().(*PushConfig_OidcToken_); ok { - return x.OidcToken - } - return nil -} - -func (m *PushConfig) GetWrapper() isPushConfig_Wrapper { - if m != nil { - return m.Wrapper - } - return nil -} - -func (x *PushConfig) GetPubsubWrapper() *PushConfig_PubsubWrapper { - if x, ok := x.GetWrapper().(*PushConfig_PubsubWrapper_); ok { - return x.PubsubWrapper - } - return nil -} - -func (x *PushConfig) GetNoWrapper() *PushConfig_NoWrapper { - if x, ok := x.GetWrapper().(*PushConfig_NoWrapper_); ok { - return x.NoWrapper - } - return nil -} - -type isPushConfig_AuthenticationMethod interface { - isPushConfig_AuthenticationMethod() -} - -type PushConfig_OidcToken_ struct { - // Optional. If specified, Pub/Sub will generate and attach an OIDC JWT - // token as an `Authorization` header in the HTTP request for every pushed - // message. - OidcToken *PushConfig_OidcToken `protobuf:"bytes,3,opt,name=oidc_token,json=oidcToken,proto3,oneof"` -} - -func (*PushConfig_OidcToken_) isPushConfig_AuthenticationMethod() {} - -type isPushConfig_Wrapper interface { - isPushConfig_Wrapper() -} - -type PushConfig_PubsubWrapper_ struct { - // Optional. When set, the payload to the push endpoint is in the form of - // the JSON representation of a PubsubMessage - // (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage). - PubsubWrapper *PushConfig_PubsubWrapper `protobuf:"bytes,4,opt,name=pubsub_wrapper,json=pubsubWrapper,proto3,oneof"` -} - -type PushConfig_NoWrapper_ struct { - // Optional. When set, the payload to the push endpoint is not wrapped. - NoWrapper *PushConfig_NoWrapper `protobuf:"bytes,5,opt,name=no_wrapper,json=noWrapper,proto3,oneof"` -} - -func (*PushConfig_PubsubWrapper_) isPushConfig_Wrapper() {} - -func (*PushConfig_NoWrapper_) isPushConfig_Wrapper() {} - -// Configuration for a BigQuery subscription. -type BigQueryConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The name of the table to which to write data, of the form - // {projectId}.{datasetId}.{tableId} - Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` - // Optional. When true, use the topic's schema as the columns to write to in - // BigQuery, if it exists. `use_topic_schema` and `use_table_schema` cannot be - // enabled at the same time. - UseTopicSchema bool `protobuf:"varint,2,opt,name=use_topic_schema,json=useTopicSchema,proto3" json:"use_topic_schema,omitempty"` - // Optional. When true, write the subscription name, message_id, publish_time, - // attributes, and ordering_key to additional columns in the table. The - // subscription name, message_id, and publish_time fields are put in their own - // columns while all other message properties (other than data) are written to - // a JSON object in the attributes column. 
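// ---------------------------------------------------------------------------
// [Editor's aside — illustrative sketch, not part of the diff.] How the
// PushConfig oneof wrappers above are populated in practice; the endpoint and
// service-account values are hypothetical. Reuses the SubscriberClient `c`
// and imports from the earlier aside. Passing an empty PushConfig{} instead
// reverts the subscription to pull delivery.
func configurePush(ctx context.Context, c *subscriber.SubscriberClient) error {
	return c.ModifyPushConfig(ctx, &pubsubpb.ModifyPushConfigRequest{
		Subscription: "projects/my-proj/subscriptions/my-sub",
		PushConfig: &pubsubpb.PushConfig{
			PushEndpoint: "https://example.com/push",
			// An OIDC token lets the endpoint verify that requests really
			// come from Pub/Sub.
			AuthenticationMethod: &pubsubpb.PushConfig_OidcToken_{
				OidcToken: &pubsubpb.PushConfig_OidcToken{
					ServiceAccountEmail: "pusher@my-proj.iam.gserviceaccount.com",
					Audience:            "https://example.com/push",
				},
			},
			// Wrapper left unset: the default PubsubWrapper JSON envelope is used.
		},
	})
}
// ---------------------------------------------------------------------------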
- WriteMetadata bool `protobuf:"varint,3,opt,name=write_metadata,json=writeMetadata,proto3" json:"write_metadata,omitempty"` - // Optional. When true and use_topic_schema is true, any fields that are a - // part of the topic schema that are not part of the BigQuery table schema are - // dropped when writing to BigQuery. Otherwise, the schemas must be kept in - // sync and any messages with extra fields are not written and remain in the - // subscription's backlog. - DropUnknownFields bool `protobuf:"varint,4,opt,name=drop_unknown_fields,json=dropUnknownFields,proto3" json:"drop_unknown_fields,omitempty"` - // Output only. An output-only field that indicates whether or not the - // subscription can receive messages. - State BigQueryConfig_State `protobuf:"varint,5,opt,name=state,proto3,enum=google.pubsub.v1.BigQueryConfig_State" json:"state,omitempty"` - // Optional. When true, use the BigQuery table's schema as the columns to - // write to in BigQuery. `use_table_schema` and `use_topic_schema` cannot be - // enabled at the same time. - UseTableSchema bool `protobuf:"varint,6,opt,name=use_table_schema,json=useTableSchema,proto3" json:"use_table_schema,omitempty"` - // Optional. The service account to use to write to BigQuery. The subscription - // creator or updater that specifies this field must have - // `iam.serviceAccounts.actAs` permission on the service account. If not - // specified, the Pub/Sub [service - // agent](https://cloud.google.com/iam/docs/service-agents), - // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. - ServiceAccountEmail string `protobuf:"bytes,7,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` -} - -func (x *BigQueryConfig) Reset() { - *x = BigQueryConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BigQueryConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BigQueryConfig) ProtoMessage() {} - -func (x *BigQueryConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BigQueryConfig.ProtoReflect.Descriptor instead. -func (*BigQueryConfig) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23} -} - -func (x *BigQueryConfig) GetTable() string { - if x != nil { - return x.Table - } - return "" -} - -func (x *BigQueryConfig) GetUseTopicSchema() bool { - if x != nil { - return x.UseTopicSchema - } - return false -} - -func (x *BigQueryConfig) GetWriteMetadata() bool { - if x != nil { - return x.WriteMetadata - } - return false -} - -func (x *BigQueryConfig) GetDropUnknownFields() bool { - if x != nil { - return x.DropUnknownFields - } - return false -} - -func (x *BigQueryConfig) GetState() BigQueryConfig_State { - if x != nil { - return x.State - } - return BigQueryConfig_STATE_UNSPECIFIED -} - -func (x *BigQueryConfig) GetUseTableSchema() bool { - if x != nil { - return x.UseTableSchema - } - return false -} - -func (x *BigQueryConfig) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -// Configuration for a Cloud Storage subscription. 
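// ---------------------------------------------------------------------------
// [Editor's aside — illustrative sketch, not part of the diff.] Creating a
// subscription that writes straight to BigQuery, per the BigQueryConfig
// fields above; the table and names are hypothetical. Note use_topic_schema
// and use_table_schema are mutually exclusive per the field comments.
func createBigQuerySub(ctx context.Context, c *subscriber.SubscriberClient) error {
	_, err := c.CreateSubscription(ctx, &pubsubpb.Subscription{
		Name:  "projects/my-proj/subscriptions/events-to-bq",
		Topic: "projects/my-proj/topics/events",
		BigqueryConfig: &pubsubpb.BigQueryConfig{
			Table:         "my-proj.my_dataset.events", // {projectId}.{datasetId}.{tableId}
			WriteMetadata: true,                        // adds message_id, publish_time, ... columns
		},
	})
	return err
}
// ---------------------------------------------------------------------------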
-type CloudStorageConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. User-provided name for the Cloud Storage bucket. - // The bucket must be created by the user. The bucket name must be without - // any prefix like "gs://". See the [bucket naming - // requirements] (https://cloud.google.com/storage/docs/buckets#naming). - Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Optional. User-provided prefix for Cloud Storage filename. See the [object - // naming requirements](https://cloud.google.com/storage/docs/objects#naming). - FilenamePrefix string `protobuf:"bytes,2,opt,name=filename_prefix,json=filenamePrefix,proto3" json:"filename_prefix,omitempty"` - // Optional. User-provided suffix for Cloud Storage filename. See the [object - // naming requirements](https://cloud.google.com/storage/docs/objects#naming). - // Must not end in "/". - FilenameSuffix string `protobuf:"bytes,3,opt,name=filename_suffix,json=filenameSuffix,proto3" json:"filename_suffix,omitempty"` - // Optional. User-provided format string specifying how to represent datetimes - // in Cloud Storage filenames. See the [datetime format - // guidance](https://cloud.google.com/pubsub/docs/create-cloudstorage-subscription#file_names). - FilenameDatetimeFormat string `protobuf:"bytes,10,opt,name=filename_datetime_format,json=filenameDatetimeFormat,proto3" json:"filename_datetime_format,omitempty"` - // Defaults to text format. - // - // Types that are assignable to OutputFormat: - // - // *CloudStorageConfig_TextConfig_ - // *CloudStorageConfig_AvroConfig_ - OutputFormat isCloudStorageConfig_OutputFormat `protobuf_oneof:"output_format"` - // Optional. The maximum duration that can elapse before a new Cloud Storage - // file is created. Min 1 minute, max 10 minutes, default 5 minutes. May not - // exceed the subscription's acknowledgement deadline. - MaxDuration *durationpb.Duration `protobuf:"bytes,6,opt,name=max_duration,json=maxDuration,proto3" json:"max_duration,omitempty"` - // Optional. The maximum bytes that can be written to a Cloud Storage file - // before a new file is created. Min 1 KB, max 10 GiB. The max_bytes limit may - // be exceeded in cases where messages are larger than the limit. - MaxBytes int64 `protobuf:"varint,7,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` - // Optional. The maximum number of messages that can be written to a Cloud - // Storage file before a new file is created. Min 1000 messages. - MaxMessages int64 `protobuf:"varint,8,opt,name=max_messages,json=maxMessages,proto3" json:"max_messages,omitempty"` - // Output only. An output-only field that indicates whether or not the - // subscription can receive messages. - State CloudStorageConfig_State `protobuf:"varint,9,opt,name=state,proto3,enum=google.pubsub.v1.CloudStorageConfig_State" json:"state,omitempty"` - // Optional. The service account to use to write to Cloud Storage. The - // subscription creator or updater that specifies this field must have - // `iam.serviceAccounts.actAs` permission on the service account. If not - // specified, the Pub/Sub - // [service agent](https://cloud.google.com/iam/docs/service-agents), - // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. 
- ServiceAccountEmail string `protobuf:"bytes,11,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` -} - -func (x *CloudStorageConfig) Reset() { - *x = CloudStorageConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CloudStorageConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CloudStorageConfig) ProtoMessage() {} - -func (x *CloudStorageConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CloudStorageConfig.ProtoReflect.Descriptor instead. -func (*CloudStorageConfig) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24} -} - -func (x *CloudStorageConfig) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *CloudStorageConfig) GetFilenamePrefix() string { - if x != nil { - return x.FilenamePrefix - } - return "" -} - -func (x *CloudStorageConfig) GetFilenameSuffix() string { - if x != nil { - return x.FilenameSuffix - } - return "" -} - -func (x *CloudStorageConfig) GetFilenameDatetimeFormat() string { - if x != nil { - return x.FilenameDatetimeFormat - } - return "" -} - -func (m *CloudStorageConfig) GetOutputFormat() isCloudStorageConfig_OutputFormat { - if m != nil { - return m.OutputFormat - } - return nil -} - -func (x *CloudStorageConfig) GetTextConfig() *CloudStorageConfig_TextConfig { - if x, ok := x.GetOutputFormat().(*CloudStorageConfig_TextConfig_); ok { - return x.TextConfig - } - return nil -} - -func (x *CloudStorageConfig) GetAvroConfig() *CloudStorageConfig_AvroConfig { - if x, ok := x.GetOutputFormat().(*CloudStorageConfig_AvroConfig_); ok { - return x.AvroConfig - } - return nil -} - -func (x *CloudStorageConfig) GetMaxDuration() *durationpb.Duration { - if x != nil { - return x.MaxDuration - } - return nil -} - -func (x *CloudStorageConfig) GetMaxBytes() int64 { - if x != nil { - return x.MaxBytes - } - return 0 -} - -func (x *CloudStorageConfig) GetMaxMessages() int64 { - if x != nil { - return x.MaxMessages - } - return 0 -} - -func (x *CloudStorageConfig) GetState() CloudStorageConfig_State { - if x != nil { - return x.State - } - return CloudStorageConfig_STATE_UNSPECIFIED -} - -func (x *CloudStorageConfig) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -type isCloudStorageConfig_OutputFormat interface { - isCloudStorageConfig_OutputFormat() -} - -type CloudStorageConfig_TextConfig_ struct { - // Optional. If set, message data will be written to Cloud Storage in text - // format. - TextConfig *CloudStorageConfig_TextConfig `protobuf:"bytes,4,opt,name=text_config,json=textConfig,proto3,oneof"` -} - -type CloudStorageConfig_AvroConfig_ struct { - // Optional. If set, message data will be written to Cloud Storage in Avro - // format. 
- AvroConfig *CloudStorageConfig_AvroConfig `protobuf:"bytes,5,opt,name=avro_config,json=avroConfig,proto3,oneof"` -} - -func (*CloudStorageConfig_TextConfig_) isCloudStorageConfig_OutputFormat() {} - -func (*CloudStorageConfig_AvroConfig_) isCloudStorageConfig_OutputFormat() {} - -// A message and its corresponding acknowledgment ID. -type ReceivedMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. This ID can be used to acknowledge the received message. - AckId string `protobuf:"bytes,1,opt,name=ack_id,json=ackId,proto3" json:"ack_id,omitempty"` - // Optional. The message. - Message *PubsubMessage `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // Optional. The approximate number of times that Pub/Sub has attempted to - // deliver the associated message to a subscriber. - // - // More precisely, this is 1 + (number of NACKs) + - // (number of ack_deadline exceeds) for this message. - // - // A NACK is any call to ModifyAckDeadline with a 0 deadline. An ack_deadline - // exceeds event is whenever a message is not acknowledged within - // ack_deadline. Note that ack_deadline is initially - // Subscription.ackDeadlineSeconds, but may get extended automatically by - // the client library. - // - // Upon the first delivery of a given message, `delivery_attempt` will have a - // value of 1. The value is calculated at best effort and is approximate. - // - // If a DeadLetterPolicy is not set on the subscription, this will be 0. - DeliveryAttempt int32 `protobuf:"varint,3,opt,name=delivery_attempt,json=deliveryAttempt,proto3" json:"delivery_attempt,omitempty"` -} - -func (x *ReceivedMessage) Reset() { - *x = ReceivedMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReceivedMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReceivedMessage) ProtoMessage() {} - -func (x *ReceivedMessage) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReceivedMessage.ProtoReflect.Descriptor instead. -func (*ReceivedMessage) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{25} -} - -func (x *ReceivedMessage) GetAckId() string { - if x != nil { - return x.AckId - } - return "" -} - -func (x *ReceivedMessage) GetMessage() *PubsubMessage { - if x != nil { - return x.Message - } - return nil -} - -func (x *ReceivedMessage) GetDeliveryAttempt() int32 { - if x != nil { - return x.DeliveryAttempt - } - return 0 -} - -// Request for the GetSubscription method. -type GetSubscriptionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the subscription to get. - // Format is `projects/{project}/subscriptions/{sub}`. 
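// ---------------------------------------------------------------------------
// [Editor's aside — illustrative sketch, not part of the diff.] The
// CloudStorageConfig above, filled in for a text-format subscription with
// file-rotation limits; bucket and names are hypothetical, and durationpb /
// time come from the first aside's imports.
func createStorageSub(ctx context.Context, c *subscriber.SubscriberClient) error {
	_, err := c.CreateSubscription(ctx, &pubsubpb.Subscription{
		Name:  "projects/my-proj/subscriptions/events-to-gcs",
		Topic: "projects/my-proj/topics/events",
		CloudStorageConfig: &pubsubpb.CloudStorageConfig{
			Bucket:         "my-archive-bucket", // no "gs://" prefix, per the field comment
			FilenamePrefix: "events/",
			FilenameSuffix: ".txt",
			OutputFormat: &pubsubpb.CloudStorageConfig_TextConfig_{
				TextConfig: &pubsubpb.CloudStorageConfig_TextConfig{},
			},
			MaxDuration: durationpb.New(5 * time.Minute), // allowed range 1..10 minutes
			MaxBytes:    1 << 30,                         // 1 GiB, within the 1 KB..10 GiB bounds
		},
	})
	return err
}
// ---------------------------------------------------------------------------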
- Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` -} - -func (x *GetSubscriptionRequest) Reset() { - *x = GetSubscriptionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetSubscriptionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetSubscriptionRequest) ProtoMessage() {} - -func (x *GetSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetSubscriptionRequest.ProtoReflect.Descriptor instead. -func (*GetSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26} -} - -func (x *GetSubscriptionRequest) GetSubscription() string { - if x != nil { - return x.Subscription - } - return "" -} - -// Request for the UpdateSubscription method. -type UpdateSubscriptionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The updated subscription object. - Subscription *Subscription `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` - // Required. Indicates which fields in the provided subscription to update. - // Must be specified and non-empty. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *UpdateSubscriptionRequest) Reset() { - *x = UpdateSubscriptionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateSubscriptionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateSubscriptionRequest) ProtoMessage() {} - -func (x *UpdateSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateSubscriptionRequest.ProtoReflect.Descriptor instead. -func (*UpdateSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{27} -} - -func (x *UpdateSubscriptionRequest) GetSubscription() *Subscription { - if x != nil { - return x.Subscription - } - return nil -} - -func (x *UpdateSubscriptionRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// Request for the `ListSubscriptions` method. -type ListSubscriptionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the project in which to list subscriptions. - // Format is `projects/{project-id}`. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // Optional. Maximum number of subscriptions to return. 
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // Optional. The value returned by the last `ListSubscriptionsResponse`; - // indicates that this is a continuation of a prior `ListSubscriptions` call, - // and that the system should return the next page of data. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListSubscriptionsRequest) Reset() { - *x = ListSubscriptionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListSubscriptionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListSubscriptionsRequest) ProtoMessage() {} - -func (x *ListSubscriptionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListSubscriptionsRequest.ProtoReflect.Descriptor instead. -func (*ListSubscriptionsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28} -} - -func (x *ListSubscriptionsRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *ListSubscriptionsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListSubscriptionsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// Response for the `ListSubscriptions` method. -type ListSubscriptionsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The subscriptions that match the request. - Subscriptions []*Subscription `protobuf:"bytes,1,rep,name=subscriptions,proto3" json:"subscriptions,omitempty"` - // Optional. If not empty, indicates that there may be more subscriptions that - // match the request; this value should be passed in a new - // `ListSubscriptionsRequest` to get more subscriptions. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListSubscriptionsResponse) Reset() { - *x = ListSubscriptionsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListSubscriptionsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListSubscriptionsResponse) ProtoMessage() {} - -func (x *ListSubscriptionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListSubscriptionsResponse.ProtoReflect.Descriptor instead. 
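// ---------------------------------------------------------------------------
// [Editor's aside — illustrative sketch, not part of the diff.] The
// page_size / page_token / next_page_token plumbing above is normally hidden
// behind the gapic iterator, which fetches follow-up pages on demand.
// Project name is hypothetical.
func listSubs(ctx context.Context, c *subscriber.SubscriberClient) error {
	it := c.ListSubscriptions(ctx, &pubsubpb.ListSubscriptionsRequest{
		Project:  "projects/my-proj",
		PageSize: 100,
	})
	for {
		sub, err := it.Next()
		if err == iterator.Done { // google.golang.org/api/iterator
			return nil
		}
		if err != nil {
			return err
		}
		log.Println(sub.GetName())
	}
}
// ---------------------------------------------------------------------------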
-func (*ListSubscriptionsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{29} -} - -func (x *ListSubscriptionsResponse) GetSubscriptions() []*Subscription { - if x != nil { - return x.Subscriptions - } - return nil -} - -func (x *ListSubscriptionsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request for the DeleteSubscription method. -type DeleteSubscriptionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The subscription to delete. - // Format is `projects/{project}/subscriptions/{sub}`. - Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` -} - -func (x *DeleteSubscriptionRequest) Reset() { - *x = DeleteSubscriptionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteSubscriptionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteSubscriptionRequest) ProtoMessage() {} - -func (x *DeleteSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteSubscriptionRequest.ProtoReflect.Descriptor instead. -func (*DeleteSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{30} -} - -func (x *DeleteSubscriptionRequest) GetSubscription() string { - if x != nil { - return x.Subscription - } - return "" -} - -// Request for the ModifyPushConfig method. -type ModifyPushConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the subscription. - // Format is `projects/{project}/subscriptions/{sub}`. - Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` - // Required. The push configuration for future deliveries. - // - // An empty `pushConfig` indicates that the Pub/Sub system should - // stop pushing messages from the given subscription and allow - // messages to be pulled and acknowledged - effectively pausing - // the subscription if `Pull` or `StreamingPull` is not called. 
- PushConfig *PushConfig `protobuf:"bytes,2,opt,name=push_config,json=pushConfig,proto3" json:"push_config,omitempty"` -} - -func (x *ModifyPushConfigRequest) Reset() { - *x = ModifyPushConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ModifyPushConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ModifyPushConfigRequest) ProtoMessage() {} - -func (x *ModifyPushConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ModifyPushConfigRequest.ProtoReflect.Descriptor instead. -func (*ModifyPushConfigRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{31} -} - -func (x *ModifyPushConfigRequest) GetSubscription() string { - if x != nil { - return x.Subscription - } - return "" -} - -func (x *ModifyPushConfigRequest) GetPushConfig() *PushConfig { - if x != nil { - return x.PushConfig - } - return nil -} - -// Request for the `Pull` method. -type PullRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The subscription from which messages should be pulled. - // Format is `projects/{project}/subscriptions/{sub}`. - Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` - // Optional. If this field set to true, the system will respond immediately - // even if it there are no messages available to return in the `Pull` - // response. Otherwise, the system may wait (for a bounded amount of time) - // until at least one message is available, rather than returning no messages. - // Warning: setting this field to `true` is discouraged because it adversely - // impacts the performance of `Pull` operations. We recommend that users do - // not set this field. - // - // Deprecated: Marked as deprecated in google/pubsub/v1/pubsub.proto. - ReturnImmediately bool `protobuf:"varint,2,opt,name=return_immediately,json=returnImmediately,proto3" json:"return_immediately,omitempty"` - // Required. The maximum number of messages to return for this request. Must - // be a positive integer. The Pub/Sub system may return fewer than the number - // specified. - MaxMessages int32 `protobuf:"varint,3,opt,name=max_messages,json=maxMessages,proto3" json:"max_messages,omitempty"` -} - -func (x *PullRequest) Reset() { - *x = PullRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PullRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PullRequest) ProtoMessage() {} - -func (x *PullRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PullRequest.ProtoReflect.Descriptor instead. 
-func (*PullRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{32} -} - -func (x *PullRequest) GetSubscription() string { - if x != nil { - return x.Subscription - } - return "" -} - -// Deprecated: Marked as deprecated in google/pubsub/v1/pubsub.proto. -func (x *PullRequest) GetReturnImmediately() bool { - if x != nil { - return x.ReturnImmediately - } - return false -} - -func (x *PullRequest) GetMaxMessages() int32 { - if x != nil { - return x.MaxMessages - } - return 0 -} - -// Response for the `Pull` method. -type PullResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. Received Pub/Sub messages. The list will be empty if there are no - // more messages available in the backlog, or if no messages could be returned - // before the request timeout. For JSON, the response can be entirely - // empty. The Pub/Sub system may return fewer than the `maxMessages` requested - // even if there are more messages available in the backlog. - ReceivedMessages []*ReceivedMessage `protobuf:"bytes,1,rep,name=received_messages,json=receivedMessages,proto3" json:"received_messages,omitempty"` -} - -func (x *PullResponse) Reset() { - *x = PullResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PullResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PullResponse) ProtoMessage() {} - -func (x *PullResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PullResponse.ProtoReflect.Descriptor instead. -func (*PullResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{33} -} - -func (x *PullResponse) GetReceivedMessages() []*ReceivedMessage { - if x != nil { - return x.ReceivedMessages - } - return nil -} - -// Request for the ModifyAckDeadline method. -type ModifyAckDeadlineRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the subscription. - // Format is `projects/{project}/subscriptions/{sub}`. - Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` - // Required. List of acknowledgment IDs. - AckIds []string `protobuf:"bytes,4,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"` - // Required. The new ack deadline with respect to the time this request was - // sent to the Pub/Sub system. For example, if the value is 10, the new ack - // deadline will expire 10 seconds after the `ModifyAckDeadline` call was - // made. Specifying zero might immediately make the message available for - // delivery to another subscriber client. This typically results in an - // increase in the rate of message redeliveries (that is, duplicates). - // The minimum deadline you can specify is 0 seconds. - // The maximum deadline you can specify in a single request is 600 seconds - // (10 minutes). 
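// ---------------------------------------------------------------------------
// [Editor's aside — illustrative sketch, not part of the diff.] A unary
// Pull/Acknowledge round trip over the request/response types above. The
// deprecated return_immediately field is left unset, as its comment advises;
// the subscription name is hypothetical.
func pullOnce(ctx context.Context, c *subscriber.SubscriberClient) error {
	const sub = "projects/my-proj/subscriptions/my-sub"
	resp, err := c.Pull(ctx, &pubsubpb.PullRequest{
		Subscription: sub,
		MaxMessages:  10, // the server may return fewer
	})
	if err != nil {
		return err
	}
	ackIDs := make([]string, 0, len(resp.GetReceivedMessages()))
	for _, rm := range resp.GetReceivedMessages() {
		log.Printf("delivery attempt %d: %s", rm.GetDeliveryAttempt(), rm.GetMessage().GetData())
		ackIDs = append(ackIDs, rm.GetAckId())
	}
	if len(ackIDs) == 0 {
		return nil
	}
	return c.Acknowledge(ctx, &pubsubpb.AcknowledgeRequest{
		Subscription: sub,
		AckIds:       ackIDs,
	})
}
// ---------------------------------------------------------------------------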
- AckDeadlineSeconds int32 `protobuf:"varint,3,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds,proto3" json:"ack_deadline_seconds,omitempty"` -} - -func (x *ModifyAckDeadlineRequest) Reset() { - *x = ModifyAckDeadlineRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ModifyAckDeadlineRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ModifyAckDeadlineRequest) ProtoMessage() {} - -func (x *ModifyAckDeadlineRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ModifyAckDeadlineRequest.ProtoReflect.Descriptor instead. -func (*ModifyAckDeadlineRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34} -} - -func (x *ModifyAckDeadlineRequest) GetSubscription() string { - if x != nil { - return x.Subscription - } - return "" -} - -func (x *ModifyAckDeadlineRequest) GetAckIds() []string { - if x != nil { - return x.AckIds - } - return nil -} - -func (x *ModifyAckDeadlineRequest) GetAckDeadlineSeconds() int32 { - if x != nil { - return x.AckDeadlineSeconds - } - return 0 -} - -// Request for the Acknowledge method. -type AcknowledgeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The subscription whose message is being acknowledged. - // Format is `projects/{project}/subscriptions/{sub}`. - Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` - // Required. The acknowledgment ID for the messages being acknowledged that - // was returned by the Pub/Sub system in the `Pull` response. Must not be - // empty. - AckIds []string `protobuf:"bytes,2,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"` -} - -func (x *AcknowledgeRequest) Reset() { - *x = AcknowledgeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AcknowledgeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AcknowledgeRequest) ProtoMessage() {} - -func (x *AcknowledgeRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AcknowledgeRequest.ProtoReflect.Descriptor instead. -func (*AcknowledgeRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35} -} - -func (x *AcknowledgeRequest) GetSubscription() string { - if x != nil { - return x.Subscription - } - return "" -} - -func (x *AcknowledgeRequest) GetAckIds() []string { - if x != nil { - return x.AckIds - } - return nil -} - -// Request for the `StreamingPull` streaming RPC method. This request is used to -// establish the initial stream as well as to stream acknowledgements and ack -// deadline modifications from the client to the server. 
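// ---------------------------------------------------------------------------
// [Editor's aside — illustrative sketch, not part of the diff.] Per the
// ModifyAckDeadlineRequest comments above, a deadline of 0 effectively nacks
// a message (making it eligible for prompt redelivery), while a positive
// value, up to the 600 s maximum, extends it. Names are hypothetical.
func nack(ctx context.Context, c *subscriber.SubscriberClient, ackID string) error {
	return c.ModifyAckDeadline(ctx, &pubsubpb.ModifyAckDeadlineRequest{
		Subscription:       "projects/my-proj/subscriptions/my-sub",
		AckIds:             []string{ackID},
		AckDeadlineSeconds: 0, // 0 = redeliver; e.g. 60 would buy another minute
	})
}
// ---------------------------------------------------------------------------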
-type StreamingPullRequest struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Required. The subscription for which to initialize the new stream. This
-	// must be provided in the first request on the stream, and must not be set in
-	// subsequent requests from client to server.
-	// Format is `projects/{project}/subscriptions/{sub}`.
-	Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"`
-	// Optional. List of acknowledgement IDs for acknowledging previously received
-	// messages (received on this stream or a different stream). If an ack ID has
-	// expired, the corresponding message may be redelivered later. Acknowledging
-	// a message more than once will not result in an error. If the
-	// acknowledgement ID is malformed, the stream will be aborted with status
-	// `INVALID_ARGUMENT`.
-	AckIds []string `protobuf:"bytes,2,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"`
-	// Optional. The list of new ack deadlines for the IDs listed in
-	// `modify_deadline_ack_ids`. The size of this list must be the same as the
-	// size of `modify_deadline_ack_ids`. If it differs the stream will be aborted
-	// with `INVALID_ARGUMENT`. Each element in this list is applied to the
-	// element in the same position in `modify_deadline_ack_ids`. The new ack
-	// deadline is with respect to the time this request was sent to the Pub/Sub
-	// system. Must be >= 0. For example, if the value is 10, the new ack deadline
-	// will expire 10 seconds after this request is received. If the value is 0,
-	// the message is immediately made available for another streaming or
-	// non-streaming pull request. If the value is < 0 (an error), the stream will
-	// be aborted with status `INVALID_ARGUMENT`.
-	ModifyDeadlineSeconds []int32 `protobuf:"varint,3,rep,packed,name=modify_deadline_seconds,json=modifyDeadlineSeconds,proto3" json:"modify_deadline_seconds,omitempty"`
-	// Optional. List of acknowledgement IDs whose deadline will be modified based
-	// on the corresponding element in `modify_deadline_seconds`. This field can
-	// be used to indicate that more time is needed to process a message by the
-	// subscriber, or to make the message available for redelivery if the
-	// processing was interrupted.
-	ModifyDeadlineAckIds []string `protobuf:"bytes,4,rep,name=modify_deadline_ack_ids,json=modifyDeadlineAckIds,proto3" json:"modify_deadline_ack_ids,omitempty"`
-	// Required. The ack deadline to use for the stream. This must be provided in
-	// the first request on the stream, but it can also be updated on subsequent
-	// requests from client to server. The minimum deadline you can specify is 10
-	// seconds. The maximum deadline you can specify is 600 seconds (10 minutes).
-	StreamAckDeadlineSeconds int32 `protobuf:"varint,5,opt,name=stream_ack_deadline_seconds,json=streamAckDeadlineSeconds,proto3" json:"stream_ack_deadline_seconds,omitempty"`
-	// Optional. A unique identifier that is used to distinguish client instances
-	// from each other. Only needs to be provided on the initial request. When a
-	// stream disconnects and reconnects for the same stream, the client_id should
-	// be set to the same value so that state associated with the old stream can
-	// be transferred to the new stream. The same client_id should not be used for
-	// different client instances.
-	ClientId string `protobuf:"bytes,6,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
-	// Optional. Flow control settings for the maximum number of outstanding
-	// messages. When there are `max_outstanding_messages` currently sent to the
-	// streaming pull client that have not yet been acked or nacked, the server
-	// stops sending more messages. The sending of messages resumes once the
-	// number of outstanding messages is less than this value. If the value is
-	// <= 0, there is no limit to the number of outstanding messages. This
-	// property can only be set on the initial StreamingPullRequest. If it is set
-	// on a subsequent request, the stream will be aborted with status
-	// `INVALID_ARGUMENT`.
-	MaxOutstandingMessages int64 `protobuf:"varint,7,opt,name=max_outstanding_messages,json=maxOutstandingMessages,proto3" json:"max_outstanding_messages,omitempty"`
-	// Optional. Flow control settings for the maximum number of outstanding
-	// bytes. When there are `max_outstanding_bytes` or more worth of messages
-	// currently sent to the streaming pull client that have not yet been acked or
-	// nacked, the server will stop sending more messages. The sending of messages
-	// resumes once the number of outstanding bytes is less than this value. If
-	// the value is <= 0, there is no limit to the number of outstanding bytes.
-	// This property can only be set on the initial StreamingPullRequest. If it is
-	// set on a subsequent request, the stream will be aborted with status
-	// `INVALID_ARGUMENT`.
-	MaxOutstandingBytes int64 `protobuf:"varint,8,opt,name=max_outstanding_bytes,json=maxOutstandingBytes,proto3" json:"max_outstanding_bytes,omitempty"`
-}
-
-func (x *StreamingPullRequest) Reset() {
-	*x = StreamingPullRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *StreamingPullRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StreamingPullRequest) ProtoMessage() {}
-
-func (x *StreamingPullRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use StreamingPullRequest.ProtoReflect.Descriptor instead.
-func (*StreamingPullRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{36} -} - -func (x *StreamingPullRequest) GetSubscription() string { - if x != nil { - return x.Subscription - } - return "" -} - -func (x *StreamingPullRequest) GetAckIds() []string { - if x != nil { - return x.AckIds - } - return nil -} - -func (x *StreamingPullRequest) GetModifyDeadlineSeconds() []int32 { - if x != nil { - return x.ModifyDeadlineSeconds - } - return nil -} - -func (x *StreamingPullRequest) GetModifyDeadlineAckIds() []string { - if x != nil { - return x.ModifyDeadlineAckIds - } - return nil -} - -func (x *StreamingPullRequest) GetStreamAckDeadlineSeconds() int32 { - if x != nil { - return x.StreamAckDeadlineSeconds - } - return 0 -} - -func (x *StreamingPullRequest) GetClientId() string { - if x != nil { - return x.ClientId - } - return "" -} - -func (x *StreamingPullRequest) GetMaxOutstandingMessages() int64 { - if x != nil { - return x.MaxOutstandingMessages - } - return 0 -} - -func (x *StreamingPullRequest) GetMaxOutstandingBytes() int64 { - if x != nil { - return x.MaxOutstandingBytes - } - return 0 -} - -// Response for the `StreamingPull` method. This response is used to stream -// messages from the server to the client. -type StreamingPullResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. Received Pub/Sub messages. This will not be empty. - ReceivedMessages []*ReceivedMessage `protobuf:"bytes,1,rep,name=received_messages,json=receivedMessages,proto3" json:"received_messages,omitempty"` - // Optional. This field will only be set if `enable_exactly_once_delivery` is - // set to `true`. - AcknowledgeConfirmation *StreamingPullResponse_AcknowledgeConfirmation `protobuf:"bytes,5,opt,name=acknowledge_confirmation,json=acknowledgeConfirmation,proto3" json:"acknowledge_confirmation,omitempty"` - // Optional. This field will only be set if `enable_exactly_once_delivery` is - // set to `true`. - ModifyAckDeadlineConfirmation *StreamingPullResponse_ModifyAckDeadlineConfirmation `protobuf:"bytes,3,opt,name=modify_ack_deadline_confirmation,json=modifyAckDeadlineConfirmation,proto3" json:"modify_ack_deadline_confirmation,omitempty"` - // Optional. Properties associated with this subscription. - SubscriptionProperties *StreamingPullResponse_SubscriptionProperties `protobuf:"bytes,4,opt,name=subscription_properties,json=subscriptionProperties,proto3" json:"subscription_properties,omitempty"` -} - -func (x *StreamingPullResponse) Reset() { - *x = StreamingPullResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingPullResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingPullResponse) ProtoMessage() {} - -func (x *StreamingPullResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingPullResponse.ProtoReflect.Descriptor instead. 
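// ---------------------------------------------------------------------------
// [Editor's aside — illustrative sketch, not part of the diff.] Applications
// rarely drive StreamingPull by hand; the high-level client surfaces the
// stream's max_outstanding_messages / max_outstanding_bytes flow control as
// ReceiveSettings. Project and subscription IDs are hypothetical.
func receiveWithFlowControl(ctx context.Context) error {
	// Uses the high-level client: import gcppubsub "cloud.google.com/go/pubsub".
	client, err := gcppubsub.NewClient(ctx, "my-proj")
	if err != nil {
		return err
	}
	defer client.Close()

	sub := client.Subscription("my-sub")
	sub.ReceiveSettings.MaxOutstandingMessages = 1000   // maps to max_outstanding_messages
	sub.ReceiveSettings.MaxOutstandingBytes = 512 << 20 // maps to max_outstanding_bytes
	return sub.Receive(ctx, func(ctx context.Context, m *gcppubsub.Message) {
		m.Ack() // unacked messages count against the limits above
	})
}
// ---------------------------------------------------------------------------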
-func (*StreamingPullResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37} -} - -func (x *StreamingPullResponse) GetReceivedMessages() []*ReceivedMessage { - if x != nil { - return x.ReceivedMessages - } - return nil -} - -func (x *StreamingPullResponse) GetAcknowledgeConfirmation() *StreamingPullResponse_AcknowledgeConfirmation { - if x != nil { - return x.AcknowledgeConfirmation - } - return nil -} - -func (x *StreamingPullResponse) GetModifyAckDeadlineConfirmation() *StreamingPullResponse_ModifyAckDeadlineConfirmation { - if x != nil { - return x.ModifyAckDeadlineConfirmation - } - return nil -} - -func (x *StreamingPullResponse) GetSubscriptionProperties() *StreamingPullResponse_SubscriptionProperties { - if x != nil { - return x.SubscriptionProperties - } - return nil -} - -// Request for the `CreateSnapshot` method. -type CreateSnapshotRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. User-provided name for this snapshot. If the name is not provided - // in the request, the server will assign a random name for this snapshot on - // the same project as the subscription. Note that for REST API requests, you - // must specify a name. See the [resource name - // rules](https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). - // Format is `projects/{project}/snapshots/{snap}`. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. The subscription whose backlog the snapshot retains. - // Specifically, the created snapshot is guaranteed to retain: - // - // (a) The existing backlog on the subscription. More precisely, this is - // defined as the messages in the subscription's backlog that are - // unacknowledged upon the successful completion of the - // `CreateSnapshot` request; as well as: - // (b) Any messages published to the subscription's topic following the - // successful completion of the CreateSnapshot request. - // - // Format is `projects/{project}/subscriptions/{sub}`. - Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"` - // Optional. See [Creating and managing - // labels](https://cloud.google.com/pubsub/docs/labels). - Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *CreateSnapshotRequest) Reset() { - *x = CreateSnapshotRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateSnapshotRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateSnapshotRequest) ProtoMessage() {} - -func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateSnapshotRequest.ProtoReflect.Descriptor instead. 
-func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) {
-	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{38}
-}
-
-func (x *CreateSnapshotRequest) GetName() string {
-	if x != nil {
-		return x.Name
-	}
-	return ""
-}
-
-func (x *CreateSnapshotRequest) GetSubscription() string {
-	if x != nil {
-		return x.Subscription
-	}
-	return ""
-}
-
-func (x *CreateSnapshotRequest) GetLabels() map[string]string {
-	if x != nil {
-		return x.Labels
-	}
-	return nil
-}
-
-// Request for the UpdateSnapshot method.
-type UpdateSnapshotRequest struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Required. The updated snapshot object.
-	Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
-	// Required. Indicates which fields in the provided snapshot to update.
-	// Must be specified and non-empty.
-	UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
-}
-
-func (x *UpdateSnapshotRequest) Reset() {
-	*x = UpdateSnapshotRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *UpdateSnapshotRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateSnapshotRequest) ProtoMessage() {}
-
-func (x *UpdateSnapshotRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateSnapshotRequest.ProtoReflect.Descriptor instead.
-func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) {
-	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39}
-}
-
-func (x *UpdateSnapshotRequest) GetSnapshot() *Snapshot {
-	if x != nil {
-		return x.Snapshot
-	}
-	return nil
-}
-
-func (x *UpdateSnapshotRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
-	if x != nil {
-		return x.UpdateMask
-	}
-	return nil
-}
-
-// A snapshot resource. Snapshots are used in
-// [Seek](https://cloud.google.com/pubsub/docs/replay-overview)
-// operations, which allow you to manage message acknowledgments in bulk. That
-// is, you can set the acknowledgment state of messages in an existing
-// subscription to the state captured by a snapshot.
-type Snapshot struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Optional. The name of the snapshot.
-	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	// Optional. The name of the topic from which this snapshot is retaining
-	// messages.
-	Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
-	// Optional. The snapshot is guaranteed to exist up until this time.
-	// A newly-created snapshot expires no later than 7 days from the time of its
-	// creation. Its exact lifetime is determined at creation by the existing
-	// backlog in the source subscription. Specifically, the lifetime of the
-	// snapshot is `7 days - (age of oldest unacked message in the subscription)`.
-	// For example, consider a subscription whose oldest unacked message is 3 days
-	// old. If a snapshot is created from this subscription, the snapshot -- which
-	// will always capture this 3-day-old backlog as long as the snapshot
-	// exists -- will expire in 4 days. The service will refuse to create a
-	// snapshot that would expire in less than 1 hour after creation.
-	ExpireTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
-	// Optional. See [Creating and managing labels]
-	// (https://cloud.google.com/pubsub/docs/labels).
-	Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-}
-
-func (x *Snapshot) Reset() {
-	*x = Snapshot{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Snapshot) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Snapshot) ProtoMessage() {}
-
-func (x *Snapshot) ProtoReflect() protoreflect.Message {
-	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Snapshot.ProtoReflect.Descriptor instead.
-func (*Snapshot) Descriptor() ([]byte, []int) {
-	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{40}
-}
-
-func (x *Snapshot) GetName() string {
-	if x != nil {
-		return x.Name
-	}
-	return ""
-}
-
-func (x *Snapshot) GetTopic() string {
-	if x != nil {
-		return x.Topic
-	}
-	return ""
-}
-
-func (x *Snapshot) GetExpireTime() *timestamppb.Timestamp {
-	if x != nil {
-		return x.ExpireTime
-	}
-	return nil
-}
-
-func (x *Snapshot) GetLabels() map[string]string {
-	if x != nil {
-		return x.Labels
-	}
-	return nil
-}
-
-// Request for the GetSnapshot method.
-type GetSnapshotRequest struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Required. The name of the snapshot to get.
-	// Format is `projects/{project}/snapshots/{snap}`.
-	Snapshot string `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
-}
-
-func (x *GetSnapshotRequest) Reset() {
-	*x = GetSnapshotRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *GetSnapshotRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetSnapshotRequest) ProtoMessage() {}
-
-func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetSnapshotRequest.ProtoReflect.Descriptor instead.
-func (*GetSnapshotRequest) Descriptor() ([]byte, []int) {
-	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41}
-}
-
-func (x *GetSnapshotRequest) GetSnapshot() string {
-	if x != nil {
-		return x.Snapshot
-	}
-	return ""
-}
-
-// Request for the `ListSnapshots` method.
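// ---------------------------------------------------------------------------
// [Editor's aside — illustrative sketch, not part of the diff.] Creating a
// snapshot before a risky change; per the Snapshot.expire_time comment above,
// the returned expiry is at most 7 days out, minus the age of the oldest
// unacked message. Names are hypothetical.
func snapshotBacklog(ctx context.Context, c *subscriber.SubscriberClient) error {
	snap, err := c.CreateSnapshot(ctx, &pubsubpb.CreateSnapshotRequest{
		Name:         "projects/my-proj/snapshots/before-deploy",
		Subscription: "projects/my-proj/subscriptions/my-sub",
	})
	if err != nil {
		return err
	}
	log.Println("snapshot expires:", snap.GetExpireTime().AsTime())
	return nil
}
// ---------------------------------------------------------------------------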
-type ListSnapshotsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the project in which to list snapshots. - // Format is `projects/{project-id}`. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // Optional. Maximum number of snapshots to return. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // Optional. The value returned by the last `ListSnapshotsResponse`; indicates - // that this is a continuation of a prior `ListSnapshots` call, and that the - // system should return the next page of data. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListSnapshotsRequest) Reset() { - *x = ListSnapshotsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListSnapshotsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListSnapshotsRequest) ProtoMessage() {} - -func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead. -func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{42} -} - -func (x *ListSnapshotsRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *ListSnapshotsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListSnapshotsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// Response for the `ListSnapshots` method. -type ListSnapshotsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The resulting snapshots. - Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` - // Optional. If not empty, indicates that there may be more snapshot that - // match the request; this value should be passed in a new - // `ListSnapshotsRequest`. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListSnapshotsResponse) Reset() { - *x = ListSnapshotsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListSnapshotsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListSnapshotsResponse) ProtoMessage() {} - -func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead. 
-func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{43} -} - -func (x *ListSnapshotsResponse) GetSnapshots() []*Snapshot { - if x != nil { - return x.Snapshots - } - return nil -} - -func (x *ListSnapshotsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request for the `DeleteSnapshot` method. -type DeleteSnapshotRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the snapshot to delete. - // Format is `projects/{project}/snapshots/{snap}`. - Snapshot string `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` -} - -func (x *DeleteSnapshotRequest) Reset() { - *x = DeleteSnapshotRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteSnapshotRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteSnapshotRequest) ProtoMessage() {} - -func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteSnapshotRequest.ProtoReflect.Descriptor instead. -func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{44} -} - -func (x *DeleteSnapshotRequest) GetSnapshot() string { - if x != nil { - return x.Snapshot - } - return "" -} - -// Request for the `Seek` method. -type SeekRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The subscription to affect. - Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` - // Types that are assignable to Target: - // - // *SeekRequest_Time - // *SeekRequest_Snapshot - Target isSeekRequest_Target `protobuf_oneof:"target"` -} - -func (x *SeekRequest) Reset() { - *x = SeekRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SeekRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SeekRequest) ProtoMessage() {} - -func (x *SeekRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SeekRequest.ProtoReflect.Descriptor instead. 
-func (*SeekRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{45} -} - -func (x *SeekRequest) GetSubscription() string { - if x != nil { - return x.Subscription - } - return "" -} - -func (m *SeekRequest) GetTarget() isSeekRequest_Target { - if m != nil { - return m.Target - } - return nil -} - -func (x *SeekRequest) GetTime() *timestamppb.Timestamp { - if x, ok := x.GetTarget().(*SeekRequest_Time); ok { - return x.Time - } - return nil -} - -func (x *SeekRequest) GetSnapshot() string { - if x, ok := x.GetTarget().(*SeekRequest_Snapshot); ok { - return x.Snapshot - } - return "" -} - -type isSeekRequest_Target interface { - isSeekRequest_Target() -} - -type SeekRequest_Time struct { - // Optional. The time to seek to. - // Messages retained in the subscription that were published before this - // time are marked as acknowledged, and messages retained in the - // subscription that were published after this time are marked as - // unacknowledged. Note that this operation affects only those messages - // retained in the subscription (configured by the combination of - // `message_retention_duration` and `retain_acked_messages`). For example, - // if `time` corresponds to a point before the message retention - // window (or to a point before the system's notion of the subscription - // creation time), only retained messages will be marked as unacknowledged, - // and already-expunged messages will not be restored. - Time *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=time,proto3,oneof"` -} - -type SeekRequest_Snapshot struct { - // Optional. The snapshot to seek to. The snapshot's topic must be the same - // as that of the provided subscription. Format is - // `projects/{project}/snapshots/{snap}`. - Snapshot string `protobuf:"bytes,3,opt,name=snapshot,proto3,oneof"` -} - -func (*SeekRequest_Time) isSeekRequest_Target() {} - -func (*SeekRequest_Snapshot) isSeekRequest_Target() {} - -// Response for the `Seek` method (this response is empty). -type SeekResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *SeekResponse) Reset() { - *x = SeekResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SeekResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SeekResponse) ProtoMessage() {} - -func (x *SeekResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SeekResponse.ProtoReflect.Descriptor instead. -func (*SeekResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{46} -} - -// Ingestion settings for Amazon Kinesis Data Streams. -type IngestionDataSourceSettings_AwsKinesis struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Output only. An output-only field that indicates the state of the Kinesis - // ingestion source. 
- State IngestionDataSourceSettings_AwsKinesis_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.pubsub.v1.IngestionDataSourceSettings_AwsKinesis_State" json:"state,omitempty"` - // Required. The Kinesis stream ARN to ingest data from. - StreamArn string `protobuf:"bytes,2,opt,name=stream_arn,json=streamArn,proto3" json:"stream_arn,omitempty"` - // Required. The Kinesis consumer ARN to used for ingestion in Enhanced - // Fan-Out mode. The consumer must be already created and ready to be used. - ConsumerArn string `protobuf:"bytes,3,opt,name=consumer_arn,json=consumerArn,proto3" json:"consumer_arn,omitempty"` - // Required. AWS role ARN to be used for Federated Identity authentication - // with Kinesis. Check the Pub/Sub docs for how to set up this role and the - // required permissions that need to be attached to it. - AwsRoleArn string `protobuf:"bytes,4,opt,name=aws_role_arn,json=awsRoleArn,proto3" json:"aws_role_arn,omitempty"` - // Required. The GCP service account to be used for Federated Identity - // authentication with Kinesis (via a `AssumeRoleWithWebIdentity` call for - // the provided role). The `aws_role_arn` must be set up with - // `accounts.google.com:sub` equals to this service account number. - GcpServiceAccount string `protobuf:"bytes,5,opt,name=gcp_service_account,json=gcpServiceAccount,proto3" json:"gcp_service_account,omitempty"` -} - -func (x *IngestionDataSourceSettings_AwsKinesis) Reset() { - *x = IngestionDataSourceSettings_AwsKinesis{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IngestionDataSourceSettings_AwsKinesis) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IngestionDataSourceSettings_AwsKinesis) ProtoMessage() {} - -func (x *IngestionDataSourceSettings_AwsKinesis) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use IngestionDataSourceSettings_AwsKinesis.ProtoReflect.Descriptor instead. -func (*IngestionDataSourceSettings_AwsKinesis) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 0} -} - -func (x *IngestionDataSourceSettings_AwsKinesis) GetState() IngestionDataSourceSettings_AwsKinesis_State { - if x != nil { - return x.State - } - return IngestionDataSourceSettings_AwsKinesis_STATE_UNSPECIFIED -} - -func (x *IngestionDataSourceSettings_AwsKinesis) GetStreamArn() string { - if x != nil { - return x.StreamArn - } - return "" -} - -func (x *IngestionDataSourceSettings_AwsKinesis) GetConsumerArn() string { - if x != nil { - return x.ConsumerArn - } - return "" -} - -func (x *IngestionDataSourceSettings_AwsKinesis) GetAwsRoleArn() string { - if x != nil { - return x.AwsRoleArn - } - return "" -} - -func (x *IngestionDataSourceSettings_AwsKinesis) GetGcpServiceAccount() string { - if x != nil { - return x.GcpServiceAccount - } - return "" -} - -// Contains information needed for generating an -// [OpenID Connect -// token](https://developers.google.com/identity/protocols/OpenIDConnect). -type PushConfig_OidcToken struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. 
[Service account - // email](https://cloud.google.com/iam/docs/service-accounts) - // used for generating the OIDC token. For more information - // on setting up authentication, see - // [Push subscriptions](https://cloud.google.com/pubsub/docs/push). - ServiceAccountEmail string `protobuf:"bytes,1,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` - // Optional. Audience to be used when generating OIDC token. The audience - // claim identifies the recipients that the JWT is intended for. The - // audience value is a single case-sensitive string. Having multiple values - // (array) for the audience field is not supported. More info about the OIDC - // JWT token audience here: - // https://tools.ietf.org/html/rfc7519#section-4.1.3 Note: if not specified, - // the Push endpoint URL will be used. - Audience string `protobuf:"bytes,2,opt,name=audience,proto3" json:"audience,omitempty"` -} - -func (x *PushConfig_OidcToken) Reset() { - *x = PushConfig_OidcToken{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PushConfig_OidcToken) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PushConfig_OidcToken) ProtoMessage() {} - -func (x *PushConfig_OidcToken) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PushConfig_OidcToken.ProtoReflect.Descriptor instead. -func (*PushConfig_OidcToken) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 0} -} - -func (x *PushConfig_OidcToken) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -func (x *PushConfig_OidcToken) GetAudience() string { - if x != nil { - return x.Audience - } - return "" -} - -// The payload to the push endpoint is in the form of the JSON representation -// of a PubsubMessage -// (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage). -type PushConfig_PubsubWrapper struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *PushConfig_PubsubWrapper) Reset() { - *x = PushConfig_PubsubWrapper{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PushConfig_PubsubWrapper) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PushConfig_PubsubWrapper) ProtoMessage() {} - -func (x *PushConfig_PubsubWrapper) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PushConfig_PubsubWrapper.ProtoReflect.Descriptor instead. -func (*PushConfig_PubsubWrapper) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 1} -} - -// Sets the `data` field as the HTTP body for delivery. 
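`PushConfig_OidcToken` above and the delivery wrappers (`PubsubWrapper` above, `NoWrapper` just below) hang off two oneofs on `PushConfig`: `authentication_method` and `wrapper`. A sketch; the trailing-underscore wrapper names are protoc-gen-go's standard disambiguation for these nested types, and the endpoint and service-account values are hypothetical:

import (
	"context"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

// enableAuthenticatedPush points a subscription at an HTTPS endpoint,
// attaching an OIDC token and delivering the raw `data` field (NoWrapper).
func enableAuthenticatedPush(ctx context.Context, c *pubsub.SubscriberClient, sub string) error {
	return c.ModifyPushConfig(ctx, &pubsubpb.ModifyPushConfigRequest{
		Subscription: sub,
		PushConfig: &pubsubpb.PushConfig{
			PushEndpoint: "https://example.com/push", // hypothetical
			AuthenticationMethod: &pubsubpb.PushConfig_OidcToken_{
				OidcToken: &pubsubpb.PushConfig_OidcToken{
					// Audience defaults to the push endpoint URL when unset.
					ServiceAccountEmail: "pusher@my-project.iam.gserviceaccount.com", // hypothetical
				},
			},
			// NoWrapper sends the data field as the HTTP body; WriteMetadata
			// echoes message metadata into x-goog-pubsub-* request headers.
			Wrapper: &pubsubpb.PushConfig_NoWrapper_{
				NoWrapper: &pubsubpb.PushConfig_NoWrapper{WriteMetadata: true},
			},
		},
	})
}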
-type PushConfig_NoWrapper struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. When true, writes the Pub/Sub message metadata to - // `x-goog-pubsub-:` headers of the HTTP request. Writes the - // Pub/Sub message attributes to `:` headers of the HTTP request. - WriteMetadata bool `protobuf:"varint,1,opt,name=write_metadata,json=writeMetadata,proto3" json:"write_metadata,omitempty"` -} - -func (x *PushConfig_NoWrapper) Reset() { - *x = PushConfig_NoWrapper{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PushConfig_NoWrapper) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PushConfig_NoWrapper) ProtoMessage() {} - -func (x *PushConfig_NoWrapper) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PushConfig_NoWrapper.ProtoReflect.Descriptor instead. -func (*PushConfig_NoWrapper) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 2} -} - -func (x *PushConfig_NoWrapper) GetWriteMetadata() bool { - if x != nil { - return x.WriteMetadata - } - return false -} - -// Configuration for writing message data in text format. -// Message payloads will be written to files as raw text, separated by a -// newline. -type CloudStorageConfig_TextConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *CloudStorageConfig_TextConfig) Reset() { - *x = CloudStorageConfig_TextConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CloudStorageConfig_TextConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CloudStorageConfig_TextConfig) ProtoMessage() {} - -func (x *CloudStorageConfig_TextConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CloudStorageConfig_TextConfig.ProtoReflect.Descriptor instead. -func (*CloudStorageConfig_TextConfig) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 0} -} - -// Configuration for writing message data in Avro format. -// Message payloads and metadata will be written to files as an Avro binary. -type CloudStorageConfig_AvroConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. When true, write the subscription name, message_id, - // publish_time, attributes, and ordering_key as additional fields in the - // output. The subscription name, message_id, and publish_time fields are - // put in their own fields while all other message properties other than - // data (for example, an ordering_key, if present) are added as entries in - // the attributes map. 
- WriteMetadata bool `protobuf:"varint,1,opt,name=write_metadata,json=writeMetadata,proto3" json:"write_metadata,omitempty"` - // Optional. When true, the output Cloud Storage file will be serialized - // using the topic schema, if it exists. - UseTopicSchema bool `protobuf:"varint,2,opt,name=use_topic_schema,json=useTopicSchema,proto3" json:"use_topic_schema,omitempty"` -} - -func (x *CloudStorageConfig_AvroConfig) Reset() { - *x = CloudStorageConfig_AvroConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CloudStorageConfig_AvroConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CloudStorageConfig_AvroConfig) ProtoMessage() {} - -func (x *CloudStorageConfig_AvroConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CloudStorageConfig_AvroConfig.ProtoReflect.Descriptor instead. -func (*CloudStorageConfig_AvroConfig) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 1} -} - -func (x *CloudStorageConfig_AvroConfig) GetWriteMetadata() bool { - if x != nil { - return x.WriteMetadata - } - return false -} - -func (x *CloudStorageConfig_AvroConfig) GetUseTopicSchema() bool { - if x != nil { - return x.UseTopicSchema - } - return false -} - -// Acknowledgement IDs sent in one or more previous requests to acknowledge a -// previously received message. -type StreamingPullResponse_AcknowledgeConfirmation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. Successfully processed acknowledgement IDs. - AckIds []string `protobuf:"bytes,1,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"` - // Optional. List of acknowledgement IDs that were malformed or whose - // acknowledgement deadline has expired. - InvalidAckIds []string `protobuf:"bytes,2,rep,name=invalid_ack_ids,json=invalidAckIds,proto3" json:"invalid_ack_ids,omitempty"` - // Optional. List of acknowledgement IDs that were out of order. - UnorderedAckIds []string `protobuf:"bytes,3,rep,name=unordered_ack_ids,json=unorderedAckIds,proto3" json:"unordered_ack_ids,omitempty"` - // Optional. List of acknowledgement IDs that failed processing with - // temporary issues. 
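The `TextConfig`/`AvroConfig` pair above selects the `output_format` oneof of `CloudStorageConfig`. A sketch of wiring it up at subscription creation time, again with hypothetical bucket and resource names:

import (
	"context"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

// createAvroExportSubscription writes each message to a Cloud Storage bucket
// as Avro; WriteMetadata additionally records the subscription name,
// message_id, publish_time, and attributes, per the AvroConfig docs above.
func createAvroExportSubscription(ctx context.Context, c *pubsub.SubscriberClient) (*pubsubpb.Subscription, error) {
	return c.CreateSubscription(ctx, &pubsubpb.Subscription{
		Name:  "projects/my-project/subscriptions/gcs-export", // hypothetical
		Topic: "projects/my-project/topics/my-topic",
		CloudStorageConfig: &pubsubpb.CloudStorageConfig{
			Bucket: "my-export-bucket", // hypothetical; the Pub/Sub service agent needs write access
			OutputFormat: &pubsubpb.CloudStorageConfig_AvroConfig_{
				AvroConfig: &pubsubpb.CloudStorageConfig_AvroConfig{
					WriteMetadata: true,
				},
			},
		},
	})
}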
- TemporaryFailedAckIds []string `protobuf:"bytes,4,rep,name=temporary_failed_ack_ids,json=temporaryFailedAckIds,proto3" json:"temporary_failed_ack_ids,omitempty"` -} - -func (x *StreamingPullResponse_AcknowledgeConfirmation) Reset() { - *x = StreamingPullResponse_AcknowledgeConfirmation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingPullResponse_AcknowledgeConfirmation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingPullResponse_AcknowledgeConfirmation) ProtoMessage() {} - -func (x *StreamingPullResponse_AcknowledgeConfirmation) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingPullResponse_AcknowledgeConfirmation.ProtoReflect.Descriptor instead. -func (*StreamingPullResponse_AcknowledgeConfirmation) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37, 0} -} - -func (x *StreamingPullResponse_AcknowledgeConfirmation) GetAckIds() []string { - if x != nil { - return x.AckIds - } - return nil -} - -func (x *StreamingPullResponse_AcknowledgeConfirmation) GetInvalidAckIds() []string { - if x != nil { - return x.InvalidAckIds - } - return nil -} - -func (x *StreamingPullResponse_AcknowledgeConfirmation) GetUnorderedAckIds() []string { - if x != nil { - return x.UnorderedAckIds - } - return nil -} - -func (x *StreamingPullResponse_AcknowledgeConfirmation) GetTemporaryFailedAckIds() []string { - if x != nil { - return x.TemporaryFailedAckIds - } - return nil -} - -// Acknowledgement IDs sent in one or more previous requests to modify the -// deadline for a specific message. -type StreamingPullResponse_ModifyAckDeadlineConfirmation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. Successfully processed acknowledgement IDs. - AckIds []string `protobuf:"bytes,1,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"` - // Optional. List of acknowledgement IDs that were malformed or whose - // acknowledgement deadline has expired. - InvalidAckIds []string `protobuf:"bytes,2,rep,name=invalid_ack_ids,json=invalidAckIds,proto3" json:"invalid_ack_ids,omitempty"` - // Optional. List of acknowledgement IDs that failed processing with - // temporary issues. 
- TemporaryFailedAckIds []string `protobuf:"bytes,3,rep,name=temporary_failed_ack_ids,json=temporaryFailedAckIds,proto3" json:"temporary_failed_ack_ids,omitempty"` -} - -func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) Reset() { - *x = StreamingPullResponse_ModifyAckDeadlineConfirmation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoMessage() {} - -func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingPullResponse_ModifyAckDeadlineConfirmation.ProtoReflect.Descriptor instead. -func (*StreamingPullResponse_ModifyAckDeadlineConfirmation) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37, 1} -} - -func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) GetAckIds() []string { - if x != nil { - return x.AckIds - } - return nil -} - -func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) GetInvalidAckIds() []string { - if x != nil { - return x.InvalidAckIds - } - return nil -} - -func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) GetTemporaryFailedAckIds() []string { - if x != nil { - return x.TemporaryFailedAckIds - } - return nil -} - -// Subscription properties sent as part of the response. -type StreamingPullResponse_SubscriptionProperties struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. True iff exactly once delivery is enabled for this - // subscription. - ExactlyOnceDeliveryEnabled bool `protobuf:"varint,1,opt,name=exactly_once_delivery_enabled,json=exactlyOnceDeliveryEnabled,proto3" json:"exactly_once_delivery_enabled,omitempty"` - // Optional. True iff message ordering is enabled for this subscription. - MessageOrderingEnabled bool `protobuf:"varint,2,opt,name=message_ordering_enabled,json=messageOrderingEnabled,proto3" json:"message_ordering_enabled,omitempty"` -} - -func (x *StreamingPullResponse_SubscriptionProperties) Reset() { - *x = StreamingPullResponse_SubscriptionProperties{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingPullResponse_SubscriptionProperties) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingPullResponse_SubscriptionProperties) ProtoMessage() {} - -func (x *StreamingPullResponse_SubscriptionProperties) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingPullResponse_SubscriptionProperties.ProtoReflect.Descriptor instead. 
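`SubscriptionProperties` above only reports whether exactly-once delivery and ordering are enabled on a streaming pull; the flag itself is flipped through `UpdateSubscription` with a field mask. A sketch with a hypothetical subscription name:

import (
	"context"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

// enableExactlyOnce turns on exactly-once delivery for an existing
// subscription; subsequent streaming pulls report it back via
// SubscriptionProperties.ExactlyOnceDeliveryEnabled.
func enableExactlyOnce(ctx context.Context, c *pubsub.SubscriberClient, sub string) error {
	_, err := c.UpdateSubscription(ctx, &pubsubpb.UpdateSubscriptionRequest{
		Subscription: &pubsubpb.Subscription{
			Name:                      sub, // e.g. "projects/my-project/subscriptions/my-sub"
			EnableExactlyOnceDelivery: true,
		},
		UpdateMask: &fieldmaskpb.FieldMask{
			Paths: []string{"enable_exactly_once_delivery"},
		},
	})
	return err
}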
-func (*StreamingPullResponse_SubscriptionProperties) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37, 2} -} - -func (x *StreamingPullResponse_SubscriptionProperties) GetExactlyOnceDeliveryEnabled() bool { - if x != nil { - return x.ExactlyOnceDeliveryEnabled - } - return false -} - -func (x *StreamingPullResponse_SubscriptionProperties) GetMessageOrderingEnabled() bool { - if x != nil { - return x.MessageOrderingEnabled - } - return false -} - -var File_google_pubsub_v1_pubsub_proto protoreflect.FileDescriptor - -var file_google_pubsub_v1_pubsub_proto_rawDesc = []byte{ - 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2f, - 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x10, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, - 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, - 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x75, 0x62, - 0x73, 0x75, 0x62, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x01, 0x0a, 0x14, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x1b, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x50, - 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x31, 0x0a, 0x12, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x5f, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x10, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x69, 0x74, 0x22, 0xeb, 0x01, 0x0a, 0x0e, 0x53, 0x63, 0x68, 0x65, 
0x6d, 0x61, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1e, 0x0a, - 0x1c, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3b, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x11, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x0f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, - 0x6e, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, - 0x49, 0x64, 0x22, 0xb4, 0x04, 0x0a, 0x1b, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, - 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x60, 0x0a, 0x0b, 0x61, 0x77, 0x73, 0x5f, 0x6b, 0x69, 0x6e, 0x65, 0x73, 0x69, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, - 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, - 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x77, 0x73, 0x4b, 0x69, 0x6e, - 0x65, 0x73, 0x69, 0x73, 0x1a, 0xa8, 0x03, 0x0a, 0x0a, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, - 0x73, 0x69, 0x73, 0x12, 0x59, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x2e, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x22, - 0x0a, 0x0a, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, - 0x72, 0x6e, 0x12, 0x26, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x5f, 0x61, - 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x41, 0x72, 0x6e, 0x12, 0x25, 0x0a, 0x0c, 0x61, 0x77, - 0x73, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x61, 0x77, 0x73, 0x52, 0x6f, 0x6c, 0x65, 0x41, 0x72, - 0x6e, 
0x12, 0x33, 0x0a, 0x13, 0x67, 0x63, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x11, 0x67, 0x63, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x96, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, - 0x45, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x4b, 0x49, 0x4e, 0x45, 0x53, 0x49, 0x53, 0x5f, 0x50, - 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, - 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x50, 0x45, - 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, - 0x03, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, - 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x4f, 0x4e, 0x53, 0x55, - 0x4d, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x42, - 0x08, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xd2, 0x06, 0x0a, 0x05, 0x54, 0x6f, - 0x70, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x06, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, - 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x61, - 0x0a, 0x16, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, - 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x25, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6b, 0x6d, - 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, - 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, - 0x7a, 0x73, 0x12, 0x5c, 0x0a, 
0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x38, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x77, 0x0a, 0x1e, 0x69, 0x6e, - 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x48, - 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, - 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x49, 0x4e, - 0x47, 0x45, 0x53, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x3a, 0x54, 0xea, 0x41, 0x51, 0x0a, 0x1b, 0x70, - 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, - 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x7d, 0x12, 0x0f, 0x5f, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x2d, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x22, 0xc3, - 0x02, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x54, 0x0a, 0x0a, 0x61, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 
0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, - 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x3d, - 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0b, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26, 0x0a, - 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x22, 0x8a, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, - 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x05, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, - 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, - 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, - 0x8d, 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, - 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 
0x61, 0x67, 0x65, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, - 0x37, 0x0a, 0x0f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x73, 0x22, 0xa8, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, - 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, - 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, - 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x22, 0x77, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, - 0x69, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, - 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa0, 0x01, 0x0a, - 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, - 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, - 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, - 0x9f, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 
0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x01, 0xfa, 0x41, - 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, - 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x22, 0x6c, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, - 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4f, - 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, - 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, - 0x6b, 0x0a, 0x19, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, - 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, - 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, - 0x44, 0x65, 0x74, 0x61, 0x63, 
[diff hunk continued: several hundred removed `- 0x..,` lines of the generated raw protobuf descriptor bytes for google/pubsub/v1/pubsub.proto, part of the vendored pubsub package dropped by this dependency bump. Decoded, the removed descriptor covers: the Subscription message (name, topic, push_config, bigquery_config, cloud_storage_config, ack_deadline_seconds, retain_acked_messages, message_retention_duration, labels, enable_message_ordering, expiration_policy, filter, dead_letter_policy, retry_policy, detached, enable_exactly_once_delivery, topic_message_retention_duration, state); RetryPolicy; DeadLetterPolicy; ExpirationPolicy; PushConfig (with OidcToken, PubsubWrapper, and NoWrapper); BigQueryConfig and CloudStorageConfig (with TextConfig and AvroConfig) including their State enums; ReceivedMessage; the Get/Update/List/Delete request and response messages for subscriptions and snapshots; Pull, StreamingPull, ModifyAckDeadline, Acknowledge, ModifyPushConfig, CreateSnapshot, UpdateSnapshot, Seek and their responses; the Snapshot resource; and the Publisher and Subscriber service definitions (CreateTopic, UpdateTopic, Publish, GetTopic, ListTopics, ListTopicSubscriptions, ListTopicSnapshots, DeleteTopic, DetachSubscription; CreateSubscription, GetSubscription, UpdateSubscription, ListSubscriptions, DeleteSubscription, ModifyAckDeadline, Acknowledge, Pull, StreamingPull, ModifyPushConfig, GetSnapshot, ListSnapshots, ...) with their HTTP bindings and OAuth scopes. The byte dump continues beyond the end of this section.]
0x02, 0x24, 0x12, 0x22, 0x2f, 0x76, 0x31, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, - 0x97, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x40, 0xda, 0x41, 0x11, 0x6e, 0x61, 0x6d, 0x65, - 0x2c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x1a, 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa3, 0x01, 0x0a, 0x0e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x22, 0x4c, 0xda, 0x41, 0x14, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2c, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, - 0x3a, 0x01, 0x2a, 0x32, 0x2a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, - 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x22, 0x38, 0xda, 0x41, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x84, 0x01, - 0x0a, 0x04, 0x53, 0x65, 0x65, 0x6b, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 
-}
-
-var (
-	file_google_pubsub_v1_pubsub_proto_rawDescOnce sync.Once
-	file_google_pubsub_v1_pubsub_proto_rawDescData = file_google_pubsub_v1_pubsub_proto_rawDesc
-)
-
-func file_google_pubsub_v1_pubsub_proto_rawDescGZIP() []byte {
-	file_google_pubsub_v1_pubsub_proto_rawDescOnce.Do(func() {
-		file_google_pubsub_v1_pubsub_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_pubsub_v1_pubsub_proto_rawDescData)
-	})
-	return file_google_pubsub_v1_pubsub_proto_rawDescData
-}
-
-var file_google_pubsub_v1_pubsub_proto_enumTypes = make([]protoimpl.EnumInfo, 5)
-var file_google_pubsub_v1_pubsub_proto_msgTypes = make([]protoimpl.MessageInfo, 62)
-var file_google_pubsub_v1_pubsub_proto_goTypes = []any{
-	(IngestionDataSourceSettings_AwsKinesis_State)(0), // 0: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.State
-	(Topic_State)(0),              // 1: google.pubsub.v1.Topic.State
-	(Subscription_State)(0),       // 2: google.pubsub.v1.Subscription.State
-	(BigQueryConfig_State)(0),     // 3: google.pubsub.v1.BigQueryConfig.State
-	(CloudStorageConfig_State)(0), // 4: google.pubsub.v1.CloudStorageConfig.State
-	(*MessageStoragePolicy)(nil),  // 5: google.pubsub.v1.MessageStoragePolicy
-	[... entries 6–66 elided: one entry per remaining generated message type (Topic, PubsubMessage, the request/response messages, Subscription, PushConfig, Snapshot, Seek, etc.), nested types, and nil placeholders for map-entry types ...]
-	(Encoding)(0),                 // 67: google.pubsub.v1.Encoding
-	(*durationpb.Duration)(nil),   // 68: google.protobuf.Duration
-	(*timestamppb.Timestamp)(nil), // 69: google.protobuf.Timestamp
-	(*fieldmaskpb.FieldMask)(nil), // 70: google.protobuf.FieldMask
-	(*emptypb.Empty)(nil),         // 71: google.protobuf.Empty
-}
-var file_google_pubsub_v1_pubsub_proto_depIdxs = []int32{
-	67, // 0: google.pubsub.v1.SchemaSettings.encoding:type_name -> google.pubsub.v1.Encoding
-	52, // 1: google.pubsub.v1.IngestionDataSourceSettings.aws_kinesis:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis
-	[... entries 2–53 elided: field type_name references ...]
-	8, // 54: google.pubsub.v1.Publisher.CreateTopic:input_type -> google.pubsub.v1.Topic
-	[... entries 55–78 elided: method input_type references for the remaining Publisher and Subscriber RPCs ...]
-	8, // 79: google.pubsub.v1.Publisher.CreateTopic:output_type -> google.pubsub.v1.Topic
-	[... entries 80–103 elided: method output_type references for the remaining Publisher and Subscriber RPCs ...]
-	79, // [79:104] is the sub-list for method output_type
-	54, // [54:79] is the sub-list for method input_type
-	54, // [54:54] is the sub-list for extension type_name
-	54, // [54:54] is the sub-list for extension extendee
-	0,  // [0:54] is the sub-list for field type_name
-}
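The dependency table is positional: as its trailing markers say, entries [0:54] are field type_name references, [54:79] method input types, and [79:104] method output types, each value an index into goTypes. A minimal sketch of that convention — `methodTypes` is a hypothetical helper written for this note, not part of the generated file, and the offsets are exactly the ones documented above:

```go
package example

// methodTypes resolves the Go types for the i-th RPC method (counted across
// Publisher then Subscriber, in declaration order), using the sub-list layout
// from the depIdxs comments: depIdxs[54:79] holds input types, depIdxs[79:104]
// holds output types, and every entry is an index into goTypes.
func methodTypes(goTypes []any, depIdxs []int32, i int) (input, output any) {
	input = goTypes[depIdxs[54+i]]  // method input_type sub-list
	output = goTypes[depIdxs[79+i]] // method output_type sub-list
	return input, output
}
```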
-
-func init() { file_google_pubsub_v1_pubsub_proto_init() }
-func file_google_pubsub_v1_pubsub_proto_init() {
-	if File_google_pubsub_v1_pubsub_proto != nil {
-		return
-	}
-	file_google_pubsub_v1_schema_proto_init()
-	if !protoimpl.UnsafeEnabled {
-		file_google_pubsub_v1_pubsub_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*MessageStoragePolicy); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		[... identical Exporter closures for msgTypes[1] through msgTypes[59] elided — one per concrete message type (map-entry types are skipped), each switching over the same state/sizeCache/unknownFields fields of its own type ...]
-	}
-	file_google_pubsub_v1_pubsub_proto_msgTypes[2].OneofWrappers = []any{
-		(*IngestionDataSourceSettings_AwsKinesis_)(nil),
-	}
-	file_google_pubsub_v1_pubsub_proto_msgTypes[22].OneofWrappers = []any{
-		(*PushConfig_OidcToken_)(nil),
-		(*PushConfig_PubsubWrapper_)(nil),
-		(*PushConfig_NoWrapper_)(nil),
-	}
-	file_google_pubsub_v1_pubsub_proto_msgTypes[24].OneofWrappers = []any{
-		(*CloudStorageConfig_TextConfig_)(nil),
-		(*CloudStorageConfig_AvroConfig_)(nil),
-	}
-	file_google_pubsub_v1_pubsub_proto_msgTypes[45].OneofWrappers = []any{
-		(*SeekRequest_Time)(nil),
-		(*SeekRequest_Snapshot)(nil),
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_pubsub_v1_pubsub_proto_rawDesc,
-			NumEnums:      5,
-			NumMessages:   62,
-			NumExtensions: 0,
-			NumServices:   2,
-		},
-		GoTypes:           file_google_pubsub_v1_pubsub_proto_goTypes,
-		DependencyIndexes: file_google_pubsub_v1_pubsub_proto_depIdxs,
-		EnumInfos:         file_google_pubsub_v1_pubsub_proto_enumTypes,
-		MessageInfos:      file_google_pubsub_v1_pubsub_proto_msgTypes,
-	}.Build()
-	File_google_pubsub_v1_pubsub_proto = out.File
-	file_google_pubsub_v1_pubsub_proto_rawDesc = nil
-	file_google_pubsub_v1_pubsub_proto_goTypes = nil
-	file_google_pubsub_v1_pubsub_proto_depIdxs = nil
-}
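Importing the package runs this init, which hands the raw descriptor and the tables above to protoimpl.TypeBuilder and registers the resulting file descriptor globally. A small self-contained sketch that inspects the result at runtime; the blank import path is an assumption about where this generated file lives (apiv1/pubsubpb):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"

	// Blank import so the generated init() above runs and registers the file.
	_ "cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/pubsub/v1/pubsub.proto")
	if err != nil {
		panic(err)
	}
	// Walk the two services (Publisher, Subscriber) and print each method with
	// its input and output message types — the same wiring that depIdxs
	// encodes positionally.
	svcs := fd.Services()
	for i := 0; i < svcs.Len(); i++ {
		svc := svcs.Get(i)
		for j := 0; j < svc.Methods().Len(); j++ {
			m := svc.Methods().Get(j)
			fmt.Printf("%s: %s -> %s\n", m.FullName(), m.Input().FullName(), m.Output().FullName())
		}
	}
}
```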
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConnInterface
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion6
-
-// PublisherClient is the client API for Publisher service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type PublisherClient interface {
-	// Creates the given topic with the given name. See the [resource name rules]
-	// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names).
-	CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error)
-	// Updates an existing topic by updating the fields specified in the update
-	// mask. Note that certain properties of a topic are not modifiable.
-	UpdateTopic(ctx context.Context, in *UpdateTopicRequest, opts ...grpc.CallOption) (*Topic, error)
-	// Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic
-	// does not exist.
-	Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishResponse, error)
-	// Gets the configuration of a topic.
-	GetTopic(ctx context.Context, in *GetTopicRequest, opts ...grpc.CallOption) (*Topic, error)
-	// Lists matching topics.
-	ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error)
-	// Lists the names of the attached subscriptions on this topic.
-	ListTopicSubscriptions(ctx context.Context, in *ListTopicSubscriptionsRequest, opts ...grpc.CallOption) (*ListTopicSubscriptionsResponse, error)
-	// Lists the names of the snapshots on this topic. Snapshots are used in
-	// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations,
-	// which allow you to manage message acknowledgments in bulk. That is, you can
-	// set the acknowledgment state of messages in an existing subscription to the
-	// state captured by a snapshot.
-	ListTopicSnapshots(ctx context.Context, in *ListTopicSnapshotsRequest, opts ...grpc.CallOption) (*ListTopicSnapshotsResponse, error)
-	// Deletes the topic with the given name. Returns `NOT_FOUND` if the topic
-	// does not exist. After a topic is deleted, a new topic may be created with
-	// the same name; this is an entirely new topic with none of the old
-	// configuration or subscriptions. Existing subscriptions to this topic are
-	// not deleted, but their `topic` field is set to `_deleted-topic_`.
-	DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
-	// Detaches a subscription from this topic. All messages retained in the
-	// subscription are dropped. Subsequent `Pull` and `StreamingPull` requests
-	// will return FAILED_PRECONDITION. If the subscription is a push
-	// subscription, pushes to the endpoint will stop.
-	DetachSubscription(ctx context.Context, in *DetachSubscriptionRequest, opts ...grpc.CallOption) (*DetachSubscriptionResponse, error)
-}
-
-type publisherClient struct {
-	cc grpc.ClientConnInterface
-}
-
-func NewPublisherClient(cc grpc.ClientConnInterface) PublisherClient {
-	return &publisherClient{cc}
-}
-
-func (c *publisherClient) CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error) {
-	out := new(Topic)
-	err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/CreateTopic", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-	[... the UpdateTopic, Publish, GetTopic, ListTopics, ListTopicSubscriptions, ListTopicSnapshots, DeleteTopic, and DetachSubscription client methods are elided; each repeats this Invoke pattern with its own request/response types and method path ...]
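The generated client is normally wrapped by the higher-level cloud.google.com/go/pubsub package, but it can also be used directly over any gRPC connection. A minimal sketch against a local Pub/Sub emulator — the localhost:8085 address, insecure transport, and resource names are all assumptions; talking to the real service requires TLS and OAuth credentials:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumes a Pub/Sub emulator listening on localhost:8085.
	conn, err := grpc.Dial("localhost:8085", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pubsubpb.NewPublisherClient(conn)
	ctx := context.Background()

	// Create a topic, then publish to it; both names are placeholders.
	topic, err := client.CreateTopic(ctx, &pubsubpb.Topic{Name: "projects/demo/topics/demo-topic"})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Publish(ctx, &pubsubpb.PublishRequest{
		Topic:    topic.GetName(),
		Messages: []*pubsubpb.PubsubMessage{{Data: []byte("hello")}},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("published message IDs: %v", resp.GetMessageIds())
}
```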
-
-// PublisherServer is the server API for Publisher service.
-type PublisherServer interface {
-	// Creates the given topic with the given name. See the [resource name rules]
-	// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names).
-	CreateTopic(context.Context, *Topic) (*Topic, error)
-	// Updates an existing topic by updating the fields specified in the update
-	// mask. Note that certain properties of a topic are not modifiable.
-	UpdateTopic(context.Context, *UpdateTopicRequest) (*Topic, error)
-	// Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic
-	// does not exist.
-	Publish(context.Context, *PublishRequest) (*PublishResponse, error)
-	// Gets the configuration of a topic.
-	GetTopic(context.Context, *GetTopicRequest) (*Topic, error)
-	// Lists matching topics.
-	ListTopics(context.Context, *ListTopicsRequest) (*ListTopicsResponse, error)
-	// Lists the names of the attached subscriptions on this topic.
-	ListTopicSubscriptions(context.Context, *ListTopicSubscriptionsRequest) (*ListTopicSubscriptionsResponse, error)
-	// Lists the names of the snapshots on this topic. Snapshots are used in
-	// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations,
-	// which allow you to manage message acknowledgments in bulk. That is, you can
-	// set the acknowledgment state of messages in an existing subscription to the
-	// state captured by a snapshot.
-	ListTopicSnapshots(context.Context, *ListTopicSnapshotsRequest) (*ListTopicSnapshotsResponse, error)
-	// Deletes the topic with the given name. Returns `NOT_FOUND` if the topic
-	// does not exist. After a topic is deleted, a new topic may be created with
-	// the same name; this is an entirely new topic with none of the old
-	// configuration or subscriptions. Existing subscriptions to this topic are
-	// not deleted, but their `topic` field is set to `_deleted-topic_`.
-	DeleteTopic(context.Context, *DeleteTopicRequest) (*emptypb.Empty, error)
-	// Detaches a subscription from this topic. All messages retained in the
-	// subscription are dropped. Subsequent `Pull` and `StreamingPull` requests
-	// will return FAILED_PRECONDITION. If the subscription is a push
-	// subscription, pushes to the endpoint will stop.
-	DetachSubscription(context.Context, *DetachSubscriptionRequest) (*DetachSubscriptionResponse, error)
-}
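On the server side, implementations embed UnimplementedPublisherServer (defined just below) so that RPCs they do not override fail with codes.Unimplemented instead of breaking compilation when new methods appear. A sketch of a fake Publisher for tests, with invented in-memory behavior:

```go
package main

import (
	"context"
	"log"
	"net"

	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/grpc"
)

// fakePublisher overrides only Publish; every other Publisher RPC falls
// through to UnimplementedPublisherServer and returns codes.Unimplemented.
type fakePublisher struct {
	pubsubpb.UnimplementedPublisherServer
}

func (f *fakePublisher) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) {
	ids := make([]string, len(req.GetMessages()))
	for i := range ids {
		ids[i] = "fake-id" // invented IDs, purely for illustration
	}
	return &pubsubpb.PublishResponse{MessageIds: ids}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	pubsubpb.RegisterPublisherServer(s, &fakePublisher{})
	log.Printf("fake Publisher listening on %s", lis.Addr())
	log.Fatal(s.Serve(lis))
}
```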
-
-// UnimplementedPublisherServer can be embedded to have forward compatible implementations.
-type UnimplementedPublisherServer struct {
-}
-
-func (*UnimplementedPublisherServer) CreateTopic(context.Context, *Topic) (*Topic, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method CreateTopic not implemented")
-}
-
-	[... the matching stubs for UpdateTopic, Publish, GetTopic, ListTopics, ListTopicSubscriptions, ListTopicSnapshots, DeleteTopic, and DetachSubscription are elided; each returns the same codes.Unimplemented error for its own method name ...]
-
-func RegisterPublisherServer(s *grpc.Server, srv PublisherServer) {
-	s.RegisterService(&_Publisher_serviceDesc, srv)
-}
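The per-method handlers that follow decode the request and, when a unary interceptor is installed on the server, route the call through it along with the method's FullMethod string. A sketch of an interceptor those handlers would invoke — the logging is purely illustrative:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingInterceptor is called by the generated handlers: they wrap the
// concrete method call in a grpc.UnaryHandler and pass it here together with
// UnaryServerInfo.FullMethod (e.g. "/google.pubsub.v1.Publisher/Publish").
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %s (err=%v)", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// Installing the interceptor; the generated _Publisher_*_Handler
	// functions route every unary RPC through it.
	_ = grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
}
```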
if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PublisherServer).Publish(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Publisher/Publish", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PublisherServer).Publish(ctx, req.(*PublishRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Publisher_GetTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetTopicRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PublisherServer).GetTopic(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Publisher/GetTopic", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PublisherServer).GetTopic(ctx, req.(*GetTopicRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Publisher_ListTopics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListTopicsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PublisherServer).ListTopics(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Publisher/ListTopics", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PublisherServer).ListTopics(ctx, req.(*ListTopicsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Publisher_ListTopicSubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListTopicSubscriptionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PublisherServer).ListTopicSubscriptions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Publisher/ListTopicSubscriptions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PublisherServer).ListTopicSubscriptions(ctx, req.(*ListTopicSubscriptionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Publisher_ListTopicSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListTopicSnapshotsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PublisherServer).ListTopicSnapshots(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Publisher/ListTopicSnapshots", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PublisherServer).ListTopicSnapshots(ctx, req.(*ListTopicSnapshotsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Publisher_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteTopicRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PublisherServer).DeleteTopic(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/google.pubsub.v1.Publisher/DeleteTopic", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PublisherServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Publisher_DetachSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DetachSubscriptionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PublisherServer).DetachSubscription(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Publisher/DetachSubscription", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PublisherServer).DetachSubscription(ctx, req.(*DetachSubscriptionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Publisher_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.pubsub.v1.Publisher", - HandlerType: (*PublisherServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateTopic", - Handler: _Publisher_CreateTopic_Handler, - }, - { - MethodName: "UpdateTopic", - Handler: _Publisher_UpdateTopic_Handler, - }, - { - MethodName: "Publish", - Handler: _Publisher_Publish_Handler, - }, - { - MethodName: "GetTopic", - Handler: _Publisher_GetTopic_Handler, - }, - { - MethodName: "ListTopics", - Handler: _Publisher_ListTopics_Handler, - }, - { - MethodName: "ListTopicSubscriptions", - Handler: _Publisher_ListTopicSubscriptions_Handler, - }, - { - MethodName: "ListTopicSnapshots", - Handler: _Publisher_ListTopicSnapshots_Handler, - }, - { - MethodName: "DeleteTopic", - Handler: _Publisher_DeleteTopic_Handler, - }, - { - MethodName: "DetachSubscription", - Handler: _Publisher_DetachSubscription_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/pubsub/v1/pubsub.proto", -} - -// SubscriberClient is the client API for Subscriber service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type SubscriberClient interface { - // Creates a subscription to a given topic. See the [resource name rules] - // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). - // If the subscription already exists, returns `ALREADY_EXISTS`. - // If the corresponding topic doesn't exist, returns `NOT_FOUND`. - // - // If the name is not provided in the request, the server will assign a random - // name for this subscription on the same project as the topic, conforming - // to the [resource name format] - // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The - // generated name is populated in the returned Subscription object. Note that - // for REST API requests, you must specify a name in the request. - CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error) - // Gets the configuration details of a subscription. - GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) - // Updates an existing subscription by updating the fields specified in the - // update mask. Note that certain properties of a subscription, such as its - // topic, are not modifiable. - UpdateSubscription(ctx context.Context, in *UpdateSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) - // Lists matching subscriptions. 
- ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error) - // Deletes an existing subscription. All messages retained in the subscription - // are immediately dropped. Calls to `Pull` after deletion will return - // `NOT_FOUND`. After a subscription is deleted, a new one may be created with - // the same name, but the new one has no association with the old - // subscription or its topic unless the same topic is specified. - DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Modifies the ack deadline for a specific message. This method is useful - // to indicate that more time is needed to process a message by the - // subscriber, or to make the message available for redelivery if the - // processing was interrupted. Note that this does not modify the - // subscription-level `ackDeadlineSeconds` used for subsequent messages. - ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Acknowledges the messages associated with the `ack_ids` in the - // `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages - // from the subscription. - // - // Acknowledging a message whose ack deadline has expired may succeed, - // but such a message may be redelivered later. Acknowledging a message more - // than once will not result in an error. - Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Pulls messages from the server. - Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) - // Establishes a stream with the server, which sends messages down to the - // client. The client streams acknowledgements and ack deadline modifications - // back to the server. The server will close the stream and return the status - // on any error. The server may close the stream with status `UNAVAILABLE` to - // reassign server-side resources, in which case, the client should - // re-establish the stream. Flow control can be achieved by configuring the - // underlying RPC channel. - StreamingPull(ctx context.Context, opts ...grpc.CallOption) (Subscriber_StreamingPullClient, error) - // Modifies the `PushConfig` for a specified subscription. - // - // This may be used to change a push subscription to a pull one (signified by - // an empty `PushConfig`) or vice versa, or change the endpoint URL and other - // attributes of a push subscription. Messages will accumulate for delivery - // continuously through the call regardless of changes to the `PushConfig`. - ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Gets the configuration details of a snapshot. Snapshots are used in - // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, - // which allow you to manage message acknowledgments in bulk. That is, you can - // set the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - GetSnapshot(ctx context.Context, in *GetSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) - // Lists the existing snapshots. Snapshots are used in [Seek]( - // https://cloud.google.com/pubsub/docs/replay-overview) operations, which - // allow you to manage message acknowledgments in bulk. 
That is, you can set - // the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) - // Creates a snapshot from the requested subscription. Snapshots are used in - // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, - // which allow you to manage message acknowledgments in bulk. That is, you can - // set the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - // If the snapshot already exists, returns `ALREADY_EXISTS`. - // If the requested subscription doesn't exist, returns `NOT_FOUND`. - // If the backlog in the subscription is too old -- and the resulting snapshot - // would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. - // See also the `Snapshot.expire_time` field. If the name is not provided in - // the request, the server will assign a random - // name for this snapshot on the same project as the subscription, conforming - // to the [resource name format] - // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The - // generated name is populated in the returned Snapshot object. Note that for - // REST API requests, you must specify a name in the request. - CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) - // Updates an existing snapshot by updating the fields specified in the update - // mask. Snapshots are used in - // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, - // which allow you to manage message acknowledgments in bulk. That is, you can - // set the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - UpdateSnapshot(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) - // Removes an existing snapshot. Snapshots are used in [Seek] - // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which - // allow you to manage message acknowledgments in bulk. That is, you can set - // the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - // When the snapshot is deleted, all messages retained in the snapshot - // are immediately dropped. After a snapshot is deleted, a new one may be - // created with the same name, but the new one has no association with the old - // snapshot or its subscription, unless the same subscription is specified. - DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Seeks an existing subscription to a point in time or to a given snapshot, - // whichever is provided in the request. Snapshots are used in [Seek] - // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which - // allow you to manage message acknowledgments in bulk. That is, you can set - // the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. Note that both the subscription and the - // snapshot must be on the same topic. 
- Seek(ctx context.Context, in *SeekRequest, opts ...grpc.CallOption) (*SeekResponse, error) -} - -type subscriberClient struct { - cc grpc.ClientConnInterface -} - -func NewSubscriberClient(cc grpc.ClientConnInterface) SubscriberClient { - return &subscriberClient{cc} -} - -func (c *subscriberClient) CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error) { - out := new(Subscription) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/CreateSubscription", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) { - out := new(Subscription) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/GetSubscription", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) UpdateSubscription(ctx context.Context, in *UpdateSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) { - out := new(Subscription) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/UpdateSubscription", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error) { - out := new(ListSubscriptionsResponse) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ListSubscriptions", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/DeleteSubscription", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ModifyAckDeadline", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Acknowledge", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) { - out := new(PullResponse) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Pull", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) StreamingPull(ctx context.Context, opts ...grpc.CallOption) (Subscriber_StreamingPullClient, error) { - stream, err := c.cc.NewStream(ctx, &_Subscriber_serviceDesc.Streams[0], "/google.pubsub.v1.Subscriber/StreamingPull", opts...) 
- if err != nil { - return nil, err - } - x := &subscriberStreamingPullClient{stream} - return x, nil -} - -type Subscriber_StreamingPullClient interface { - Send(*StreamingPullRequest) error - Recv() (*StreamingPullResponse, error) - grpc.ClientStream -} - -type subscriberStreamingPullClient struct { - grpc.ClientStream -} - -func (x *subscriberStreamingPullClient) Send(m *StreamingPullRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *subscriberStreamingPullClient) Recv() (*StreamingPullResponse, error) { - m := new(StreamingPullResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *subscriberClient) ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ModifyPushConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) GetSnapshot(ctx context.Context, in *GetSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { - out := new(Snapshot) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/GetSnapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { - out := new(ListSnapshotsResponse) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ListSnapshots", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { - out := new(Snapshot) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/CreateSnapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) UpdateSnapshot(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { - out := new(Snapshot) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/UpdateSnapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/DeleteSnapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *subscriberClient) Seek(ctx context.Context, in *SeekRequest, opts ...grpc.CallOption) (*SeekResponse, error) { - out := new(SeekResponse) - err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Seek", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SubscriberServer is the server API for Subscriber service. -type SubscriberServer interface { - // Creates a subscription to a given topic. See the [resource name rules] - // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). - // If the subscription already exists, returns `ALREADY_EXISTS`. - // If the corresponding topic doesn't exist, returns `NOT_FOUND`. - // - // If the name is not provided in the request, the server will assign a random - // name for this subscription on the same project as the topic, conforming - // to the [resource name format] - // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). 
The - // generated name is populated in the returned Subscription object. Note that - // for REST API requests, you must specify a name in the request. - CreateSubscription(context.Context, *Subscription) (*Subscription, error) - // Gets the configuration details of a subscription. - GetSubscription(context.Context, *GetSubscriptionRequest) (*Subscription, error) - // Updates an existing subscription by updating the fields specified in the - // update mask. Note that certain properties of a subscription, such as its - // topic, are not modifiable. - UpdateSubscription(context.Context, *UpdateSubscriptionRequest) (*Subscription, error) - // Lists matching subscriptions. - ListSubscriptions(context.Context, *ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) - // Deletes an existing subscription. All messages retained in the subscription - // are immediately dropped. Calls to `Pull` after deletion will return - // `NOT_FOUND`. After a subscription is deleted, a new one may be created with - // the same name, but the new one has no association with the old - // subscription or its topic unless the same topic is specified. - DeleteSubscription(context.Context, *DeleteSubscriptionRequest) (*emptypb.Empty, error) - // Modifies the ack deadline for a specific message. This method is useful - // to indicate that more time is needed to process a message by the - // subscriber, or to make the message available for redelivery if the - // processing was interrupted. Note that this does not modify the - // subscription-level `ackDeadlineSeconds` used for subsequent messages. - ModifyAckDeadline(context.Context, *ModifyAckDeadlineRequest) (*emptypb.Empty, error) - // Acknowledges the messages associated with the `ack_ids` in the - // `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages - // from the subscription. - // - // Acknowledging a message whose ack deadline has expired may succeed, - // but such a message may be redelivered later. Acknowledging a message more - // than once will not result in an error. - Acknowledge(context.Context, *AcknowledgeRequest) (*emptypb.Empty, error) - // Pulls messages from the server. - Pull(context.Context, *PullRequest) (*PullResponse, error) - // Establishes a stream with the server, which sends messages down to the - // client. The client streams acknowledgements and ack deadline modifications - // back to the server. The server will close the stream and return the status - // on any error. The server may close the stream with status `UNAVAILABLE` to - // reassign server-side resources, in which case, the client should - // re-establish the stream. Flow control can be achieved by configuring the - // underlying RPC channel. - StreamingPull(Subscriber_StreamingPullServer) error - // Modifies the `PushConfig` for a specified subscription. - // - // This may be used to change a push subscription to a pull one (signified by - // an empty `PushConfig`) or vice versa, or change the endpoint URL and other - // attributes of a push subscription. Messages will accumulate for delivery - // continuously through the call regardless of changes to the `PushConfig`. - ModifyPushConfig(context.Context, *ModifyPushConfigRequest) (*emptypb.Empty, error) - // Gets the configuration details of a snapshot. Snapshots are used in - // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, - // which allow you to manage message acknowledgments in bulk. 
That is, you can - // set the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - GetSnapshot(context.Context, *GetSnapshotRequest) (*Snapshot, error) - // Lists the existing snapshots. Snapshots are used in [Seek]( - // https://cloud.google.com/pubsub/docs/replay-overview) operations, which - // allow you to manage message acknowledgments in bulk. That is, you can set - // the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) - // Creates a snapshot from the requested subscription. Snapshots are used in - // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, - // which allow you to manage message acknowledgments in bulk. That is, you can - // set the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - // If the snapshot already exists, returns `ALREADY_EXISTS`. - // If the requested subscription doesn't exist, returns `NOT_FOUND`. - // If the backlog in the subscription is too old -- and the resulting snapshot - // would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. - // See also the `Snapshot.expire_time` field. If the name is not provided in - // the request, the server will assign a random - // name for this snapshot on the same project as the subscription, conforming - // to the [resource name format] - // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The - // generated name is populated in the returned Snapshot object. Note that for - // REST API requests, you must specify a name in the request. - CreateSnapshot(context.Context, *CreateSnapshotRequest) (*Snapshot, error) - // Updates an existing snapshot by updating the fields specified in the update - // mask. Snapshots are used in - // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, - // which allow you to manage message acknowledgments in bulk. That is, you can - // set the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - UpdateSnapshot(context.Context, *UpdateSnapshotRequest) (*Snapshot, error) - // Removes an existing snapshot. Snapshots are used in [Seek] - // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which - // allow you to manage message acknowledgments in bulk. That is, you can set - // the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - // When the snapshot is deleted, all messages retained in the snapshot - // are immediately dropped. After a snapshot is deleted, a new one may be - // created with the same name, but the new one has no association with the old - // snapshot or its subscription, unless the same subscription is specified. - DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*emptypb.Empty, error) - // Seeks an existing subscription to a point in time or to a given snapshot, - // whichever is provided in the request. Snapshots are used in [Seek] - // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which - // allow you to manage message acknowledgments in bulk. That is, you can set - // the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. Note that both the subscription and the - // snapshot must be on the same topic. 
- Seek(context.Context, *SeekRequest) (*SeekResponse, error) -} - -// UnimplementedSubscriberServer can be embedded to have forward compatible implementations. -type UnimplementedSubscriberServer struct { -} - -func (*UnimplementedSubscriberServer) CreateSubscription(context.Context, *Subscription) (*Subscription, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateSubscription not implemented") -} -func (*UnimplementedSubscriberServer) GetSubscription(context.Context, *GetSubscriptionRequest) (*Subscription, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSubscription not implemented") -} -func (*UnimplementedSubscriberServer) UpdateSubscription(context.Context, *UpdateSubscriptionRequest) (*Subscription, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateSubscription not implemented") -} -func (*UnimplementedSubscriberServer) ListSubscriptions(context.Context, *ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListSubscriptions not implemented") -} -func (*UnimplementedSubscriberServer) DeleteSubscription(context.Context, *DeleteSubscriptionRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteSubscription not implemented") -} -func (*UnimplementedSubscriberServer) ModifyAckDeadline(context.Context, *ModifyAckDeadlineRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ModifyAckDeadline not implemented") -} -func (*UnimplementedSubscriberServer) Acknowledge(context.Context, *AcknowledgeRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Acknowledge not implemented") -} -func (*UnimplementedSubscriberServer) Pull(context.Context, *PullRequest) (*PullResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Pull not implemented") -} -func (*UnimplementedSubscriberServer) StreamingPull(Subscriber_StreamingPullServer) error { - return status.Errorf(codes.Unimplemented, "method StreamingPull not implemented") -} -func (*UnimplementedSubscriberServer) ModifyPushConfig(context.Context, *ModifyPushConfigRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ModifyPushConfig not implemented") -} -func (*UnimplementedSubscriberServer) GetSnapshot(context.Context, *GetSnapshotRequest) (*Snapshot, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSnapshot not implemented") -} -func (*UnimplementedSubscriberServer) ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") -} -func (*UnimplementedSubscriberServer) CreateSnapshot(context.Context, *CreateSnapshotRequest) (*Snapshot, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented") -} -func (*UnimplementedSubscriberServer) UpdateSnapshot(context.Context, *UpdateSnapshotRequest) (*Snapshot, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateSnapshot not implemented") -} -func (*UnimplementedSubscriberServer) DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented") -} -func (*UnimplementedSubscriberServer) Seek(context.Context, *SeekRequest) (*SeekResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Seek not 
implemented") -} - -func RegisterSubscriberServer(s *grpc.Server, srv SubscriberServer) { - s.RegisterService(&_Subscriber_serviceDesc, srv) -} - -func _Subscriber_CreateSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Subscription) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).CreateSubscription(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/CreateSubscription", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).CreateSubscription(ctx, req.(*Subscription)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_GetSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSubscriptionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).GetSubscription(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/GetSubscription", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).GetSubscription(ctx, req.(*GetSubscriptionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_UpdateSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateSubscriptionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).UpdateSubscription(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/UpdateSubscription", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).UpdateSubscription(ctx, req.(*UpdateSubscriptionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_ListSubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListSubscriptionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).ListSubscriptions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/ListSubscriptions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).ListSubscriptions(ctx, req.(*ListSubscriptionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_DeleteSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteSubscriptionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).DeleteSubscription(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/DeleteSubscription", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).DeleteSubscription(ctx, req.(*DeleteSubscriptionRequest)) - } - 
return interceptor(ctx, in, info, handler) -} - -func _Subscriber_ModifyAckDeadline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ModifyAckDeadlineRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).ModifyAckDeadline(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/ModifyAckDeadline", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).ModifyAckDeadline(ctx, req.(*ModifyAckDeadlineRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_Acknowledge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AcknowledgeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).Acknowledge(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/Acknowledge", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).Acknowledge(ctx, req.(*AcknowledgeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_Pull_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PullRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).Pull(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/Pull", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).Pull(ctx, req.(*PullRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_StreamingPull_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SubscriberServer).StreamingPull(&subscriberStreamingPullServer{stream}) -} - -type Subscriber_StreamingPullServer interface { - Send(*StreamingPullResponse) error - Recv() (*StreamingPullRequest, error) - grpc.ServerStream -} - -type subscriberStreamingPullServer struct { - grpc.ServerStream -} - -func (x *subscriberStreamingPullServer) Send(m *StreamingPullResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *subscriberStreamingPullServer) Recv() (*StreamingPullRequest, error) { - m := new(StreamingPullRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Subscriber_ModifyPushConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ModifyPushConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).ModifyPushConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/ModifyPushConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).ModifyPushConfig(ctx, req.(*ModifyPushConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_GetSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).GetSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/GetSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).GetSnapshot(ctx, req.(*GetSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListSnapshotsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).ListSnapshots(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/ListSnapshots", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).CreateSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/CreateSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_UpdateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).UpdateSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/UpdateSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).UpdateSnapshot(ctx, req.(*UpdateSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubscriberServer).DeleteSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/DeleteSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Subscriber_Seek_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SeekRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(SubscriberServer).Seek(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.pubsub.v1.Subscriber/Seek", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubscriberServer).Seek(ctx, req.(*SeekRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Subscriber_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.pubsub.v1.Subscriber", - HandlerType: (*SubscriberServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateSubscription", - Handler: _Subscriber_CreateSubscription_Handler, - }, - { - MethodName: "GetSubscription", - Handler: _Subscriber_GetSubscription_Handler, - }, - { - MethodName: "UpdateSubscription", - Handler: _Subscriber_UpdateSubscription_Handler, - }, - { - MethodName: "ListSubscriptions", - Handler: _Subscriber_ListSubscriptions_Handler, - }, - { - MethodName: "DeleteSubscription", - Handler: _Subscriber_DeleteSubscription_Handler, - }, - { - MethodName: "ModifyAckDeadline", - Handler: _Subscriber_ModifyAckDeadline_Handler, - }, - { - MethodName: "Acknowledge", - Handler: _Subscriber_Acknowledge_Handler, - }, - { - MethodName: "Pull", - Handler: _Subscriber_Pull_Handler, - }, - { - MethodName: "ModifyPushConfig", - Handler: _Subscriber_ModifyPushConfig_Handler, - }, - { - MethodName: "GetSnapshot", - Handler: _Subscriber_GetSnapshot_Handler, - }, - { - MethodName: "ListSnapshots", - Handler: _Subscriber_ListSnapshots_Handler, - }, - { - MethodName: "CreateSnapshot", - Handler: _Subscriber_CreateSnapshot_Handler, - }, - { - MethodName: "UpdateSnapshot", - Handler: _Subscriber_UpdateSnapshot_Handler, - }, - { - MethodName: "DeleteSnapshot", - Handler: _Subscriber_DeleteSnapshot_Handler, - }, - { - MethodName: "Seek", - Handler: _Subscriber_Seek_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamingPull", - Handler: _Subscriber_StreamingPull_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "google/pubsub/v1/pubsub.proto", -} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go index 4013a77e..0cc01c52 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,7 +20,7 @@ import ( "bytes" "context" "fmt" - "io" + "log/slog" "math" "net/http" "net/url" @@ -29,7 +29,6 @@ import ( iampb "cloud.google.com/go/iam/apiv1/iampb" pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb" gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/googleapi" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" @@ -69,6 +68,7 @@ func defaultSchemaGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -468,6 +468,8 @@ type schemaGRPCClient struct { // The x-goog-* metadata to be sent with each request. xGoogHeaders []string + + logger *slog.Logger } // NewSchemaClient creates a new schema service client based on gRPC. 
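From this point on, the schema and subscriber client changes are mechanical: each client struct gains a `logger *slog.Logger`, the constructors populate it via `internaloption.GetLogger(opts)`, and every unary gRPC call site swaps a direct stub invocation for `executeRPC(ctx, method, req, settings.GRPC, c.logger, "MethodName")`. The helper itself lives elsewhere in the vendored module and is not part of this diff; inferred purely from the call sites, it has roughly this shape — a sketch, not the actual implementation:

```go
// Sketch only: the real executeRPC ships elsewhere in the vendored
// module. Shape inferred from call sites such as
//   resp, err = executeRPC(ctx, c.schemaClient.CreateSchema, req,
//       settings.GRPC, c.logger, "CreateSchema")
package sketch

import (
	"context"
	"log/slog"

	"google.golang.org/grpc"
)

func executeRPC[I, O any](
	ctx context.Context,
	fn func(context.Context, I, ...grpc.CallOption) (O, error),
	req I,
	opts []grpc.CallOption,
	logger *slog.Logger, // assumed non-nil; GetLogger falls back to a default
	rpcName string, // method name, used only for the log records
) (O, error) {
	logger.DebugContext(ctx, "rpc call starting", "rpc", rpcName)
	resp, err := fn(ctx, req, opts...)
	if err != nil {
		logger.DebugContext(ctx, "rpc call failed", "rpc", rpcName, "error", err)
	}
	return resp, err
}
```

The REST variants get the matching `executeHTTPRequest` treatment, which additionally absorbs the old `httpClient.Do` / `googleapi.CheckResponse` / `io.ReadAll` boilerplate — hence the ten-line blocks collapsing to a single call in the hunks below, and `io` and `googleapi` dropping out of the imports.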
@@ -494,6 +496,7 @@ func NewSchemaClient(ctx context.Context, opts ...option.ClientOption) (*SchemaC connPool: connPool, schemaClient: pubsubpb.NewSchemaServiceClient(connPool), CallOptions: &client.CallOptions, + logger: internaloption.GetLogger(opts), iamPolicyClient: iampb.NewIAMPolicyClient(connPool), } c.setGoogleClientInfo() @@ -541,6 +544,8 @@ type schemaRESTClient struct { // Points back to the CallOptions field of the containing SchemaClient CallOptions **SchemaCallOptions + + logger *slog.Logger } // NewSchemaRESTClient creates a new schema service rest client. @@ -558,6 +563,7 @@ func NewSchemaRESTClient(ctx context.Context, opts ...option.ClientOption) (*Sch endpoint: endpoint, httpClient: httpClient, CallOptions: &callOpts, + logger: internaloption.GetLogger(opts), } c.setGoogleClientInfo() @@ -572,6 +578,7 @@ func defaultSchemaRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -609,7 +616,7 @@ func (c *schemaGRPCClient) CreateSchema(ctx context.Context, req *pubsubpb.Creat var resp *pubsubpb.Schema err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.schemaClient.CreateSchema(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.schemaClient.CreateSchema, req, settings.GRPC, c.logger, "CreateSchema") return err }, opts...) if err != nil { @@ -627,7 +634,7 @@ func (c *schemaGRPCClient) GetSchema(ctx context.Context, req *pubsubpb.GetSchem var resp *pubsubpb.Schema err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.schemaClient.GetSchema(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.schemaClient.GetSchema, req, settings.GRPC, c.logger, "GetSchema") return err }, opts...) if err != nil { @@ -656,7 +663,7 @@ func (c *schemaGRPCClient) ListSchemas(ctx context.Context, req *pubsubpb.ListSc } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.schemaClient.ListSchemas(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.schemaClient.ListSchemas, req, settings.GRPC, c.logger, "ListSchemas") return err }, opts...) if err != nil { @@ -702,7 +709,7 @@ func (c *schemaGRPCClient) ListSchemaRevisions(ctx context.Context, req *pubsubp } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.schemaClient.ListSchemaRevisions(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.schemaClient.ListSchemaRevisions, req, settings.GRPC, c.logger, "ListSchemaRevisions") return err }, opts...) if err != nil { @@ -737,7 +744,7 @@ func (c *schemaGRPCClient) CommitSchema(ctx context.Context, req *pubsubpb.Commi var resp *pubsubpb.Schema err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.schemaClient.CommitSchema(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.schemaClient.CommitSchema, req, settings.GRPC, c.logger, "CommitSchema") return err }, opts...) 
if err != nil { @@ -755,7 +762,7 @@ func (c *schemaGRPCClient) RollbackSchema(ctx context.Context, req *pubsubpb.Rol var resp *pubsubpb.Schema err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.schemaClient.RollbackSchema(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.schemaClient.RollbackSchema, req, settings.GRPC, c.logger, "RollbackSchema") return err }, opts...) if err != nil { @@ -773,7 +780,7 @@ func (c *schemaGRPCClient) DeleteSchemaRevision(ctx context.Context, req *pubsub var resp *pubsubpb.Schema err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.schemaClient.DeleteSchemaRevision(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.schemaClient.DeleteSchemaRevision, req, settings.GRPC, c.logger, "DeleteSchemaRevision") return err }, opts...) if err != nil { @@ -790,7 +797,7 @@ func (c *schemaGRPCClient) DeleteSchema(ctx context.Context, req *pubsubpb.Delet opts = append((*c.CallOptions).DeleteSchema[0:len((*c.CallOptions).DeleteSchema):len((*c.CallOptions).DeleteSchema)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.schemaClient.DeleteSchema(ctx, req, settings.GRPC...) + _, err = executeRPC(ctx, c.schemaClient.DeleteSchema, req, settings.GRPC, c.logger, "DeleteSchema") return err }, opts...) return err @@ -805,7 +812,7 @@ func (c *schemaGRPCClient) ValidateSchema(ctx context.Context, req *pubsubpb.Val var resp *pubsubpb.ValidateSchemaResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.schemaClient.ValidateSchema(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.schemaClient.ValidateSchema, req, settings.GRPC, c.logger, "ValidateSchema") return err }, opts...) if err != nil { @@ -823,7 +830,7 @@ func (c *schemaGRPCClient) ValidateMessage(ctx context.Context, req *pubsubpb.Va var resp *pubsubpb.ValidateMessageResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.schemaClient.ValidateMessage(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.schemaClient.ValidateMessage, req, settings.GRPC, c.logger, "ValidateMessage") return err }, opts...) if err != nil { @@ -841,7 +848,7 @@ func (c *schemaGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPo var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy") return err }, opts...) if err != nil { @@ -859,7 +866,7 @@ func (c *schemaGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPo var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy") return err }, opts...) 
if err != nil { @@ -877,7 +884,7 @@ func (c *schemaGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.Te var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions") return err }, opts...) if err != nil { @@ -929,17 +936,7 @@ func (c *schemaRESTClient) CreateSchema(ctx context.Context, req *pubsubpb.Creat httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateSchema") if err != nil { return err } @@ -992,17 +989,7 @@ func (c *schemaRESTClient) GetSchema(ctx context.Context, req *pubsubpb.GetSchem httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetSchema") if err != nil { return err } @@ -1067,21 +1054,10 @@ func (c *schemaRESTClient) ListSchemas(ctx context.Context, req *pubsubpb.ListSc } httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListSchemas") if err != nil { return err } - if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -1159,21 +1135,10 @@ func (c *schemaRESTClient) ListSchemaRevisions(ctx context.Context, req *pubsubp } httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListSchemaRevisions") if err != nil { return err } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -1242,17 +1207,7 @@ func (c *schemaRESTClient) CommitSchema(ctx context.Context, req *pubsubpb.Commi httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CommitSchema") if err != nil { return err } @@ -1308,17 +1263,7 @@ func (c *schemaRESTClient) RollbackSchema(ctx context.Context, req *pubsubpb.Rol httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, 
c.httpClient, httpReq, c.logger, jsonReq, "RollbackSchema") if err != nil { return err } @@ -1371,17 +1316,7 @@ func (c *schemaRESTClient) DeleteSchemaRevision(ctx context.Context, req *pubsub httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "DeleteSchemaRevision") if err != nil { return err } @@ -1428,15 +1363,8 @@ func (c *schemaRESTClient) DeleteSchema(ctx context.Context, req *pubsubpb.Delet httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - // Returns nil if there is no error, otherwise wraps - // the response code and body into a non-nil error - return googleapi.CheckResponse(httpRsp) + _, err = executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "DeleteSchema") + return err }, opts...) } @@ -1479,17 +1407,7 @@ func (c *schemaRESTClient) ValidateSchema(ctx context.Context, req *pubsubpb.Val httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "ValidateSchema") if err != nil { return err } @@ -1545,17 +1463,7 @@ func (c *schemaRESTClient) ValidateMessage(ctx context.Context, req *pubsubpb.Va httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "ValidateMessage") if err != nil { return err } @@ -1609,17 +1517,7 @@ func (c *schemaRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPo httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy") if err != nil { return err } @@ -1679,17 +1577,7 @@ func (c *schemaRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPo httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy") if err != nil { return err } @@ -1751,17 +1639,7 @@ func (c *schemaRESTClient) TestIamPermissions(ctx context.Context, req *iampb.Te httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - 
buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions") if err != nil { return err } diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go index 65d3ce16..b89aa4ec 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ import ( "context" "errors" "fmt" - "io" + "log/slog" "math" "net/http" "net/url" @@ -30,7 +30,6 @@ import ( iampb "cloud.google.com/go/iam/apiv1/iampb" pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb" gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/googleapi" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" @@ -76,6 +75,7 @@ func defaultSubscriberGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -768,6 +768,8 @@ type subscriberGRPCClient struct { // The x-goog-* metadata to be sent with each request. xGoogHeaders []string + + logger *slog.Logger } // NewSubscriberClient creates a new subscriber client based on gRPC. @@ -796,6 +798,7 @@ func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*Sub connPool: connPool, subscriberClient: pubsubpb.NewSubscriberClient(connPool), CallOptions: &client.CallOptions, + logger: internaloption.GetLogger(opts), iamPolicyClient: iampb.NewIAMPolicyClient(connPool), } c.setGoogleClientInfo() @@ -843,6 +846,8 @@ type subscriberRESTClient struct { // Points back to the CallOptions field of the containing SubscriberClient CallOptions **SubscriberCallOptions + + logger *slog.Logger } // NewSubscriberRESTClient creates a new subscriber rest client. @@ -862,6 +867,7 @@ func NewSubscriberRESTClient(ctx context.Context, opts ...option.ClientOption) ( endpoint: endpoint, httpClient: httpClient, CallOptions: &callOpts, + logger: internaloption.GetLogger(opts), } c.setGoogleClientInfo() @@ -876,6 +882,7 @@ func defaultSubscriberRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -913,7 +920,7 @@ func (c *subscriberGRPCClient) CreateSubscription(ctx context.Context, req *pubs var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.subscriberClient.CreateSubscription(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.subscriberClient.CreateSubscription, req, settings.GRPC, c.logger, "CreateSubscription") return err }, opts...) 
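(Reviewer aside: the executeRPC helper these hunks switch to is defined outside the lines shown in this diff. Judging from its call sites, and from the inline StreamingPull logging further down, it wraps each unary gRPC call with debug logging on the new slog.Logger field. A minimal sketch under those assumptions; the signature and log-field names are inferred, not taken from this diff:

    // Hypothetical shape of the generated helper: log, invoke, log, return.
    // Assumes imports: context, log/slog, google.golang.org/grpc, google.golang.org/protobuf/proto.
    func executeRPC[I, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) {
        logger.DebugContext(ctx, "api request", "rpcName", rpc)
        resp, err := fn(ctx, req, opts...)
        if err != nil {
            var zero O
            return zero, err
        }
        logger.DebugContext(ctx, "api response", "rpcName", rpc)
        return resp, nil
    }
)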
if err != nil { @@ -931,7 +938,7 @@ func (c *subscriberGRPCClient) GetSubscription(ctx context.Context, req *pubsubp var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.subscriberClient.GetSubscription(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.subscriberClient.GetSubscription, req, settings.GRPC, c.logger, "GetSubscription") return err }, opts...) if err != nil { @@ -949,7 +956,7 @@ func (c *subscriberGRPCClient) UpdateSubscription(ctx context.Context, req *pubs var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.subscriberClient.UpdateSubscription(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.subscriberClient.UpdateSubscription, req, settings.GRPC, c.logger, "UpdateSubscription") return err }, opts...) if err != nil { @@ -978,7 +985,7 @@ func (c *subscriberGRPCClient) ListSubscriptions(ctx context.Context, req *pubsu } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.subscriberClient.ListSubscriptions(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.subscriberClient.ListSubscriptions, req, settings.GRPC, c.logger, "ListSubscriptions") return err }, opts...) if err != nil { @@ -1012,7 +1019,7 @@ func (c *subscriberGRPCClient) DeleteSubscription(ctx context.Context, req *pubs opts = append((*c.CallOptions).DeleteSubscription[0:len((*c.CallOptions).DeleteSubscription):len((*c.CallOptions).DeleteSubscription)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...) + _, err = executeRPC(ctx, c.subscriberClient.DeleteSubscription, req, settings.GRPC, c.logger, "DeleteSubscription") return err }, opts...) return err @@ -1026,7 +1033,7 @@ func (c *subscriberGRPCClient) ModifyAckDeadline(ctx context.Context, req *pubsu opts = append((*c.CallOptions).ModifyAckDeadline[0:len((*c.CallOptions).ModifyAckDeadline):len((*c.CallOptions).ModifyAckDeadline)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...) + _, err = executeRPC(ctx, c.subscriberClient.ModifyAckDeadline, req, settings.GRPC, c.logger, "ModifyAckDeadline") return err }, opts...) return err @@ -1040,7 +1047,7 @@ func (c *subscriberGRPCClient) Acknowledge(ctx context.Context, req *pubsubpb.Ac opts = append((*c.CallOptions).Acknowledge[0:len((*c.CallOptions).Acknowledge):len((*c.CallOptions).Acknowledge)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...) + _, err = executeRPC(ctx, c.subscriberClient.Acknowledge, req, settings.GRPC, c.logger, "Acknowledge") return err }, opts...) return err @@ -1055,7 +1062,7 @@ func (c *subscriberGRPCClient) Pull(ctx context.Context, req *pubsubpb.PullReque var resp *pubsubpb.PullResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.subscriberClient.Pull(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.subscriberClient.Pull, req, settings.GRPC, c.logger, "Pull") return err }, opts...) 
if err != nil { @@ -1070,7 +1077,9 @@ func (c *subscriberGRPCClient) StreamingPull(ctx context.Context, opts ...gax.Ca opts = append((*c.CallOptions).StreamingPull[0:len((*c.CallOptions).StreamingPull):len((*c.CallOptions).StreamingPull)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error + c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "StreamingPull") resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...) + c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "StreamingPull") return err }, opts...) if err != nil { @@ -1087,7 +1096,7 @@ func (c *subscriberGRPCClient) ModifyPushConfig(ctx context.Context, req *pubsub opts = append((*c.CallOptions).ModifyPushConfig[0:len((*c.CallOptions).ModifyPushConfig):len((*c.CallOptions).ModifyPushConfig)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...) + _, err = executeRPC(ctx, c.subscriberClient.ModifyPushConfig, req, settings.GRPC, c.logger, "ModifyPushConfig") return err }, opts...) return err @@ -1102,7 +1111,7 @@ func (c *subscriberGRPCClient) GetSnapshot(ctx context.Context, req *pubsubpb.Ge var resp *pubsubpb.Snapshot err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.subscriberClient.GetSnapshot(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.subscriberClient.GetSnapshot, req, settings.GRPC, c.logger, "GetSnapshot") return err }, opts...) if err != nil { @@ -1131,7 +1140,7 @@ func (c *subscriberGRPCClient) ListSnapshots(ctx context.Context, req *pubsubpb. } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.subscriberClient.ListSnapshots(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.subscriberClient.ListSnapshots, req, settings.GRPC, c.logger, "ListSnapshots") return err }, opts...) if err != nil { @@ -1166,7 +1175,7 @@ func (c *subscriberGRPCClient) CreateSnapshot(ctx context.Context, req *pubsubpb var resp *pubsubpb.Snapshot err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.subscriberClient.CreateSnapshot(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.subscriberClient.CreateSnapshot, req, settings.GRPC, c.logger, "CreateSnapshot") return err }, opts...) if err != nil { @@ -1184,7 +1193,7 @@ func (c *subscriberGRPCClient) UpdateSnapshot(ctx context.Context, req *pubsubpb var resp *pubsubpb.Snapshot err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.subscriberClient.UpdateSnapshot(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.subscriberClient.UpdateSnapshot, req, settings.GRPC, c.logger, "UpdateSnapshot") return err }, opts...) if err != nil { @@ -1201,7 +1210,7 @@ func (c *subscriberGRPCClient) DeleteSnapshot(ctx context.Context, req *pubsubpb opts = append((*c.CallOptions).DeleteSnapshot[0:len((*c.CallOptions).DeleteSnapshot):len((*c.CallOptions).DeleteSnapshot)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...) 
+ _, err = executeRPC(ctx, c.subscriberClient.DeleteSnapshot, req, settings.GRPC, c.logger, "DeleteSnapshot") return err }, opts...) return err @@ -1216,7 +1225,7 @@ func (c *subscriberGRPCClient) Seek(ctx context.Context, req *pubsubpb.SeekReque var resp *pubsubpb.SeekResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.subscriberClient.Seek(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.subscriberClient.Seek, req, settings.GRPC, c.logger, "Seek") return err }, opts...) if err != nil { @@ -1234,7 +1243,7 @@ func (c *subscriberGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetI var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy") return err }, opts...) if err != nil { @@ -1252,7 +1261,7 @@ func (c *subscriberGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetI var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy") return err }, opts...) if err != nil { @@ -1270,7 +1279,7 @@ func (c *subscriberGRPCClient) TestIamPermissions(ctx context.Context, req *iamp var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions") return err }, opts...) 
if err != nil { @@ -1328,17 +1337,7 @@ func (c *subscriberRESTClient) CreateSubscription(ctx context.Context, req *pubs httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateSubscription") if err != nil { return err } @@ -1388,17 +1387,7 @@ func (c *subscriberRESTClient) GetSubscription(ctx context.Context, req *pubsubp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetSubscription") if err != nil { return err } @@ -1456,17 +1445,7 @@ func (c *subscriberRESTClient) UpdateSubscription(ctx context.Context, req *pubs httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateSubscription") if err != nil { return err } @@ -1528,21 +1507,10 @@ func (c *subscriberRESTClient) ListSubscriptions(ctx context.Context, req *pubsu } httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListSubscriptions") if err != nil { return err } - if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -1606,15 +1574,8 @@ func (c *subscriberRESTClient) DeleteSubscription(ctx context.Context, req *pubs httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - // Returns nil if there is no error, otherwise wraps - // the response code and body into a non-nil error - return googleapi.CheckResponse(httpRsp) + _, err = executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "DeleteSubscription") + return err }, opts...) } @@ -1658,15 +1619,8 @@ func (c *subscriberRESTClient) ModifyAckDeadline(ctx context.Context, req *pubsu httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - // Returns nil if there is no error, otherwise wraps - // the response code and body into a non-nil error - return googleapi.CheckResponse(httpRsp) + _, err = executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "ModifyAckDeadline") + return err }, opts...) 
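(Likewise, executeHTTPRequest is not shown in this diff; it evidently folds the removed Do / CheckResponse / ReadAll boilerplate into a single call, with jsonReq presumably passed only so the request body can be logged. A sketch under those assumptions:

    // Hypothetical shape: log, execute, check status, drain body.
    // Assumes imports: context, io, log/slog, net/http, google.golang.org/api/googleapi.
    func executeHTTPRequest(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpc string) ([]byte, error) {
        logger.DebugContext(ctx, "api request", "rpcName", rpc)
        rsp, err := client.Do(req)
        if err != nil {
            return nil, err
        }
        defer rsp.Body.Close()
        // As before, wraps a non-2xx response code and body into a non-nil error.
        if err := googleapi.CheckResponse(rsp); err != nil {
            return nil, err
        }
        return io.ReadAll(rsp.Body)
    }

This also explains why the googleapi and io imports disappear from the generated clients: the helper now owns them.)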
} @@ -1712,15 +1666,8 @@ func (c *subscriberRESTClient) Acknowledge(ctx context.Context, req *pubsubpb.Ac httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - // Returns nil if there is no error, otherwise wraps - // the response code and body into a non-nil error - return googleapi.CheckResponse(httpRsp) + _, err = executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "Acknowledge") + return err }, opts...) } @@ -1763,17 +1710,7 @@ func (c *subscriberRESTClient) Pull(ctx context.Context, req *pubsubpb.PullReque httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "Pull") if err != nil { return err } @@ -1844,15 +1781,8 @@ func (c *subscriberRESTClient) ModifyPushConfig(ctx context.Context, req *pubsub httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - // Returns nil if there is no error, otherwise wraps - // the response code and body into a non-nil error - return googleapi.CheckResponse(httpRsp) + _, err = executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "ModifyPushConfig") + return err }, opts...) } @@ -1893,17 +1823,7 @@ func (c *subscriberRESTClient) GetSnapshot(ctx context.Context, req *pubsubpb.Ge httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetSnapshot") if err != nil { return err } @@ -1968,21 +1888,10 @@ func (c *subscriberRESTClient) ListSnapshots(ctx context.Context, req *pubsubpb. 
} httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListSnapshots") if err != nil { return err } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -2066,17 +1975,7 @@ func (c *subscriberRESTClient) CreateSnapshot(ctx context.Context, req *pubsubpb httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateSnapshot") if err != nil { return err } @@ -2137,17 +2036,7 @@ func (c *subscriberRESTClient) UpdateSnapshot(ctx context.Context, req *pubsubpb httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateSnapshot") if err != nil { return err } @@ -2202,15 +2091,8 @@ func (c *subscriberRESTClient) DeleteSnapshot(ctx context.Context, req *pubsubpb httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - // Returns nil if there is no error, otherwise wraps - // the response code and body into a non-nil error - return googleapi.CheckResponse(httpRsp) + _, err = executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "DeleteSnapshot") + return err }, opts...) 
} @@ -2259,17 +2141,7 @@ func (c *subscriberRESTClient) Seek(ctx context.Context, req *pubsubpb.SeekReque httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "Seek") if err != nil { return err } @@ -2323,17 +2195,7 @@ func (c *subscriberRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetI httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy") if err != nil { return err } @@ -2393,17 +2255,7 @@ func (c *subscriberRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetI httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy") if err != nil { return err } @@ -2465,17 +2317,7 @@ func (c *subscriberRESTClient) TestIamPermissions(ctx context.Context, req *iamp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) + buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions") if err != nil { return err } diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go index 6107d7a6..b255a705 100644 --- a/vendor/cloud.google.com/go/pubsub/doc.go +++ b/vendor/cloud.google.com/go/pubsub/doc.go @@ -14,11 +14,11 @@ /* Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub -messages, hiding the details of the underlying server RPCs. Google Cloud +messages, hiding the details of the underlying server RPCs. Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders and receivers. -More information about Google Cloud Pub/Sub is available at +More information about Pub/Sub is available at https://cloud.google.com/pubsub/docs See https://godoc.org/cloud.google.com/go for authentication, timeouts, @@ -26,39 +26,39 @@ connection pooling and similar aspects of this package. # Publishing -Google Cloud Pub/Sub messages are published to topics. Topics may be created -using the pubsub package like so: +Pub/Sub messages are published to topics. A [Topic] may be created +using [Client.CreateTopic] like so: topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name") -Messages may then be published to a topic: +Messages may then be published to a [Topic]: res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")}) -Publish queues the message for publishing and returns immediately. When enough +[Topic.Publish] queues the message for publishing and returns immediately. 
When enough messages have accumulated, or enough time has elapsed, the batch of messages is sent to the Pub/Sub service. -Publish returns a PublishResult, which behaves like a future: its Get method +[Topic.Publish] returns a [PublishResult], which behaves like a future: its Get method blocks until the message has been sent to the service. -The first time you call Publish on a topic, goroutines are started in the -background. To clean up these goroutines, call Stop: +The first time you call [Topic.Publish] on a [Topic], goroutines are started in the +background. To clean up these goroutines, call [Topic.Stop]: topic.Stop() # Receiving -To receive messages published to a topic, clients create subscriptions -to the topic. There may be more than one subscription per topic; each message -that is published to the topic will be delivered to all of its subscriptions. +To receive messages published to a [Topic], clients create a [Subscription] +for the topic. There may be more than one subscription per topic; each message +that is published to the topic will be delivered to all associated subscriptions. -Subscriptions may be created like so: +A [Subscription] may be created like so: sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name", pubsub.SubscriptionConfig{Topic: topic}) -Messages are then consumed from a subscription via callback. +Messages are then consumed from a [Subscription] via callback. err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) { log.Printf("Got message: %s", m.Data) @@ -69,19 +69,19 @@ Messages are then consumed from a subscription via callback. } The callback is invoked concurrently by multiple goroutines, maximizing -throughput. To terminate a call to Receive, cancel its context. +throughput. To terminate a call to [Subscription.Receive], cancel its context. -Once client code has processed the message, it must call Message.Ack or -Message.Nack; otherwise the message will eventually be redelivered. Ack/Nack -MUST be called within the Receive handler function, and not from a goroutine. +Once client code has processed the [Message], it must call Message.Ack or +Message.Nack; otherwise the Message will eventually be redelivered. Ack/Nack +MUST be called within the [Subscription.Receive] handler function, and not from a goroutine. Otherwise, flow control (e.g. ReceiveSettings.MaxOutstandingMessages) will not be respected, and messages can get orphaned when cancelling Receive. If the client cannot or doesn't want to process the message, it can call Message.Nack to speed redelivery. For more information and configuration options, see -"Ack Deadlines" below. +Ack Deadlines below. -Note: It is possible for Messages to be redelivered even if Message.Ack has +Note: It is possible for a [Message] to be redelivered even if Message.Ack has been called. Client code must be robust to multiple deliveries of messages. Note: This uses pubsub's streaming pull feature. This feature has properties that @@ -91,7 +91,7 @@ pull method. # Streams Management -The number of StreamingPull connections can be configured by setting sub.ReceiveSettings.NumGoroutines. +The number of StreamingPull connections can be configured by setting NumGoroutines in [ReceiveSettings]. The default value of 10 means the client library will maintain 10 StreamingPull connections. This is more than sufficient for most use cases, as StreamingPull connections can handle up to 10 MB/s https://cloud.google.com/pubsub/quotas#resource_limits.
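(Taken together, the publish and receive halves of this doc comment describe one flow; a compact end-to-end sketch against the v1 API described here, with project, topic, and subscription IDs as placeholders:

    client, err := pubsub.NewClient(ctx, "my-project")
    if err != nil {
        // TODO: handle error.
    }
    topic := client.Topic("topic-name")
    res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")})
    if _, err := res.Get(ctx); err != nil { // blocks until the service accepts the message
        // TODO: handle error.
    }
    topic.Stop() // flush and stop the background publish goroutines

    sub := client.Subscription("sub-name")
    err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
        log.Printf("Got message: %s", m.Data)
        m.Ack() // Ack/Nack must be called inside the handler
    })
)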
In some cases, using too many streams @@ -125,10 +125,8 @@ either: - The "MaxExtension" duration elapses from the time the message is fetched from the server. This defaults to 60m. -Ack deadlines are extended periodically by the client. The initial ack -deadline given to messages is based on the subscription's AckDeadline property, -which defaults to 10s. The period between extensions, as well as the -length of the extension, automatically adjusts based on the time it takes the +Ack deadlines are extended periodically by the client. The period between extensions, +as well as the length of the extension, automatically adjusts based on the time it takes the subscriber application to ack messages (based on the 99th percentile of ack latency). By default, this extension period is capped at 10m, but this limit can be configured by the "MaxExtensionPeriod" setting. This has the effect that subscribers that process @@ -144,15 +142,7 @@ library sends such an extension: the Pub/Sub server would wait the remaining 2m55s before re-sending the messages out to other subscribers. Please note that by default, the client library does not use the subscription's -AckDeadline for the MaxExtension value. To enforce the subscription's AckDeadline, -set MaxExtension to the subscription's AckDeadline: - - cfg, err := sub.Config(ctx) - if err != nil { - // TODO: handle err - } - - sub.ReceiveSettings.MaxExtension = cfg.AckDeadline +AckDeadline for the MaxExtension value. # Slow Message Processing @@ -164,7 +154,7 @@ by firewalls. See the example at https://godoc.org/cloud.google.com/go/pubsub/ap To use an emulator with this library, you can set the PUBSUB_EMULATOR_HOST environment variable to the address at which your emulator is running. This will -send requests to that address instead of to Cloud Pub/Sub. You can then create +send requests to that address instead of to Pub/Sub. You can then create and use a client as usual: // Set PUBSUB_EMULATOR_HOST environment variable. @@ -178,5 +168,7 @@ and use a client as usual: // TODO: Handle error. } defer client.Close() + +Deprecated: Please use cloud.google.com/go/pubsub/v2. */ package pubsub // import "cloud.google.com/go/pubsub" diff --git a/vendor/cloud.google.com/go/pubsub/flow_controller.go b/vendor/cloud.google.com/go/pubsub/flow_controller.go index 8b352508..44c2faef 100644 --- a/vendor/cloud.google.com/go/pubsub/flow_controller.go +++ b/vendor/cloud.google.com/go/pubsub/flow_controller.go @@ -125,8 +125,6 @@ func newSubscriptionFlowController(fc FlowControlSettings) flowController { // as if it were equal to maxSize. func (f *flowController) acquire(ctx context.Context, size int) error { switch f.limitBehavior { - case FlowControlIgnore: - return nil case FlowControlBlock: if f.semCount != nil { if err := f.semCount.Acquire(ctx, 1); err != nil { @@ -156,6 +154,7 @@ func (f *flowController) acquire(ctx context.Context, size int) error { return ErrFlowControllerMaxOutstandingBytes } } + case FlowControlIgnore: } if f.semCount != nil { @@ -172,19 +171,19 @@ func (f *flowController) acquire(ctx context.Context, size int) error { // release notes that one message of size bytes is no longer outstanding. 
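(The flow_controller.go changes around this point rework FlowControlIgnore: outstanding message and byte counts are still tracked, so the OutstandingMessages/OutstandingBytes metrics stay accurate, but the limiting semaphores are no longer acquired or released. On the public API this behavior is selected through the publisher's flow control settings, for example:

    t := client.Topic("topic-name")
    t.PublishSettings.FlowControlSettings = pubsub.FlowControlSettings{
        MaxOutstandingMessages: 100,              // cap on unacknowledged publishes
        MaxOutstandingBytes:    10 * 1024 * 1024, // cap on buffered bytes
        LimitExceededBehavior:  pubsub.FlowControlBlock, // or FlowControlIgnore / FlowControlSignalError
    }
)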
func (f *flowController) release(ctx context.Context, size int) { - if f.limitBehavior == FlowControlIgnore { - return - } - if f.semCount != nil { outstandingMessages := atomic.AddInt64(&f.countRemaining, -1) f.recordOutstandingMessages(ctx, outstandingMessages) - f.semCount.Release(1) + if f.limitBehavior != FlowControlIgnore { + f.semCount.Release(1) + } } if f.semSize != nil { outstandingBytes := atomic.AddInt64(&f.bytesRemaining, -1*f.bound(size)) f.recordOutstandingBytes(ctx, outstandingBytes) - f.semSize.Release(f.bound(size)) + if f.limitBehavior != FlowControlIgnore { + f.semSize.Release(f.bound(size)) + } } } @@ -195,18 +194,25 @@ func (f *flowController) bound(size int) int64 { return int64(size) } -// count returns the number of outstanding messages. +// count returns the number of outstanding messages available. // if maxCount is 0, this will always return 0. func (f *flowController) count() int { return int(atomic.LoadInt64(&f.countRemaining)) } +// size returns the size of outstanding messages. +// if maxSize is 0, this will always return 0. +func (f *flowController) size() int { + return int(atomic.LoadInt64(&f.bytesRemaining)) +} + func (f *flowController) recordOutstandingMessages(ctx context.Context, n int64) { if f.purpose == flowControllerPurposeTopic { recordStat(ctx, PublisherOutstandingMessages, n) return } + // Otherwise record this as subscriber outstanding messages. recordStat(ctx, OutstandingMessages, n) } @@ -216,5 +222,6 @@ func (f *flowController) recordOutstandingBytes(ctx context.Context, n int64) { return } + // Otherwise record this as subscriber outstanding bytes. recordStat(ctx, OutstandingBytes, n) } diff --git a/vendor/cloud.google.com/go/pubsub/internal/version.go b/vendor/cloud.google.com/go/pubsub/internal/version.go index f37b8600..97049964 100644 --- a/vendor/cloud.google.com/go/pubsub/internal/version.go +++ b/vendor/cloud.google.com/go/pubsub/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.42.0" +const Version = "1.50.1" diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go index 9f603590..762f2d5f 100644 --- a/vendor/cloud.google.com/go/pubsub/iterator.go +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -279,7 +279,7 @@ func (it *messageIterator) receive(maxToPull int32) ([]*Message, error) { // If the cancellation comes from the underlying grpc client getting closed, // do propagate the cancellation error. // See https://github.com/googleapis/google-cloud-go/pull/10153#discussion_r1600814775 - if err != nil && it.ps.ctx.Err() == context.Canceled { + if err != nil && errors.Is(it.ps.ctx.Err(), context.Canceled) { err = io.EOF } } @@ -335,14 +335,17 @@ if m.Attributes != nil { ctx = propagation.TraceContext{}.Extract(ctx, newMessageCarrier(m)) } - attr := getSubscriberOpts(it.projectID, it.subID, m) - _, span := startSpan(ctx, subscribeSpanName, it.subID, attr...)
- span.SetAttributes( - attribute.Bool(eosAttribute, it.enableExactlyOnceDelivery), - attribute.String(ackIDAttribute, ackID), - semconv.MessagingBatchMessageCount(len(msgs)), - semconv.CodeFunction("receive"), + opts := getSubscriberOpts(it.projectID, it.subID, m) + opts = append( + opts, + trace.WithAttributes( + attribute.Bool(eosAttribute, it.enableExactlyOnceDelivery), + semconv.MessagingGCPPubsubMessageAckID(ackID), + semconv.MessagingBatchMessageCount(len(msgs)), + semconv.CodeFunction("receive"), + ), ) + _, span := startSpan(ctx, subscribeSpanName, it.subID, opts...) // Always store the subscribe span, even if sampling isn't enabled. // This is useful since we need to propagate the sampling flag // to the callback in Receive, so traces have an unbroken sampling decision. @@ -403,7 +406,7 @@ func (it *messageIterator) pullMessages(maxToPull int32) ([]*pb.ReceivedMessage, MaxMessages: maxToPull, }, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) switch { - case err == context.Canceled: + case errors.Is(err, context.Canceled): return nil, nil case status.Code(err) == codes.Canceled: return nil, nil @@ -658,11 +661,16 @@ func (it *messageIterator) sendAck(m map[string]*AckResult) { // Create the single ack span for this request, and for each // message, add Subscribe<->Ack links. opts := getCommonOptions(it.projectID, it.subID) - opts = append(opts, trace.WithLinks(links...)) + opts = append( + opts, + trace.WithLinks(links...), + trace.WithAttributes( + semconv.MessagingBatchMessageCount(len(ackIDs)), + semconv.CodeFunction("sendAck"), + ), + ) _, ackSpan := startSpan(context.Background(), ackSpanName, it.subID, opts...) defer ackSpan.End() - ackSpan.SetAttributes(semconv.MessagingBatchMessageCount(len(ackIDs)), - semconv.CodeFunction("sendAck")) if ackSpan.SpanContext().IsSampled() { for _, s := range subscribeSpans { s.AddLink(trace.Link{ @@ -740,16 +748,25 @@ func (it *messageIterator) sendModAck(m map[string]*AckResult, deadline time.Dur // Create the single modack/nack span for this request, and for each // message, add Subscribe<->Modack links. opts := getCommonOptions(it.projectID, it.subID) - opts = append(opts, trace.WithLinks(links...)) - _, mSpan := startSpan(context.Background(), spanName, it.subID, opts...) - defer mSpan.End() + opts = append( + opts, + trace.WithLinks(links...), + trace.WithAttributes( + semconv.MessagingBatchMessageCount(len(ackIDs)), + semconv.CodeFunction("sendModAck"), + ), + ) if !isNack { - mSpan.SetAttributes( - semconv.MessagingGCPPubsubMessageAckDeadline(int(deadlineSec)), - attribute.Bool(receiptModackAttribute, isReceipt)) + opts = append( + opts, + trace.WithAttributes( + semconv.MessagingGCPPubsubMessageAckDeadline(int(deadlineSec)), + attribute.Bool(receiptModackAttribute, isReceipt), + ), + ) } - mSpan.SetAttributes(semconv.MessagingBatchMessageCount(len(ackIDs)), - semconv.CodeFunction("sendModAck")) + _, mSpan := startSpan(context.Background(), spanName, it.subID, opts...) 
+ defer mSpan.End() if mSpan.SpanContext().IsSampled() { for _, s := range subscribeSpans { s.AddLink(trace.Link{ @@ -876,7 +893,7 @@ func (it *messageIterator) pingStream() { spr := &pb.StreamingPullRequest{} it.eoMu.RLock() if it.sendNewAckDeadline { - spr.StreamAckDeadlineSeconds = int32(it.ackDeadline()) + spr.StreamAckDeadlineSeconds = int32(it.ackDeadline().Seconds()) it.sendNewAckDeadline = false } it.eoMu.RUnlock() diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go index d4124bcb..508a9905 100644 --- a/vendor/cloud.google.com/go/pubsub/pubsub.go +++ b/vendor/cloud.google.com/go/pubsub/pubsub.go @@ -31,6 +31,7 @@ import ( "google.golang.org/api/option" "google.golang.org/api/option/internaloption" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" ) @@ -155,7 +156,7 @@ func NewClientWithConfig(ctx context.Context, projectID string, config *ClientCo if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" { emulatorOpts := []option.ClientOption{ option.WithEndpoint(addr), - option.WithGRPCDialOption(grpc.WithInsecure()), + option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())), option.WithoutAuthentication(), option.WithTelemetryDisabled(), internaloption.SkipDialSettingsValidation(), diff --git a/vendor/cloud.google.com/go/pubsub/pullstream.go b/vendor/cloud.google.com/go/pubsub/pullstream.go index c5ea8f51..231e5a64 100644 --- a/vendor/cloud.google.com/go/pubsub/pullstream.go +++ b/vendor/cloud.google.com/go/pubsub/pullstream.go @@ -31,8 +31,9 @@ import ( // the stream on a retryable error. type pullStream struct { ctx context.Context - open func() (pb.Subscriber_StreamingPullClient, error) - cancel context.CancelFunc + cancel context.CancelFunc // cancel function of the context above + open func() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error) + close context.CancelFunc // cancel function to close down the currently open stream mu sync.Mutex spc *pb.Subscriber_StreamingPullClient @@ -50,8 +51,9 @@ func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName return &pullStream{ ctx: ctx, cancel: cancel, - open: func() (pb.Subscriber_StreamingPullClient, error) { - spc, err := streamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) + open: func() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error) { + sctx, close := context.WithCancel(ctx) + spc, err := streamingPull(sctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) if err == nil { recordStat(ctx, StreamRequestCount, 1) streamAckDeadline := int32(maxDurationPerLeaseExtension / time.Second) @@ -69,9 +71,10 @@ func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName }) } if err != nil { - return nil, err + close() + return nil, nil, err } - return spc, nil + return spc, close, nil }, } } @@ -100,29 +103,33 @@ func (s *pullStream) get(spc *pb.Subscriber_StreamingPullClient) (*pb.Subscriber if spc != s.spc { return s.spc, nil } + // we are about to open a new stream: if necessary, make sure the previous one is closed + if s.close != nil { + s.close() + } // Either this is the very first call on this stream (s.spc == nil), or we have a valid // retry request. Either way, open a new stream. // The lock is held here for a long time, but it doesn't matter because no callers could get // anything done anyway. 
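(One small but notable migration in the pubsub.go hunk above: grpc.WithInsecure has long been deprecated in grpc-go, and the emulator path now builds the same plaintext transport explicitly:

    import "google.golang.org/grpc/credentials/insecure"

    // Before (deprecated):
    //   option.WithGRPCDialOption(grpc.WithInsecure())
    // After (equivalent plaintext transport):
    option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials()))
)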
s.spc = new(pb.Subscriber_StreamingPullClient) - *s.spc, s.err = s.openWithRetry() // Any error from openWithRetry is permanent. + *s.spc, s.close, s.err = s.openWithRetry() // Any error from openWithRetry is permanent. return s.spc, s.err } -func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, error) { +func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error) { r := defaultRetryer{} for { recordStat(s.ctx, StreamOpenCount, 1) - spc, err := s.open() + spc, close, err := s.open() bo, shouldRetry := r.Retry(err) if err != nil && shouldRetry { recordStat(s.ctx, StreamRetryCount, 1) if err := gax.Sleep(s.ctx, bo); err != nil { - return nil, err + return nil, nil, err } continue } - return spc, err + return spc, close, err } } diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go index c6311d6e..b42288eb 100644 --- a/vendor/cloud.google.com/go/pubsub/subscription.go +++ b/vendor/cloud.google.com/go/pubsub/subscription.go @@ -310,9 +310,13 @@ type BigQueryConfig struct { Table string // When true, use the topic's schema as the columns to write to in BigQuery, - // if it exists. + // if it exists. Cannot be enabled at the same time as UseTableSchema. UseTopicSchema bool + // When true, use the table's schema as the columns to write to in BigQuery, + // if it exists. Cannot be enabled at the same time as UseTopicSchema. + UseTableSchema bool + // When true, write the subscription name, message_id, publish_time, // attributes, and ordering_key to additional columns in the table. The // subscription name, message_id, and publish_time fields are put in their own @@ -345,6 +349,7 @@ func (bc *BigQueryConfig) toProto() *pb.BigQueryConfig { pbCfg := &pb.BigQueryConfig{ Table: bc.Table, UseTopicSchema: bc.UseTopicSchema, + UseTableSchema: bc.UseTableSchema, WriteMetadata: bc.WriteMetadata, DropUnknownFields: bc.DropUnknownFields, State: pb.BigQueryConfig_State(bc.State), @@ -546,7 +551,7 @@ type SubscriptionConfig struct { // When calling Subscription.Receive(), the client will check this // value with a call to Subscription.Config(), which requires the // roles/viewer or roles/pubsub.viewer role on your service account. - // If that call fails, mesages with ordering keys will be delivered in order. + // If that call fails, messages with ordering keys will be delivered in order. EnableMessageOrdering bool // DeadLetterPolicy specifies the conditions for dead lettering messages in @@ -602,6 +607,10 @@ type SubscriptionConfig struct { // receive messages. This field is set only in responses from the server; // it is ignored if it is set in any requests. State SubscriptionState + + // MessageTransforms are the transforms to be applied to messages before they are delivered + // to subscribers. Transforms are applied in the order specified. + MessageTransforms []MessageTransform } // String returns the globally unique printable name of the subscription config. 
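(The new UseTableSchema field added above is the mirror image of UseTopicSchema, and the two are mutually exclusive. A minimal sketch of creating a BigQuery subscription with the new field; the project, dataset, and IDs are placeholders:

    cfg := pubsub.SubscriptionConfig{
        Topic: topic,
        BigQueryConfig: pubsub.BigQueryConfig{
            Table:          "my-project.my_dataset.my_table",
            UseTableSchema: true, // must not be combined with UseTopicSchema
            WriteMetadata:  true,
        },
    }
    sub, err := client.CreateSubscription(ctx, "bq-sub", cfg)
)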
@@ -660,6 +669,7 @@ func (cfg *SubscriptionConfig) toProto(name string) *pb.Subscription { RetryPolicy: pbRetryPolicy, Detached: cfg.Detached, EnableExactlyOnceDelivery: cfg.EnableExactlyOnceDelivery, + MessageTransforms: messageTransformsToProto(cfg.MessageTransforms), } } @@ -690,6 +700,7 @@ func protoToSubscriptionConfig(pbSub *pb.Subscription, c *Client) (SubscriptionC TopicMessageRetentionDuration: pbSub.TopicMessageRetentionDuration.AsDuration(), EnableExactlyOnceDelivery: pbSub.EnableExactlyOnceDelivery, State: SubscriptionState(pbSub.State), + MessageTransforms: protoToMessageTransforms(pbSub.MessageTransforms), } if pc := protoToPushConfig(pbSub.PushConfig); pc != nil { subC.PushConfig = *pc @@ -739,6 +750,7 @@ func protoToBQConfig(pbBQ *pb.BigQueryConfig) *BigQueryConfig { bq := &BigQueryConfig{ Table: pbBQ.GetTable(), UseTopicSchema: pbBQ.GetUseTopicSchema(), + UseTableSchema: pbBQ.GetUseTableSchema(), DropUnknownFields: pbBQ.GetDropUnknownFields(), WriteMetadata: pbBQ.GetWriteMetadata(), State: BigQueryConfigState(pbBQ.State), @@ -899,8 +911,7 @@ type ReceiveSettings struct { // // MinExtensionPeriod must be between 10s and 600s (inclusive). This configuration // can be disabled by specifying a duration less than (or equal to) 0. - // Defaults to off but set to 60 seconds if the subscription has exactly-once delivery enabled, - // which will be added in a future release. + // Disabled by default but set to 60 seconds if the subscription has exactly-once delivery enabled. MinExtensionPeriod time.Duration // MaxOutstandingMessages is the maximum number of unprocessed messages @@ -915,6 +926,8 @@ type ReceiveSettings struct { // be treated as if it were DefaultReceiveSettings.MaxOutstandingBytes. If // the value is negative, then there will be no limit on the number of bytes // for unprocessed messages. + // This defaults to 1e9 or 1 GB. For machines that have less memory available, + // it is recommended to decrease this value so as to not run into OOM issues. MaxOutstandingBytes int // UseLegacyFlowControl disables enforcing flow control settings at the Cloud @@ -1058,6 +1071,9 @@ type SubscriptionConfigToUpdate struct { // If set, EnableExactlyOnce is changed. EnableExactlyOnceDelivery optional.Bool + + // If non-nil, the entire list of message transforms is replaced with the following. + MessageTransforms []MessageTransform } // Update changes an existing subscription according to the fields set in cfg. 
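(SubscriptionConfigToUpdate now carries MessageTransforms: nil means "leave unchanged", while any non-nil slice replaces the whole list, as the updateRequest hunk below shows. A sketch of replacing a subscription's transforms; note that the MessageTransform/JavaScriptUDF field names here are assumptions not shown in this diff, so consult the package docs for the exact shape:

    cfg, err := sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
        MessageTransforms: []pubsub.MessageTransform{
            // Hypothetical UDF transform applied before delivery.
            {Transform: pubsub.JavaScriptUDF{FunctionName: "redact", Code: jsCode}},
        },
    })
)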
@@ -1126,6 +1142,10 @@ func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.Update psub.EnableExactlyOnceDelivery = optional.ToBool(cfg.EnableExactlyOnceDelivery) paths = append(paths, "enable_exactly_once_delivery") } + if cfg.MessageTransforms != nil { + psub.MessageTransforms = messageTransformsToProto(cfg.MessageTransforms) + paths = append(paths, "message_transforms") + } return &pb.UpdateSubscriptionRequest{ Subscription: psub, UpdateMask: &fmpb.FieldMask{Paths: paths}, @@ -1379,7 +1399,7 @@ func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Mes } msgs, err := iter.receive(maxToPull) - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } if err != nil { diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go index 1991fa7f..8e402a7f 100644 --- a/vendor/cloud.google.com/go/pubsub/topic.go +++ b/vendor/cloud.google.com/go/pubsub/topic.go @@ -44,6 +44,7 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/durationpb" fmpb "google.golang.org/protobuf/types/known/fieldmaskpb" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -281,6 +282,10 @@ type TopicConfig struct { // IngestionDataSourceSettings are settings for ingestion from a // data source into this topic. IngestionDataSourceSettings *IngestionDataSourceSettings + + // MessageTransforms are the transforms to be applied to messages published to the topic. + // Transforms are applied in the order specified. + MessageTransforms []MessageTransform } // String returns the printable globally unique name for the topic config. @@ -315,6 +320,7 @@ func (tc *TopicConfig) toProto() *pb.Topic { SchemaSettings: schemaSettingsToProto(tc.SchemaSettings), MessageRetentionDuration: retDur, IngestionDataSourceSettings: tc.IngestionDataSourceSettings.toProto(), + MessageTransforms: messageTransformsToProto(tc.MessageTransforms), } return pbt } @@ -350,8 +356,14 @@ type TopicConfigToUpdate struct { // IngestionDataSourceSettings are settings for ingestion from a // data source into this topic. // + // When changing this value, the entire data source settings object must be applied, + // rather than just the differences. This includes the source and logging settings. + // // Use the zero value &IngestionDataSourceSettings{} to remove the ingestion settings from the topic. IngestionDataSourceSettings *IngestionDataSourceSettings + + // If non-nil, the entire list of message transforms is replaced with the following. + MessageTransforms []MessageTransform } func protoToTopicConfig(pbt *pb.Topic) TopicConfig { @@ -363,6 +375,7 @@ func protoToTopicConfig(pbt *pb.Topic) TopicConfig { SchemaSettings: protoToSchemaSettings(pbt.SchemaSettings), State: TopicState(pbt.State), IngestionDataSourceSettings: protoToIngestionDataSourceSettings(pbt.IngestionDataSourceSettings), + MessageTransforms: protoToMessageTransforms(pbt.MessageTransforms), } if pbt.GetMessageRetentionDuration() != nil { tc.RetentionDuration = pbt.GetMessageRetentionDuration().AsDuration() @@ -425,6 +438,8 @@ func messageStoragePolicyToProto(msp *MessageStoragePolicy) *pb.MessageStoragePo // IngestionDataSourceSettings enables ingestion from a data source into this topic. type IngestionDataSourceSettings struct { Source IngestionDataSource + + PlatformLogsSettings *PlatformLogsSettings } // IngestionDataSource is the kind of ingestion source to be used. 
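(The TopicConfigToUpdate comment above is easy to miss: ingestion settings are replaced as a whole object, not merged field by field, so an update must restate every field that should survive. For example, with the ARNs and service account as placeholders; the Kinesis field names are taken from the existing type and may need checking against the package docs:

    cfg, err := topic.Update(ctx, pubsub.TopicConfigToUpdate{
        IngestionDataSourceSettings: &pubsub.IngestionDataSourceSettings{
            Source: &pubsub.IngestionDataSourceAWSKinesis{
                StreamARN:         streamARN, // restate, even if unchanged
                ConsumerARN:       consumerARN,
                AWSRoleARN:        roleARN,
                GCPServiceAccount: serviceAccount,
            },
        },
    })
)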
@@ -495,6 +510,280 @@ func (i *IngestionDataSourceAWSKinesis) isIngestionDataSource() bool { return true } +// CloudStorageIngestionState denotes the possible states for ingestion from Cloud Storage. +type CloudStorageIngestionState int + +const ( + // CloudStorageIngestionStateUnspecified is the default value. This value is unused. + CloudStorageIngestionStateUnspecified = iota + + // CloudStorageIngestionStateActive means ingestion is active. + CloudStorageIngestionStateActive + + // CloudStorageIngestionPermissionDenied means encountering an error while calling the Cloud Storage API. + // This can happen if the Pub/Sub SA has not been granted the + // [appropriate permissions](https://cloud.google.com/storage/docs/access-control/iam-permissions): + // - storage.objects.list: to list the objects in a bucket. + // - storage.objects.get: to read the objects in a bucket. + // - storage.buckets.get: to verify the bucket exists. + CloudStorageIngestionPermissionDenied + + // CloudStorageIngestionPublishPermissionDenied means encountering an error when publishing to the topic. + // This can happen if the Pub/Sub SA has not been granted the [appropriate publish + // permissions](https://cloud.google.com/pubsub/docs/access-control#pubsub.publisher) + CloudStorageIngestionPublishPermissionDenied + + // CloudStorageIngestionBucketNotFound means the provided bucket doesn't exist. + CloudStorageIngestionBucketNotFound + + // CloudStorageIngestionTooManyObjects means the bucket has too many objects, ingestion will be paused. + CloudStorageIngestionTooManyObjects +) + +// IngestionDataSourceCloudStorage are ingestion settings for Cloud Storage. +type IngestionDataSourceCloudStorage struct { + // State is an output-only field indicating the state of the Cloud storage ingestion source. + State CloudStorageIngestionState + + // Bucket is the Cloud Storage bucket. The bucket name must be without any + // prefix like "gs://". See the bucket naming requirements (https://cloud.google.com/storage/docs/buckets#naming). + Bucket string + + // InputFormat is the format of objects in Cloud Storage. + // Defaults to TextFormat. + InputFormat ingestionDataSourceCloudStorageInputFormat + + // MinimumObjectCreateTime means objects with a larger or equal creation timestamp will be + // ingested. + MinimumObjectCreateTime time.Time + + // MatchGlob is the pattern used to match objects that will be ingested. If + // empty, all objects will be ingested. See the [supported + // patterns](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob). + MatchGlob string +} + +var _ IngestionDataSource = (*IngestionDataSourceCloudStorage)(nil) + +func (i *IngestionDataSourceCloudStorage) isIngestionDataSource() bool { + return true +} + +type ingestionDataSourceCloudStorageInputFormat interface { + isCloudStorageIngestionInputFormat() bool +} + +var _ ingestionDataSourceCloudStorageInputFormat = (*IngestionDataSourceCloudStorageTextFormat)(nil) +var _ ingestionDataSourceCloudStorageInputFormat = (*IngestionDataSourceCloudStorageAvroFormat)(nil) +var _ ingestionDataSourceCloudStorageInputFormat = (*IngestionDataSourceCloudStoragePubSubAvroFormat)(nil) + +// IngestionDataSourceCloudStorageTextFormat means Cloud Storage data will be interpreted as text. 
+type IngestionDataSourceCloudStorageTextFormat struct { + Delimiter string +} + +func (i *IngestionDataSourceCloudStorageTextFormat) isCloudStorageIngestionInputFormat() bool { + return true +} + +// IngestionDataSourceCloudStorageAvroFormat means Cloud Storage data will be interpreted in Avro format. +type IngestionDataSourceCloudStorageAvroFormat struct{} + +func (i *IngestionDataSourceCloudStorageAvroFormat) isCloudStorageIngestionInputFormat() bool { + return true +} + +// IngestionDataSourceCloudStoragePubSubAvroFormat is used assuming the data was written using Cloud +// Storage subscriptions https://cloud.google.com/pubsub/docs/cloudstorage. +type IngestionDataSourceCloudStoragePubSubAvroFormat struct{} + +func (i *IngestionDataSourceCloudStoragePubSubAvroFormat) isCloudStorageIngestionInputFormat() bool { + return true +} + +// EventHubsState denotes the possible states for ingestion from Event Hubs. +type EventHubsState int + +const ( + // EventHubsStateUnspecified is the default value. This value is unused. + EventHubsStateUnspecified = iota + + // EventHubsStateActive means the state is active. + EventHubsStateActive + + // EventHubsStatePermissionDenied indicates encountered permission denied error + // while consuming data from Event Hubs. + // This can happen when `client_id`, or `tenant_id` are invalid. Or the + // right permissions haven't been granted. + EventHubsStatePermissionDenied + + // EventHubsStatePublishPermissionDenied indicates permission denied encountered + // while publishing to the topic. + EventHubsStatePublishPermissionDenied + + // EventHubsStateNamespaceNotFound indicates the provided Event Hubs namespace couldn't be found. + EventHubsStateNamespaceNotFound + + // EventHubsStateNotFound indicates the provided Event Hub couldn't be found. + EventHubsStateNotFound + + // EventHubsStateSubscriptionNotFound indicates the provided Event Hubs subscription couldn't be found. + EventHubsStateSubscriptionNotFound + + // EventHubsStateResourceGroupNotFound indicates the provided Event Hubs resource group couldn't be found. + EventHubsStateResourceGroupNotFound +) + +// IngestionDataSourceAzureEventHubs are ingestion settings for Azure Event Hubs. +type IngestionDataSourceAzureEventHubs struct { + // Output only field that indicates the state of the Event Hubs ingestion source. + State EventHubsState + + // Name of the resource group within the Azure subscription + ResourceGroup string + + // Name of the Event Hubs namespace + Namespace string + + // Name of the Event Hub. + EventHub string + + // Client ID of the Azure application that is being used to authenticate Pub/Sub. + ClientID string + + // Tenant ID of the Azure application that is being used to authenticate Pub/Sub. + TenantID string + + // The Azure subscription ID + SubscriptionID string + + // GCPServiceAccount is the GCP service account to be used for Federated Identity + // authentication. + GCPServiceAccount string +} + +var _ IngestionDataSource = (*IngestionDataSourceAzureEventHubs)(nil) + +func (i *IngestionDataSourceAzureEventHubs) isIngestionDataSource() bool { + return true +} + +// AmazonMSKState denotes the possible states for ingestion from Amazon MSK. +type AmazonMSKState int + +const ( + // AmazonMSKStateUnspecified is the default value. This value is unused. + AmazonMSKStateUnspecified = iota + + // AmazonMSKActive indicates MSK topic is active. + AmazonMSKActive + + // AmazonMSKPermissionDenied indicates permission denied encountered while consuming data from Amazon MSK.
+ AmazonMSKPermissionDenied + + // AmazonMSKPublishPermissionDenied indicates permission denied encountered while publishing to the topic. + AmazonMSKPublishPermissionDenied + + // AmazonMSKClusterNotFound indicates the provided MSK cluster wasn't found. + AmazonMSKClusterNotFound + + // AmazonMSKTopicNotFound indicates the provided topic wasn't found. + AmazonMSKTopicNotFound +) + +// IngestionDataSourceAmazonMSK are ingestion settings for Amazon MSK. +type IngestionDataSourceAmazonMSK struct { + // An output-only field that indicates the state of the Amazon + // MSK ingestion source. + State AmazonMSKState + + // The Amazon Resource Name (ARN) that uniquely identifies the + // cluster. + ClusterARN string + + // The name of the topic in the Amazon MSK cluster that Pub/Sub + // will import from. + Topic string + + // AWS role ARN to be used for Federated Identity authentication + // with Amazon MSK. Check the Pub/Sub docs for how to set up this role and + // the required permissions that need to be attached to it. + AWSRoleARN string + + // The GCP service account to be used for Federated Identity + // authentication with Amazon MSK (via an `AssumeRoleWithWebIdentity` call + // for the provided role). The `aws_role_arn` must be set up with + // `accounts.google.com:sub` equal to this service account number. + GCPServiceAccount string +} + +var _ IngestionDataSource = (*IngestionDataSourceAmazonMSK)(nil) + +func (i *IngestionDataSourceAmazonMSK) isIngestionDataSource() bool { + return true +} + +// ConfluentCloudState denotes the state of an ingestion topic with Confluent Cloud. +type ConfluentCloudState int + +const ( + // ConfluentCloudStateUnspecified is the default value. This value is unused. + ConfluentCloudStateUnspecified = iota + + // ConfluentCloudActive indicates the state is active. + ConfluentCloudActive = 1 + + // ConfluentCloudPermissionDenied indicates permission denied encountered + // while consuming data from Confluent Cloud. + ConfluentCloudPermissionDenied = 2 + + // ConfluentCloudPublishPermissionDenied indicates permission denied encountered + // while publishing to the topic. + ConfluentCloudPublishPermissionDenied = 3 + + // ConfluentCloudUnreachableBootstrapServer indicates the provided bootstrap + // server address is unreachable. + ConfluentCloudUnreachableBootstrapServer = 4 + + // ConfluentCloudClusterNotFound indicates the provided cluster wasn't found. + ConfluentCloudClusterNotFound = 5 + + // ConfluentCloudTopicNotFound indicates the provided topic wasn't found. + ConfluentCloudTopicNotFound = 6 +) + +// IngestionDataSourceConfluentCloud are ingestion settings for Confluent Cloud. +type IngestionDataSourceConfluentCloud struct { + // An output-only field that indicates the state of the + // Confluent Cloud ingestion source. + State ConfluentCloudState + + // The address of the bootstrap server. The format is url:port. + BootstrapServer string + + // The id of the cluster. + ClusterID string + + // The name of the topic in the Confluent Cloud cluster that + // Pub/Sub will import from. + Topic string + + // The id of the identity pool to be used for Federated Identity + // authentication with Confluent Cloud. See + // https://docs.confluent.io/cloud/current/security/authenticate/workload-identities/identity-providers/oauth/identity-pools.html#add-oauth-identity-pools. + IdentityPoolID string + + // The GCP service account to be used for Federated Identity + // authentication with `identity_pool_id`.
+	GCPServiceAccount string
+}
+
+var _ IngestionDataSource = (*IngestionDataSourceConfluentCloud)(nil)
+
+func (i *IngestionDataSourceConfluentCloud) isIngestionDataSource() bool {
+	return true
+}
+
 func protoToIngestionDataSourceSettings(pbs *pb.IngestionDataSourceSettings) *IngestionDataSourceSettings {
 	if pbs == nil {
 		return nil
@@ -509,7 +798,61 @@ func protoToIngestionDataSourceSettings(pbs *pb.IngestionDataSourceSettings) *In
 			AWSRoleARN:        k.GetAwsRoleArn(),
 			GCPServiceAccount: k.GetGcpServiceAccount(),
 		}
+	} else if cs := pbs.GetCloudStorage(); cs != nil {
+		var format ingestionDataSourceCloudStorageInputFormat
+		switch t := cs.InputFormat.(type) {
+		case *pb.IngestionDataSourceSettings_CloudStorage_TextFormat_:
+			format = &IngestionDataSourceCloudStorageTextFormat{
+				Delimiter: t.TextFormat.GetDelimiter(),
+			}
+		case *pb.IngestionDataSourceSettings_CloudStorage_AvroFormat_:
+			format = &IngestionDataSourceCloudStorageAvroFormat{}
+		case *pb.IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat:
+			format = &IngestionDataSourceCloudStoragePubSubAvroFormat{}
+		}
+		s.Source = &IngestionDataSourceCloudStorage{
+			State:                   CloudStorageIngestionState(cs.GetState()),
+			Bucket:                  cs.GetBucket(),
+			InputFormat:             format,
+			MinimumObjectCreateTime: cs.GetMinimumObjectCreateTime().AsTime(),
+			MatchGlob:               cs.GetMatchGlob(),
+		}
+	} else if e := pbs.GetAzureEventHubs(); e != nil {
+		s.Source = &IngestionDataSourceAzureEventHubs{
+			State:             EventHubsState(e.GetState()),
+			ResourceGroup:     e.GetResourceGroup(),
+			Namespace:         e.GetNamespace(),
+			EventHub:          e.GetEventHub(),
+			ClientID:          e.GetClientId(),
+			TenantID:          e.GetTenantId(),
+			SubscriptionID:    e.GetSubscriptionId(),
+			GCPServiceAccount: e.GetGcpServiceAccount(),
+		}
+	} else if m := pbs.GetAwsMsk(); m != nil {
+		s.Source = &IngestionDataSourceAmazonMSK{
+			State:             AmazonMSKState(m.GetState()),
+			ClusterARN:        m.GetClusterArn(),
+			Topic:             m.GetTopic(),
+			AWSRoleARN:        m.GetAwsRoleArn(),
+			GCPServiceAccount: m.GetGcpServiceAccount(),
+		}
+	} else if c := pbs.GetConfluentCloud(); c != nil {
+		s.Source = &IngestionDataSourceConfluentCloud{
+			State:             ConfluentCloudState(c.GetState()),
+			BootstrapServer:   c.GetBootstrapServer(),
+			ClusterID:         c.GetClusterId(),
+			Topic:             c.GetTopic(),
+			IdentityPoolID:    c.GetIdentityPoolId(),
+			GCPServiceAccount: c.GetGcpServiceAccount(),
+		}
 	}
+
+	if pbs.PlatformLogsSettings != nil {
+		s.PlatformLogsSettings = &PlatformLogsSettings{
+			Severity: PlatformLogsSeverity(pbs.PlatformLogsSettings.Severity),
+		}
+	}
+
 	return s
 }
@@ -522,6 +865,11 @@ func (i *IngestionDataSourceSettings) toProto() *pb.IngestionDataSourceSettings
 		return nil
 	}
 	pbs := &pb.IngestionDataSourceSettings{}
+	if i.PlatformLogsSettings != nil {
+		pbs.PlatformLogsSettings = &pb.PlatformLogsSettings{
+			Severity: pb.PlatformLogsSettings_Severity(i.PlatformLogsSettings.Severity),
+		}
+	}
 	if out := i.Source; out != nil {
 		if k, ok := out.(*IngestionDataSourceAWSKinesis); ok {
 			pbs.Source = &pb.IngestionDataSourceSettings_AwsKinesis_{
@@ -534,10 +882,109 @@ func (i *IngestionDataSourceSettings) toProto() *pb.IngestionDataSourceSettings
 				},
 			}
 		}
+		if cs, ok := out.(*IngestionDataSourceCloudStorage); ok {
+			switch format := cs.InputFormat.(type) {
+			case *IngestionDataSourceCloudStorageTextFormat:
+				pbs.Source = &pb.IngestionDataSourceSettings_CloudStorage_{
+					CloudStorage: &pb.IngestionDataSourceSettings_CloudStorage{
+						State:       pb.IngestionDataSourceSettings_CloudStorage_State(cs.State),
+						Bucket:      cs.Bucket,
+						InputFormat: &pb.IngestionDataSourceSettings_CloudStorage_TextFormat_{
TextFormat: &pb.IngestionDataSourceSettings_CloudStorage_TextFormat{ + Delimiter: &format.Delimiter, + }, + }, + MinimumObjectCreateTime: timestamppb.New(cs.MinimumObjectCreateTime), + MatchGlob: cs.MatchGlob, + }, + } + case *IngestionDataSourceCloudStorageAvroFormat: + pbs.Source = &pb.IngestionDataSourceSettings_CloudStorage_{ + CloudStorage: &pb.IngestionDataSourceSettings_CloudStorage{ + Bucket: cs.Bucket, + InputFormat: &pb.IngestionDataSourceSettings_CloudStorage_AvroFormat_{ + AvroFormat: &pb.IngestionDataSourceSettings_CloudStorage_AvroFormat{}, + }, + MinimumObjectCreateTime: timestamppb.New(cs.MinimumObjectCreateTime), + MatchGlob: cs.MatchGlob, + }, + } + case *IngestionDataSourceCloudStoragePubSubAvroFormat: + pbs.Source = &pb.IngestionDataSourceSettings_CloudStorage_{ + CloudStorage: &pb.IngestionDataSourceSettings_CloudStorage{ + State: pb.IngestionDataSourceSettings_CloudStorage_State(cs.State), + Bucket: cs.Bucket, + InputFormat: &pb.IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat{ + PubsubAvroFormat: &pb.IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat{}, + }, + MinimumObjectCreateTime: timestamppb.New(cs.MinimumObjectCreateTime), + MatchGlob: cs.MatchGlob, + }, + } + } + } + if e, ok := out.(*IngestionDataSourceAzureEventHubs); ok { + pbs.Source = &pb.IngestionDataSourceSettings_AzureEventHubs_{ + AzureEventHubs: &pb.IngestionDataSourceSettings_AzureEventHubs{ + ResourceGroup: e.ResourceGroup, + Namespace: e.Namespace, + EventHub: e.EventHub, + ClientId: e.ClientID, + TenantId: e.TenantID, + SubscriptionId: e.SubscriptionID, + GcpServiceAccount: e.GCPServiceAccount, + }, + } + } + if m, ok := out.(*IngestionDataSourceAmazonMSK); ok { + pbs.Source = &pb.IngestionDataSourceSettings_AwsMsk_{ + AwsMsk: &pb.IngestionDataSourceSettings_AwsMsk{ + ClusterArn: m.ClusterARN, + Topic: m.Topic, + AwsRoleArn: m.AWSRoleARN, + GcpServiceAccount: m.GCPServiceAccount, + }, + } + } + if c, ok := out.(*IngestionDataSourceConfluentCloud); ok { + pbs.Source = &pb.IngestionDataSourceSettings_ConfluentCloud_{ + ConfluentCloud: &pb.IngestionDataSourceSettings_ConfluentCloud{ + BootstrapServer: c.BootstrapServer, + ClusterId: c.ClusterID, + Topic: c.Topic, + IdentityPoolId: c.IdentityPoolID, + GcpServiceAccount: c.GCPServiceAccount, + }, + } + } } return pbs } +// PlatformLogsSettings configures logging produced by Pub/Sub. +// Currently only valid on Cloud Storage ingestion topics. +type PlatformLogsSettings struct { + Severity PlatformLogsSeverity +} + +// PlatformLogsSeverity are the severity levels of Platform Logs. +type PlatformLogsSeverity int32 + +const ( + // PlatformLogsSeverityUnspecified is the default value. Logs level is unspecified. Logs will be disabled. + PlatformLogsSeverityUnspecified PlatformLogsSeverity = iota + // PlatformLogsSeverityDisabled means logs will be disabled. + PlatformLogsSeverityDisabled + // PlatformLogsSeverityDebug means debug logs and higher-severity logs will be written. + PlatformLogsSeverityDebug + // PlatformLogsSeverityInfo means info logs and higher-severity logs will be written. + PlatformLogsSeverityInfo + // PlatformLogsSeverityWarning means warning logs and higher-severity logs will be written. + PlatformLogsSeverityWarning + // PlatformLogsSeverityError means only error logs will be written. + PlatformLogsSeverityError +) + // Config returns the TopicConfig for the topic. 
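In the wrapper API above, PlatformLogsSettings hangs off IngestionDataSourceSettings rather than TopicConfig, so platform logs are configured alongside the ingestion source they describe. A minimal sketch of creating a Cloud Storage import topic with warning-level platform logs against this vendored version of the library; the project ID, topic ID, bucket, and glob below are placeholders, not values taken from this diff:

package main

import (
	"context"
	"time"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		panic(err) // example only; handle the error properly in real code
	}
	defer client.Close()

	cfg := pubsub.TopicConfig{
		IngestionDataSourceSettings: &pubsub.IngestionDataSourceSettings{
			// Ingest newline-delimited text objects created in the last day.
			Source: &pubsub.IngestionDataSourceCloudStorage{
				Bucket:                  "my-ingestion-bucket", // placeholder bucket
				InputFormat:             &pubsub.IngestionDataSourceCloudStorageTextFormat{Delimiter: "\n"},
				MinimumObjectCreateTime: time.Now().Add(-24 * time.Hour),
				MatchGlob:               "**.txt",
			},
			// Platform logs ride along with the ingestion settings.
			PlatformLogsSettings: &pubsub.PlatformLogsSettings{
				Severity: pubsub.PlatformLogsSeverityWarning,
			},
		},
	}
	if _, err := client.CreateTopicWithConfig(ctx, "gcs-import-topic", &cfg); err != nil {
		panic(err) // example only
	}
}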
func (t *Topic) Config(ctx context.Context) (TopicConfig, error) { pbt, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name}) @@ -615,6 +1062,10 @@ func (t *Topic) updateRequest(cfg TopicConfigToUpdate) *pb.UpdateTopicRequest { pt.IngestionDataSourceSettings = cfg.IngestionDataSourceSettings.toProto() paths = append(paths, "ingestion_data_source_settings") } + if cfg.MessageTransforms != nil { + pt.MessageTransforms = messageTransformsToProto(cfg.MessageTransforms) + paths = append(paths, "message_transforms") + } return &pb.UpdateTopicRequest{ Topic: pt, UpdateMask: &fmpb.FieldMask{Paths: paths}, @@ -748,8 +1199,8 @@ func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult { var createSpan trace.Span if t.enableTracing { opts := getPublishSpanAttributes(t.c.projectID, t.ID(), msg) + opts = append(opts, trace.WithAttributes(semconv.CodeFunction("Publish"))) ctx, createSpan = startSpan(ctx, createSpanName, t.ID(), opts...) - createSpan.SetAttributes(semconv.CodeFunction("Publish")) } ctx, err := tag.New(ctx, tag.Insert(keyStatus, "OK"), tag.Upsert(keyTopic, t.name)) if err != nil { @@ -799,8 +1250,6 @@ func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult { fcSpan.End() } - _, batcherSpan = startSpan(ctx, batcherSpanName, "") - bmsg := &bundledMessage{ msg: msg, res: r, @@ -809,6 +1258,7 @@ func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult { } if t.enableTracing { + _, batcherSpan = startSpan(ctx, batcherSpanName, "") bmsg.batcherSpan = batcherSpan // Inject the context from the first publish span rather than from flow control / batching. @@ -893,11 +1343,15 @@ func (t *Topic) initBundler() { for _, m := range bmsgs { m.batcherSpan.End() m.createSpan.AddEvent(eventPublishStart, trace.WithAttributes(semconv.MessagingBatchMessageCount(len(bmsgs)))) - defer m.createSpan.End() - defer m.createSpan.AddEvent(eventPublishEnd) } } t.publishMessageBundle(ctx, bmsgs) + if t.enableTracing { + for _, m := range bmsgs { + m.createSpan.AddEvent(eventPublishEnd) + m.createSpan.End() + } + } }) t.scheduler.DelayThreshold = t.PublishSettings.DelayThreshold t.scheduler.BundleCountThreshold = t.PublishSettings.CountThreshold @@ -973,8 +1427,14 @@ func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) opts := getCommonOptions(projectID, topicID) // Add link to publish RPC span of createSpan(s). opts = append(opts, trace.WithLinks(links...)) + opts = append( + opts, + trace.WithAttributes( + semconv.MessagingBatchMessageCount(numMsgs), + semconv.CodeFunction("publishMessageBundle"), + ), + ) ctx, pSpan = startSpan(ctx, publishRPCSpanName, topicID, opts...) - pSpan.SetAttributes(semconv.MessagingBatchMessageCount(numMsgs), semconv.CodeFunction("publishMessageBundle")) defer pSpan.End() // Add the reverse link to createSpan(s) of publish RPC span. diff --git a/vendor/cloud.google.com/go/pubsub/trace.go b/vendor/cloud.google.com/go/pubsub/trace.go index 1d41e9d8..6ff88a84 100644 --- a/vendor/cloud.google.com/go/pubsub/trace.go +++ b/vendor/cloud.google.com/go/pubsub/trace.go @@ -20,6 +20,7 @@ import ( "log" "sync" + pb "cloud.google.com/go/pubsub/apiv1/pubsubpb" "cloud.google.com/go/pubsub/internal" "go.opencensus.io/stats" "go.opencensus.io/stats/view" @@ -273,33 +274,42 @@ func tracer() trace.Tracer { var _ propagation.TextMapCarrier = (*messageCarrier)(nil) -// messageCarrier injects and extracts traces from a pubsub.Message. +// messageCarrier injects and extracts traces from pubsub.Message attributes. 
type messageCarrier struct { - msg *Message + attributes map[string]string } const googclientPrefix string = "googclient_" // newMessageCarrier creates a new PubsubMessageCarrier. func newMessageCarrier(msg *Message) messageCarrier { - return messageCarrier{msg: msg} + return messageCarrier{attributes: msg.Attributes} +} + +// NewMessageCarrierFromPB creates a propagation.TextMapCarrier that can be used to extract the trace +// context from a protobuf PubsubMessage. +// +// Example: +// ctx = propagation.TraceContext{}.Extract(ctx, pubsub.NewMessageCarrierFromPB(msg)) +func NewMessageCarrierFromPB(msg *pb.PubsubMessage) propagation.TextMapCarrier { + return messageCarrier{attributes: msg.Attributes} } // Get retrieves a single value for a given key. func (c messageCarrier) Get(key string) string { - return c.msg.Attributes[googclientPrefix+key] + return c.attributes[googclientPrefix+key] } // Set sets an attribute. func (c messageCarrier) Set(key, val string) { - c.msg.Attributes[googclientPrefix+key] = val + c.attributes[googclientPrefix+key] = val } // Keys returns a slice of all keys in the carrier. func (c messageCarrier) Keys() []string { i := 0 - out := make([]string, len(c.msg.Attributes)) - for k := range c.msg.Attributes { + out := make([]string, len(c.attributes)) + for k := range c.attributes { out[i] = k i++ } @@ -350,14 +360,11 @@ const ( resultExpired = "expired" // custom pubsub specific attributes - gcpProjectIDAttribute = "gcp.project_id" - pubsubPrefix = "messaging.gcp_pubsub." - orderingAttribute = pubsubPrefix + "message.ordering_key" - deliveryAttemptAttribute = pubsubPrefix + "message.delivery_attempt" - eosAttribute = pubsubPrefix + "exactly_once_delivery" - ackIDAttribute = pubsubPrefix + "message.ack_id" - resultAttribute = pubsubPrefix + "result" - receiptModackAttribute = pubsubPrefix + "is_receipt_modack" + gcpProjectIDAttribute = "gcp.project_id" + pubsubPrefix = "messaging.gcp_pubsub." + eosAttribute = pubsubPrefix + "exactly_once_delivery" + resultAttribute = pubsubPrefix + "result" + receiptModackAttribute = pubsubPrefix + "is_receipt_modack" ) func startSpan(ctx context.Context, spanType, resourceID string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { @@ -373,7 +380,7 @@ func getPublishSpanAttributes(project, dst string, msg *Message, attrs ...attrib trace.WithAttributes( semconv.MessagingMessageID(msg.ID), semconv.MessagingMessageBodySize(len(msg.Data)), - attribute.String(orderingAttribute, msg.OrderingKey), + semconv.MessagingGCPPubsubMessageOrderingKey(msg.OrderingKey), ), trace.WithAttributes(attrs...), trace.WithSpanKind(trace.SpanKindProducer), @@ -387,13 +394,13 @@ func getSubscriberOpts(project, dst string, msg *Message, attrs ...attribute.Key trace.WithAttributes( semconv.MessagingMessageID(msg.ID), semconv.MessagingMessageBodySize(len(msg.Data)), - attribute.String(orderingAttribute, msg.OrderingKey), + semconv.MessagingGCPPubsubMessageOrderingKey(msg.OrderingKey), ), trace.WithAttributes(attrs...), trace.WithSpanKind(trace.SpanKindConsumer), } if msg.DeliveryAttempt != nil { - opts = append(opts, trace.WithAttributes(attribute.Int(deliveryAttemptAttribute, *msg.DeliveryAttempt))) + opts = append(opts, trace.WithAttributes(semconv.MessagingGCPPubsubMessageDeliveryAttempt(*msg.DeliveryAttempt))) } opts = append(opts, getCommonOptions(project, dst)...) 
 	return opts
diff --git a/vendor/cloud.google.com/go/pubsub/transform.go b/vendor/cloud.google.com/go/pubsub/transform.go
new file mode 100644
index 00000000..ff78d7de
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/transform.go
@@ -0,0 +1,134 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	pb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
+)
+
+// MessageTransform is a single instance of a message transformation to apply to messages.
+type MessageTransform struct {
+	// The transform to apply to messages.
+	// If multiple JavaScriptUDFs are specified on a resource,
+	// each must have a unique `function_name`.
+	Transform Transform
+
+	// If true, the transform is disabled and will not be applied to
+	// messages. Defaults to `false`.
+	Disabled bool
+}
+
+func messageTransformsToProto(m []MessageTransform) []*pb.MessageTransform {
+	if m == nil {
+		return nil
+	}
+	var transforms []*pb.MessageTransform
+	for _, mt := range m {
+		switch transform := mt.Transform.(type) {
+		case JavaScriptUDF:
+			transforms = append(transforms, &pb.MessageTransform{
+				Disabled:  mt.Disabled,
+				Transform: transform.toProto(),
+			})
+		default:
+		}
+	}
+	return transforms
+}
+
+func protoToMessageTransforms(m []*pb.MessageTransform) []MessageTransform {
+	if m == nil {
+		return nil
+	}
+	var transforms []MessageTransform
+	for _, mt := range m {
+		switch t := mt.Transform.(type) {
+		case *pb.MessageTransform_JavascriptUdf:
+			transform := MessageTransform{
+				Transform: protoToJavaScriptUDF(t),
+				Disabled:  mt.Disabled,
+			}
+			transforms = append(transforms, transform)
+		default:
+		}
+	}
+	return transforms
+}
+
+// Transform represents the type of transforms that can be applied to messages.
+// Currently JavaScriptUDF is the only type that satisfies this.
+type Transform interface {
+	isTransform() bool
+}
+
+// JavaScriptUDF is a user-defined JavaScript function
+// that can transform or filter a Pub/Sub message.
+type JavaScriptUDF struct {
+	// Name of the JavaScript function that should be applied to Pub/Sub
+	// messages.
+	FunctionName string
+
+	// JavaScript code that contains a function `function_name` with the
+	// below signature:
+	//
+	// /**
+	// * Transforms a Pub/Sub message.
+	//
+	// * @return {(Object<string, (string | Object<string, string>)>|null)} - To
+	// * filter a message, return `null`. To transform a message return a map
+	// * with the following keys:
+	// *   - (required) 'data' : {string}
+	// *   - (optional) 'attributes' : {Object<string, string>}
+	// * Returning empty `attributes` will remove all attributes from the
+	// * message.
+	// *
+	// * @param {(Object<string, (string | Object<string, string>)>} Pub/Sub
+	// * message. Keys:
+	// *   - (required) 'data' : {string}
+	// *   - (required) 'attributes' : {Object<string, string>}
+	// *
+	// * @param {Object<string, any>} metadata - Pub/Sub message metadata.
+	// * Keys:
+	// *   - (required) 'message_id'  : {string}
+	// *   - (optional) 'publish_time': {string} YYYY-MM-DDTHH:MM:SSZ format
+	// *   - (optional) 'ordering_key': {string}
+	// */
+	//
+	// function <function_name>(message, metadata) {
+	// }
+	Code string
+}
+
+var _ Transform = (*JavaScriptUDF)(nil)
+
+func (i JavaScriptUDF) isTransform() bool {
+	return true
+}
+
+func (j *JavaScriptUDF) toProto() *pb.MessageTransform_JavascriptUdf {
+	return &pb.MessageTransform_JavascriptUdf{
+		JavascriptUdf: &pb.JavaScriptUDF{
+			FunctionName: j.FunctionName,
+			Code:         j.Code,
+		},
+	}
+}
+
+func protoToJavaScriptUDF(m *pb.MessageTransform_JavascriptUdf) JavaScriptUDF {
+	return JavaScriptUDF{
+		FunctionName: m.JavascriptUdf.FunctionName,
+		Code:         m.JavascriptUdf.Code,
+	}
+}
diff --git a/vendor/cloud.google.com/go/pubsub/v2/LICENSE b/vendor/cloud.google.com/go/pubsub/v2/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/v2/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb/pubsub.pb.go b/vendor/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb/pubsub.pb.go new file mode 100644 index 00000000..c69a0e5f --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb/pubsub.pb.go @@ -0,0 +1,10022 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.2 +// protoc v4.25.7 +// source: google/pubsub/v1/pubsub.proto + +package pubsubpb + +import ( + context "context" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Possible states for ingestion from Amazon Kinesis Data Streams. +type IngestionDataSourceSettings_AwsKinesis_State int32 + +const ( + // Default value. This value is unused. + IngestionDataSourceSettings_AwsKinesis_STATE_UNSPECIFIED IngestionDataSourceSettings_AwsKinesis_State = 0 + // Ingestion is active. 
+ IngestionDataSourceSettings_AwsKinesis_ACTIVE IngestionDataSourceSettings_AwsKinesis_State = 1 + // Permission denied encountered while consuming data from Kinesis. + // This can happen if: + // - The provided `aws_role_arn` does not exist or does not have the + // appropriate permissions attached. + // - The provided `aws_role_arn` is not set up properly for Identity + // Federation using `gcp_service_account`. + // - The Pub/Sub SA is not granted the + // `iam.serviceAccounts.getOpenIdToken` permission on + // `gcp_service_account`. + IngestionDataSourceSettings_AwsKinesis_KINESIS_PERMISSION_DENIED IngestionDataSourceSettings_AwsKinesis_State = 2 + // Permission denied encountered while publishing to the topic. This can + // happen if the Pub/Sub SA has not been granted the [appropriate publish + // permissions](https://cloud.google.com/pubsub/docs/access-control#pubsub.publisher) + IngestionDataSourceSettings_AwsKinesis_PUBLISH_PERMISSION_DENIED IngestionDataSourceSettings_AwsKinesis_State = 3 + // The Kinesis stream does not exist. + IngestionDataSourceSettings_AwsKinesis_STREAM_NOT_FOUND IngestionDataSourceSettings_AwsKinesis_State = 4 + // The Kinesis consumer does not exist. + IngestionDataSourceSettings_AwsKinesis_CONSUMER_NOT_FOUND IngestionDataSourceSettings_AwsKinesis_State = 5 +) + +// Enum value maps for IngestionDataSourceSettings_AwsKinesis_State. +var ( + IngestionDataSourceSettings_AwsKinesis_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "KINESIS_PERMISSION_DENIED", + 3: "PUBLISH_PERMISSION_DENIED", + 4: "STREAM_NOT_FOUND", + 5: "CONSUMER_NOT_FOUND", + } + IngestionDataSourceSettings_AwsKinesis_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "KINESIS_PERMISSION_DENIED": 2, + "PUBLISH_PERMISSION_DENIED": 3, + "STREAM_NOT_FOUND": 4, + "CONSUMER_NOT_FOUND": 5, + } +) + +func (x IngestionDataSourceSettings_AwsKinesis_State) Enum() *IngestionDataSourceSettings_AwsKinesis_State { + p := new(IngestionDataSourceSettings_AwsKinesis_State) + *p = x + return p +} + +func (x IngestionDataSourceSettings_AwsKinesis_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (IngestionDataSourceSettings_AwsKinesis_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[0].Descriptor() +} + +func (IngestionDataSourceSettings_AwsKinesis_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[0] +} + +func (x IngestionDataSourceSettings_AwsKinesis_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use IngestionDataSourceSettings_AwsKinesis_State.Descriptor instead. +func (IngestionDataSourceSettings_AwsKinesis_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 0, 0} +} + +// Possible states for ingestion from Cloud Storage. +type IngestionDataSourceSettings_CloudStorage_State int32 + +const ( + // Default value. This value is unused. + IngestionDataSourceSettings_CloudStorage_STATE_UNSPECIFIED IngestionDataSourceSettings_CloudStorage_State = 0 + // Ingestion is active. + IngestionDataSourceSettings_CloudStorage_ACTIVE IngestionDataSourceSettings_CloudStorage_State = 1 + // Permission denied encountered while calling the Cloud Storage API. 
This + // can happen if the Pub/Sub SA has not been granted the + // [appropriate + // permissions](https://cloud.google.com/storage/docs/access-control/iam-permissions): + // - storage.objects.list: to list the objects in a bucket. + // - storage.objects.get: to read the objects in a bucket. + // - storage.buckets.get: to verify the bucket exists. + IngestionDataSourceSettings_CloudStorage_CLOUD_STORAGE_PERMISSION_DENIED IngestionDataSourceSettings_CloudStorage_State = 2 + // Permission denied encountered while publishing to the topic. This can + // happen if the Pub/Sub SA has not been granted the [appropriate publish + // permissions](https://cloud.google.com/pubsub/docs/access-control#pubsub.publisher) + IngestionDataSourceSettings_CloudStorage_PUBLISH_PERMISSION_DENIED IngestionDataSourceSettings_CloudStorage_State = 3 + // The provided Cloud Storage bucket doesn't exist. + IngestionDataSourceSettings_CloudStorage_BUCKET_NOT_FOUND IngestionDataSourceSettings_CloudStorage_State = 4 + // The Cloud Storage bucket has too many objects, ingestion will be + // paused. + IngestionDataSourceSettings_CloudStorage_TOO_MANY_OBJECTS IngestionDataSourceSettings_CloudStorage_State = 5 +) + +// Enum value maps for IngestionDataSourceSettings_CloudStorage_State. +var ( + IngestionDataSourceSettings_CloudStorage_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "CLOUD_STORAGE_PERMISSION_DENIED", + 3: "PUBLISH_PERMISSION_DENIED", + 4: "BUCKET_NOT_FOUND", + 5: "TOO_MANY_OBJECTS", + } + IngestionDataSourceSettings_CloudStorage_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "CLOUD_STORAGE_PERMISSION_DENIED": 2, + "PUBLISH_PERMISSION_DENIED": 3, + "BUCKET_NOT_FOUND": 4, + "TOO_MANY_OBJECTS": 5, + } +) + +func (x IngestionDataSourceSettings_CloudStorage_State) Enum() *IngestionDataSourceSettings_CloudStorage_State { + p := new(IngestionDataSourceSettings_CloudStorage_State) + *p = x + return p +} + +func (x IngestionDataSourceSettings_CloudStorage_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (IngestionDataSourceSettings_CloudStorage_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[1].Descriptor() +} + +func (IngestionDataSourceSettings_CloudStorage_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[1] +} + +func (x IngestionDataSourceSettings_CloudStorage_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use IngestionDataSourceSettings_CloudStorage_State.Descriptor instead. +func (IngestionDataSourceSettings_CloudStorage_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 0} +} + +// Possible states for managed ingestion from Event Hubs. +type IngestionDataSourceSettings_AzureEventHubs_State int32 + +const ( + // Default value. This value is unused. + IngestionDataSourceSettings_AzureEventHubs_STATE_UNSPECIFIED IngestionDataSourceSettings_AzureEventHubs_State = 0 + // Ingestion is active. + IngestionDataSourceSettings_AzureEventHubs_ACTIVE IngestionDataSourceSettings_AzureEventHubs_State = 1 + // Permission denied encountered while consuming data from Event Hubs. + // This can happen when `client_id`, or `tenant_id` are invalid. Or the + // right permissions haven't been granted. 
+ IngestionDataSourceSettings_AzureEventHubs_EVENT_HUBS_PERMISSION_DENIED IngestionDataSourceSettings_AzureEventHubs_State = 2 + // Permission denied encountered while publishing to the topic. + IngestionDataSourceSettings_AzureEventHubs_PUBLISH_PERMISSION_DENIED IngestionDataSourceSettings_AzureEventHubs_State = 3 + // The provided Event Hubs namespace couldn't be found. + IngestionDataSourceSettings_AzureEventHubs_NAMESPACE_NOT_FOUND IngestionDataSourceSettings_AzureEventHubs_State = 4 + // The provided Event Hub couldn't be found. + IngestionDataSourceSettings_AzureEventHubs_EVENT_HUB_NOT_FOUND IngestionDataSourceSettings_AzureEventHubs_State = 5 + // The provided Event Hubs subscription couldn't be found. + IngestionDataSourceSettings_AzureEventHubs_SUBSCRIPTION_NOT_FOUND IngestionDataSourceSettings_AzureEventHubs_State = 6 + // The provided Event Hubs resource group couldn't be found. + IngestionDataSourceSettings_AzureEventHubs_RESOURCE_GROUP_NOT_FOUND IngestionDataSourceSettings_AzureEventHubs_State = 7 +) + +// Enum value maps for IngestionDataSourceSettings_AzureEventHubs_State. +var ( + IngestionDataSourceSettings_AzureEventHubs_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "EVENT_HUBS_PERMISSION_DENIED", + 3: "PUBLISH_PERMISSION_DENIED", + 4: "NAMESPACE_NOT_FOUND", + 5: "EVENT_HUB_NOT_FOUND", + 6: "SUBSCRIPTION_NOT_FOUND", + 7: "RESOURCE_GROUP_NOT_FOUND", + } + IngestionDataSourceSettings_AzureEventHubs_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "EVENT_HUBS_PERMISSION_DENIED": 2, + "PUBLISH_PERMISSION_DENIED": 3, + "NAMESPACE_NOT_FOUND": 4, + "EVENT_HUB_NOT_FOUND": 5, + "SUBSCRIPTION_NOT_FOUND": 6, + "RESOURCE_GROUP_NOT_FOUND": 7, + } +) + +func (x IngestionDataSourceSettings_AzureEventHubs_State) Enum() *IngestionDataSourceSettings_AzureEventHubs_State { + p := new(IngestionDataSourceSettings_AzureEventHubs_State) + *p = x + return p +} + +func (x IngestionDataSourceSettings_AzureEventHubs_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (IngestionDataSourceSettings_AzureEventHubs_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[2].Descriptor() +} + +func (IngestionDataSourceSettings_AzureEventHubs_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[2] +} + +func (x IngestionDataSourceSettings_AzureEventHubs_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use IngestionDataSourceSettings_AzureEventHubs_State.Descriptor instead. +func (IngestionDataSourceSettings_AzureEventHubs_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 2, 0} +} + +// Possible states for managed ingestion from Amazon MSK. +type IngestionDataSourceSettings_AwsMsk_State int32 + +const ( + // Default value. This value is unused. + IngestionDataSourceSettings_AwsMsk_STATE_UNSPECIFIED IngestionDataSourceSettings_AwsMsk_State = 0 + // Ingestion is active. + IngestionDataSourceSettings_AwsMsk_ACTIVE IngestionDataSourceSettings_AwsMsk_State = 1 + // Permission denied encountered while consuming data from Amazon MSK. + IngestionDataSourceSettings_AwsMsk_MSK_PERMISSION_DENIED IngestionDataSourceSettings_AwsMsk_State = 2 + // Permission denied encountered while publishing to the topic. 
+ IngestionDataSourceSettings_AwsMsk_PUBLISH_PERMISSION_DENIED IngestionDataSourceSettings_AwsMsk_State = 3 + // The provided MSK cluster wasn't found. + IngestionDataSourceSettings_AwsMsk_CLUSTER_NOT_FOUND IngestionDataSourceSettings_AwsMsk_State = 4 + // The provided topic wasn't found. + IngestionDataSourceSettings_AwsMsk_TOPIC_NOT_FOUND IngestionDataSourceSettings_AwsMsk_State = 5 +) + +// Enum value maps for IngestionDataSourceSettings_AwsMsk_State. +var ( + IngestionDataSourceSettings_AwsMsk_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "MSK_PERMISSION_DENIED", + 3: "PUBLISH_PERMISSION_DENIED", + 4: "CLUSTER_NOT_FOUND", + 5: "TOPIC_NOT_FOUND", + } + IngestionDataSourceSettings_AwsMsk_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "MSK_PERMISSION_DENIED": 2, + "PUBLISH_PERMISSION_DENIED": 3, + "CLUSTER_NOT_FOUND": 4, + "TOPIC_NOT_FOUND": 5, + } +) + +func (x IngestionDataSourceSettings_AwsMsk_State) Enum() *IngestionDataSourceSettings_AwsMsk_State { + p := new(IngestionDataSourceSettings_AwsMsk_State) + *p = x + return p +} + +func (x IngestionDataSourceSettings_AwsMsk_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (IngestionDataSourceSettings_AwsMsk_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[3].Descriptor() +} + +func (IngestionDataSourceSettings_AwsMsk_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[3] +} + +func (x IngestionDataSourceSettings_AwsMsk_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use IngestionDataSourceSettings_AwsMsk_State.Descriptor instead. +func (IngestionDataSourceSettings_AwsMsk_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 3, 0} +} + +// Possible states for managed ingestion from Confluent Cloud. +type IngestionDataSourceSettings_ConfluentCloud_State int32 + +const ( + // Default value. This value is unused. + IngestionDataSourceSettings_ConfluentCloud_STATE_UNSPECIFIED IngestionDataSourceSettings_ConfluentCloud_State = 0 + // Ingestion is active. + IngestionDataSourceSettings_ConfluentCloud_ACTIVE IngestionDataSourceSettings_ConfluentCloud_State = 1 + // Permission denied encountered while consuming data from Confluent + // Cloud. + IngestionDataSourceSettings_ConfluentCloud_CONFLUENT_CLOUD_PERMISSION_DENIED IngestionDataSourceSettings_ConfluentCloud_State = 2 + // Permission denied encountered while publishing to the topic. + IngestionDataSourceSettings_ConfluentCloud_PUBLISH_PERMISSION_DENIED IngestionDataSourceSettings_ConfluentCloud_State = 3 + // The provided bootstrap server address is unreachable. + IngestionDataSourceSettings_ConfluentCloud_UNREACHABLE_BOOTSTRAP_SERVER IngestionDataSourceSettings_ConfluentCloud_State = 4 + // The provided cluster wasn't found. + IngestionDataSourceSettings_ConfluentCloud_CLUSTER_NOT_FOUND IngestionDataSourceSettings_ConfluentCloud_State = 5 + // The provided topic wasn't found. + IngestionDataSourceSettings_ConfluentCloud_TOPIC_NOT_FOUND IngestionDataSourceSettings_ConfluentCloud_State = 6 +) + +// Enum value maps for IngestionDataSourceSettings_ConfluentCloud_State. 
+var ( + IngestionDataSourceSettings_ConfluentCloud_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "CONFLUENT_CLOUD_PERMISSION_DENIED", + 3: "PUBLISH_PERMISSION_DENIED", + 4: "UNREACHABLE_BOOTSTRAP_SERVER", + 5: "CLUSTER_NOT_FOUND", + 6: "TOPIC_NOT_FOUND", + } + IngestionDataSourceSettings_ConfluentCloud_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "CONFLUENT_CLOUD_PERMISSION_DENIED": 2, + "PUBLISH_PERMISSION_DENIED": 3, + "UNREACHABLE_BOOTSTRAP_SERVER": 4, + "CLUSTER_NOT_FOUND": 5, + "TOPIC_NOT_FOUND": 6, + } +) + +func (x IngestionDataSourceSettings_ConfluentCloud_State) Enum() *IngestionDataSourceSettings_ConfluentCloud_State { + p := new(IngestionDataSourceSettings_ConfluentCloud_State) + *p = x + return p +} + +func (x IngestionDataSourceSettings_ConfluentCloud_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (IngestionDataSourceSettings_ConfluentCloud_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[4].Descriptor() +} + +func (IngestionDataSourceSettings_ConfluentCloud_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[4] +} + +func (x IngestionDataSourceSettings_ConfluentCloud_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use IngestionDataSourceSettings_ConfluentCloud_State.Descriptor instead. +func (IngestionDataSourceSettings_ConfluentCloud_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 4, 0} +} + +// Severity levels of Platform Logs. +type PlatformLogsSettings_Severity int32 + +const ( + // Default value. Logs level is unspecified. Logs will be disabled. + PlatformLogsSettings_SEVERITY_UNSPECIFIED PlatformLogsSettings_Severity = 0 + // Logs will be disabled. + PlatformLogsSettings_DISABLED PlatformLogsSettings_Severity = 1 + // Debug logs and higher-severity logs will be written. + PlatformLogsSettings_DEBUG PlatformLogsSettings_Severity = 2 + // Info logs and higher-severity logs will be written. + PlatformLogsSettings_INFO PlatformLogsSettings_Severity = 3 + // Warning logs and higher-severity logs will be written. + PlatformLogsSettings_WARNING PlatformLogsSettings_Severity = 4 + // Only error logs will be written. + PlatformLogsSettings_ERROR PlatformLogsSettings_Severity = 5 +) + +// Enum value maps for PlatformLogsSettings_Severity. 
+var ( + PlatformLogsSettings_Severity_name = map[int32]string{ + 0: "SEVERITY_UNSPECIFIED", + 1: "DISABLED", + 2: "DEBUG", + 3: "INFO", + 4: "WARNING", + 5: "ERROR", + } + PlatformLogsSettings_Severity_value = map[string]int32{ + "SEVERITY_UNSPECIFIED": 0, + "DISABLED": 1, + "DEBUG": 2, + "INFO": 3, + "WARNING": 4, + "ERROR": 5, + } +) + +func (x PlatformLogsSettings_Severity) Enum() *PlatformLogsSettings_Severity { + p := new(PlatformLogsSettings_Severity) + *p = x + return p +} + +func (x PlatformLogsSettings_Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PlatformLogsSettings_Severity) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[5].Descriptor() +} + +func (PlatformLogsSettings_Severity) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[5] +} + +func (x PlatformLogsSettings_Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PlatformLogsSettings_Severity.Descriptor instead. +func (PlatformLogsSettings_Severity) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3, 0} +} + +// The state of the topic. +type Topic_State int32 + +const ( + // Default value. This value is unused. + Topic_STATE_UNSPECIFIED Topic_State = 0 + // The topic does not have any persistent errors. + Topic_ACTIVE Topic_State = 1 + // Ingestion from the data source has encountered a permanent error. + // See the more detailed error state in the corresponding ingestion + // source configuration. + Topic_INGESTION_RESOURCE_ERROR Topic_State = 2 +) + +// Enum value maps for Topic_State. +var ( + Topic_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "INGESTION_RESOURCE_ERROR", + } + Topic_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "INGESTION_RESOURCE_ERROR": 2, + } +) + +func (x Topic_State) Enum() *Topic_State { + p := new(Topic_State) + *p = x + return p +} + +func (x Topic_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Topic_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[6].Descriptor() +} + +func (Topic_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[6] +} + +func (x Topic_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Topic_State.Descriptor instead. +func (Topic_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{7, 0} +} + +// Possible states for a subscription. +type Subscription_State int32 + +const ( + // Default value. This value is unused. + Subscription_STATE_UNSPECIFIED Subscription_State = 0 + // The subscription can actively receive messages + Subscription_ACTIVE Subscription_State = 1 + // The subscription cannot receive messages because of an error with the + // resource to which it pushes messages. See the more detailed error state + // in the corresponding configuration. + Subscription_RESOURCE_ERROR Subscription_State = 2 +) + +// Enum value maps for Subscription_State. 
+var ( + Subscription_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "RESOURCE_ERROR", + } + Subscription_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "RESOURCE_ERROR": 2, + } +) + +func (x Subscription_State) Enum() *Subscription_State { + p := new(Subscription_State) + *p = x + return p +} + +func (x Subscription_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Subscription_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[7].Descriptor() +} + +func (Subscription_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[7] +} + +func (x Subscription_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Subscription_State.Descriptor instead. +func (Subscription_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 0} +} + +// Possible states for a BigQuery subscription. +type BigQueryConfig_State int32 + +const ( + // Default value. This value is unused. + BigQueryConfig_STATE_UNSPECIFIED BigQueryConfig_State = 0 + // The subscription can actively send messages to BigQuery + BigQueryConfig_ACTIVE BigQueryConfig_State = 1 + // Cannot write to the BigQuery table because of permission denied errors. + // This can happen if + // - Pub/Sub SA has not been granted the [appropriate BigQuery IAM + // permissions](https://cloud.google.com/pubsub/docs/create-subscription#assign_bigquery_service_account) + // - bigquery.googleapis.com API is not enabled for the project + // ([instructions](https://cloud.google.com/service-usage/docs/enable-disable)) + BigQueryConfig_PERMISSION_DENIED BigQueryConfig_State = 2 + // Cannot write to the BigQuery table because it does not exist. + BigQueryConfig_NOT_FOUND BigQueryConfig_State = 3 + // Cannot write to the BigQuery table due to a schema mismatch. + BigQueryConfig_SCHEMA_MISMATCH BigQueryConfig_State = 4 + // Cannot write to the destination because enforce_in_transit is set to true + // and the destination locations are not in the allowed regions. + BigQueryConfig_IN_TRANSIT_LOCATION_RESTRICTION BigQueryConfig_State = 5 +) + +// Enum value maps for BigQueryConfig_State. +var ( + BigQueryConfig_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "PERMISSION_DENIED", + 3: "NOT_FOUND", + 4: "SCHEMA_MISMATCH", + 5: "IN_TRANSIT_LOCATION_RESTRICTION", + } + BigQueryConfig_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "PERMISSION_DENIED": 2, + "NOT_FOUND": 3, + "SCHEMA_MISMATCH": 4, + "IN_TRANSIT_LOCATION_RESTRICTION": 5, + } +) + +func (x BigQueryConfig_State) Enum() *BigQueryConfig_State { + p := new(BigQueryConfig_State) + *p = x + return p +} + +func (x BigQueryConfig_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BigQueryConfig_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[8].Descriptor() +} + +func (BigQueryConfig_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[8] +} + +func (x BigQueryConfig_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BigQueryConfig_State.Descriptor instead. 
+func (BigQueryConfig_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{27, 0} +} + +// Possible states for a Cloud Storage subscription. +type CloudStorageConfig_State int32 + +const ( + // Default value. This value is unused. + CloudStorageConfig_STATE_UNSPECIFIED CloudStorageConfig_State = 0 + // The subscription can actively send messages to Cloud Storage. + CloudStorageConfig_ACTIVE CloudStorageConfig_State = 1 + // Cannot write to the Cloud Storage bucket because of permission denied + // errors. + CloudStorageConfig_PERMISSION_DENIED CloudStorageConfig_State = 2 + // Cannot write to the Cloud Storage bucket because it does not exist. + CloudStorageConfig_NOT_FOUND CloudStorageConfig_State = 3 + // Cannot write to the destination because enforce_in_transit is set to true + // and the destination locations are not in the allowed regions. + CloudStorageConfig_IN_TRANSIT_LOCATION_RESTRICTION CloudStorageConfig_State = 4 + // Cannot write to the Cloud Storage bucket due to an incompatibility + // between the topic schema and subscription settings. + CloudStorageConfig_SCHEMA_MISMATCH CloudStorageConfig_State = 5 +) + +// Enum value maps for CloudStorageConfig_State. +var ( + CloudStorageConfig_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "PERMISSION_DENIED", + 3: "NOT_FOUND", + 4: "IN_TRANSIT_LOCATION_RESTRICTION", + 5: "SCHEMA_MISMATCH", + } + CloudStorageConfig_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "PERMISSION_DENIED": 2, + "NOT_FOUND": 3, + "IN_TRANSIT_LOCATION_RESTRICTION": 4, + "SCHEMA_MISMATCH": 5, + } +) + +func (x CloudStorageConfig_State) Enum() *CloudStorageConfig_State { + p := new(CloudStorageConfig_State) + *p = x + return p +} + +func (x CloudStorageConfig_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CloudStorageConfig_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[9].Descriptor() +} + +func (CloudStorageConfig_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[9] +} + +func (x CloudStorageConfig_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CloudStorageConfig_State.Descriptor instead. +func (CloudStorageConfig_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28, 0} +} + +// A policy constraining the storage of messages published to the topic. +type MessageStoragePolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. A list of IDs of Google Cloud regions where messages that are + // published to the topic may be persisted in storage. Messages published by + // publishers running in non-allowed Google Cloud regions (or running outside + // of Google Cloud altogether) are routed for storage in one of the allowed + // regions. An empty list means that no regions are allowed, and is not a + // valid configuration. + AllowedPersistenceRegions []string `protobuf:"bytes,1,rep,name=allowed_persistence_regions,json=allowedPersistenceRegions,proto3" json:"allowed_persistence_regions,omitempty"` + // Optional. If true, `allowed_persistence_regions` is also used to enforce + // in-transit guarantees for messages. 
That is, Pub/Sub will fail + // Publish operations on this topic and subscribe operations + // on any subscription attached to this topic in any region that is + // not in `allowed_persistence_regions`. + EnforceInTransit bool `protobuf:"varint,2,opt,name=enforce_in_transit,json=enforceInTransit,proto3" json:"enforce_in_transit,omitempty"` +} + +func (x *MessageStoragePolicy) Reset() { + *x = MessageStoragePolicy{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageStoragePolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageStoragePolicy) ProtoMessage() {} + +func (x *MessageStoragePolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageStoragePolicy.ProtoReflect.Descriptor instead. +func (*MessageStoragePolicy) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{0} +} + +func (x *MessageStoragePolicy) GetAllowedPersistenceRegions() []string { + if x != nil { + return x.AllowedPersistenceRegions + } + return nil +} + +func (x *MessageStoragePolicy) GetEnforceInTransit() bool { + if x != nil { + return x.EnforceInTransit + } + return false +} + +// Settings for validating messages published against a schema. +type SchemaSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the schema that messages published should be + // validated against. Format is `projects/{project}/schemas/{schema}`. The + // value of this field will be `_deleted-schema_` if the schema has been + // deleted. + Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` + // Optional. The encoding of messages validated against `schema`. + Encoding Encoding `protobuf:"varint,2,opt,name=encoding,proto3,enum=google.pubsub.v1.Encoding" json:"encoding,omitempty"` + // Optional. The minimum (inclusive) revision allowed for validating messages. + // If empty or not present, allow any revision to be validated against + // last_revision or any revision created before. + FirstRevisionId string `protobuf:"bytes,3,opt,name=first_revision_id,json=firstRevisionId,proto3" json:"first_revision_id,omitempty"` + // Optional. The maximum (inclusive) revision allowed for validating messages. + // If empty or not present, allow any revision to be validated against + // first_revision or any revision created after. 
+ LastRevisionId string `protobuf:"bytes,4,opt,name=last_revision_id,json=lastRevisionId,proto3" json:"last_revision_id,omitempty"` +} + +func (x *SchemaSettings) Reset() { + *x = SchemaSettings{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SchemaSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemaSettings) ProtoMessage() {} + +func (x *SchemaSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchemaSettings.ProtoReflect.Descriptor instead. +func (*SchemaSettings) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{1} +} + +func (x *SchemaSettings) GetSchema() string { + if x != nil { + return x.Schema + } + return "" +} + +func (x *SchemaSettings) GetEncoding() Encoding { + if x != nil { + return x.Encoding + } + return Encoding_ENCODING_UNSPECIFIED +} + +func (x *SchemaSettings) GetFirstRevisionId() string { + if x != nil { + return x.FirstRevisionId + } + return "" +} + +func (x *SchemaSettings) GetLastRevisionId() string { + if x != nil { + return x.LastRevisionId + } + return "" +} + +// Settings for an ingestion data source on a topic. +type IngestionDataSourceSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only one source type can have settings set. + // + // Types that are assignable to Source: + // + // *IngestionDataSourceSettings_AwsKinesis_ + // *IngestionDataSourceSettings_CloudStorage_ + // *IngestionDataSourceSettings_AzureEventHubs_ + // *IngestionDataSourceSettings_AwsMsk_ + // *IngestionDataSourceSettings_ConfluentCloud_ + Source isIngestionDataSourceSettings_Source `protobuf_oneof:"source"` + // Optional. Platform Logs settings. If unset, no Platform Logs will be + // generated. + PlatformLogsSettings *PlatformLogsSettings `protobuf:"bytes,4,opt,name=platform_logs_settings,json=platformLogsSettings,proto3" json:"platform_logs_settings,omitempty"` +} + +func (x *IngestionDataSourceSettings) Reset() { + *x = IngestionDataSourceSettings{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionDataSourceSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings) ProtoMessage() {} + +func (x *IngestionDataSourceSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings.ProtoReflect.Descriptor instead. 
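+
+// Editor's note: an illustrative sketch, not generated code. It builds the
+// SchemaSettings message defined above; the resource name is a placeholder,
+// and Encoding_JSON is assumed from the Encoding enum generated alongside
+// this file (schema.pb.go) in the same package.
+func exampleSchemaSettings() *SchemaSettings {
+	return &SchemaSettings{
+		// The server reports `_deleted-schema_` here once the schema is
+		// deleted; requests should reference a live schema resource.
+		Schema:   "projects/my-project/schemas/my-schema",
+		Encoding: Encoding_JSON,
+		// Empty revision bounds accept any revision; set FirstRevisionId and
+		// LastRevisionId to pin validation to an inclusive revision window.
+	}
+}
+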
+func (*IngestionDataSourceSettings) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2} +} + +func (m *IngestionDataSourceSettings) GetSource() isIngestionDataSourceSettings_Source { + if m != nil { + return m.Source + } + return nil +} + +func (x *IngestionDataSourceSettings) GetAwsKinesis() *IngestionDataSourceSettings_AwsKinesis { + if x, ok := x.GetSource().(*IngestionDataSourceSettings_AwsKinesis_); ok { + return x.AwsKinesis + } + return nil +} + +func (x *IngestionDataSourceSettings) GetCloudStorage() *IngestionDataSourceSettings_CloudStorage { + if x, ok := x.GetSource().(*IngestionDataSourceSettings_CloudStorage_); ok { + return x.CloudStorage + } + return nil +} + +func (x *IngestionDataSourceSettings) GetAzureEventHubs() *IngestionDataSourceSettings_AzureEventHubs { + if x, ok := x.GetSource().(*IngestionDataSourceSettings_AzureEventHubs_); ok { + return x.AzureEventHubs + } + return nil +} + +func (x *IngestionDataSourceSettings) GetAwsMsk() *IngestionDataSourceSettings_AwsMsk { + if x, ok := x.GetSource().(*IngestionDataSourceSettings_AwsMsk_); ok { + return x.AwsMsk + } + return nil +} + +func (x *IngestionDataSourceSettings) GetConfluentCloud() *IngestionDataSourceSettings_ConfluentCloud { + if x, ok := x.GetSource().(*IngestionDataSourceSettings_ConfluentCloud_); ok { + return x.ConfluentCloud + } + return nil +} + +func (x *IngestionDataSourceSettings) GetPlatformLogsSettings() *PlatformLogsSettings { + if x != nil { + return x.PlatformLogsSettings + } + return nil +} + +type isIngestionDataSourceSettings_Source interface { + isIngestionDataSourceSettings_Source() +} + +type IngestionDataSourceSettings_AwsKinesis_ struct { + // Optional. Amazon Kinesis Data Streams. + AwsKinesis *IngestionDataSourceSettings_AwsKinesis `protobuf:"bytes,1,opt,name=aws_kinesis,json=awsKinesis,proto3,oneof"` +} + +type IngestionDataSourceSettings_CloudStorage_ struct { + // Optional. Cloud Storage. + CloudStorage *IngestionDataSourceSettings_CloudStorage `protobuf:"bytes,2,opt,name=cloud_storage,json=cloudStorage,proto3,oneof"` +} + +type IngestionDataSourceSettings_AzureEventHubs_ struct { + // Optional. Azure Event Hubs. + AzureEventHubs *IngestionDataSourceSettings_AzureEventHubs `protobuf:"bytes,3,opt,name=azure_event_hubs,json=azureEventHubs,proto3,oneof"` +} + +type IngestionDataSourceSettings_AwsMsk_ struct { + // Optional. Amazon MSK. + AwsMsk *IngestionDataSourceSettings_AwsMsk `protobuf:"bytes,5,opt,name=aws_msk,json=awsMsk,proto3,oneof"` +} + +type IngestionDataSourceSettings_ConfluentCloud_ struct { + // Optional. Confluent Cloud. + ConfluentCloud *IngestionDataSourceSettings_ConfluentCloud `protobuf:"bytes,6,opt,name=confluent_cloud,json=confluentCloud,proto3,oneof"` +} + +func (*IngestionDataSourceSettings_AwsKinesis_) isIngestionDataSourceSettings_Source() {} + +func (*IngestionDataSourceSettings_CloudStorage_) isIngestionDataSourceSettings_Source() {} + +func (*IngestionDataSourceSettings_AzureEventHubs_) isIngestionDataSourceSettings_Source() {} + +func (*IngestionDataSourceSettings_AwsMsk_) isIngestionDataSourceSettings_Source() {} + +func (*IngestionDataSourceSettings_ConfluentCloud_) isIngestionDataSourceSettings_Source() {} + +// Settings for Platform Logs produced by Pub/Sub. +type PlatformLogsSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The minimum severity level of Platform Logs that will be written. 
+ Severity PlatformLogsSettings_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=google.pubsub.v1.PlatformLogsSettings_Severity" json:"severity,omitempty"` +} + +func (x *PlatformLogsSettings) Reset() { + *x = PlatformLogsSettings{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PlatformLogsSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlatformLogsSettings) ProtoMessage() {} + +func (x *PlatformLogsSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlatformLogsSettings.ProtoReflect.Descriptor instead. +func (*PlatformLogsSettings) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3} +} + +func (x *PlatformLogsSettings) GetSeverity() PlatformLogsSettings_Severity { + if x != nil { + return x.Severity + } + return PlatformLogsSettings_SEVERITY_UNSPECIFIED +} + +// Payload of the Platform Log entry sent when a failure is encountered while +// ingesting. +type IngestionFailureEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the import topic. Format is: + // projects/{project_name}/topics/{topic_name}. + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + // Required. Error details explaining why ingestion to Pub/Sub has failed. + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + // Types that are assignable to Failure: + // + // *IngestionFailureEvent_CloudStorageFailure_ + // *IngestionFailureEvent_AwsMskFailure + // *IngestionFailureEvent_AzureEventHubsFailure + // *IngestionFailureEvent_ConfluentCloudFailure + // *IngestionFailureEvent_AwsKinesisFailure + Failure isIngestionFailureEvent_Failure `protobuf_oneof:"failure"` +} + +func (x *IngestionFailureEvent) Reset() { + *x = IngestionFailureEvent{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionFailureEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionFailureEvent) ProtoMessage() {} + +func (x *IngestionFailureEvent) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionFailureEvent.ProtoReflect.Descriptor instead. 
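+
+// Editor's note: an illustrative sketch, not generated code. The Source
+// field of IngestionDataSourceSettings is a oneof, so exactly one wrapper
+// struct (IngestionDataSourceSettings_CloudStorage_ and friends) may be
+// assigned; the generated getters type-assert on the wrapper and return nil
+// for the other arms. IngestionFailureEvent's Failure oneof follows the
+// same pattern. PlatformLogsSettings_WARNING is assumed from the Severity
+// values in the upstream proto; only SEVERITY_UNSPECIFIED is visible here.
+func exampleIngestionSettings(cs *IngestionDataSourceSettings_CloudStorage) *IngestionDataSourceSettings {
+	return &IngestionDataSourceSettings{
+		Source: &IngestionDataSourceSettings_CloudStorage_{CloudStorage: cs},
+		PlatformLogsSettings: &PlatformLogsSettings{
+			Severity: PlatformLogsSettings_WARNING,
+		},
+	}
+}
+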
+func (*IngestionFailureEvent) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4} +} + +func (x *IngestionFailureEvent) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *IngestionFailureEvent) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (m *IngestionFailureEvent) GetFailure() isIngestionFailureEvent_Failure { + if m != nil { + return m.Failure + } + return nil +} + +func (x *IngestionFailureEvent) GetCloudStorageFailure() *IngestionFailureEvent_CloudStorageFailure { + if x, ok := x.GetFailure().(*IngestionFailureEvent_CloudStorageFailure_); ok { + return x.CloudStorageFailure + } + return nil +} + +func (x *IngestionFailureEvent) GetAwsMskFailure() *IngestionFailureEvent_AwsMskFailureReason { + if x, ok := x.GetFailure().(*IngestionFailureEvent_AwsMskFailure); ok { + return x.AwsMskFailure + } + return nil +} + +func (x *IngestionFailureEvent) GetAzureEventHubsFailure() *IngestionFailureEvent_AzureEventHubsFailureReason { + if x, ok := x.GetFailure().(*IngestionFailureEvent_AzureEventHubsFailure); ok { + return x.AzureEventHubsFailure + } + return nil +} + +func (x *IngestionFailureEvent) GetConfluentCloudFailure() *IngestionFailureEvent_ConfluentCloudFailureReason { + if x, ok := x.GetFailure().(*IngestionFailureEvent_ConfluentCloudFailure); ok { + return x.ConfluentCloudFailure + } + return nil +} + +func (x *IngestionFailureEvent) GetAwsKinesisFailure() *IngestionFailureEvent_AwsKinesisFailureReason { + if x, ok := x.GetFailure().(*IngestionFailureEvent_AwsKinesisFailure); ok { + return x.AwsKinesisFailure + } + return nil +} + +type isIngestionFailureEvent_Failure interface { + isIngestionFailureEvent_Failure() +} + +type IngestionFailureEvent_CloudStorageFailure_ struct { + // Optional. Failure when ingesting from Cloud Storage. + CloudStorageFailure *IngestionFailureEvent_CloudStorageFailure `protobuf:"bytes,3,opt,name=cloud_storage_failure,json=cloudStorageFailure,proto3,oneof"` +} + +type IngestionFailureEvent_AwsMskFailure struct { + // Optional. Failure when ingesting from Amazon MSK. + AwsMskFailure *IngestionFailureEvent_AwsMskFailureReason `protobuf:"bytes,4,opt,name=aws_msk_failure,json=awsMskFailure,proto3,oneof"` +} + +type IngestionFailureEvent_AzureEventHubsFailure struct { + // Optional. Failure when ingesting from Azure Event Hubs. + AzureEventHubsFailure *IngestionFailureEvent_AzureEventHubsFailureReason `protobuf:"bytes,5,opt,name=azure_event_hubs_failure,json=azureEventHubsFailure,proto3,oneof"` +} + +type IngestionFailureEvent_ConfluentCloudFailure struct { + // Optional. Failure when ingesting from Confluent Cloud. + ConfluentCloudFailure *IngestionFailureEvent_ConfluentCloudFailureReason `protobuf:"bytes,6,opt,name=confluent_cloud_failure,json=confluentCloudFailure,proto3,oneof"` +} + +type IngestionFailureEvent_AwsKinesisFailure struct { + // Optional. Failure when ingesting from AWS Kinesis. 
+	AwsKinesisFailure *IngestionFailureEvent_AwsKinesisFailureReason `protobuf:"bytes,7,opt,name=aws_kinesis_failure,json=awsKinesisFailure,proto3,oneof"`
+}
+
+func (*IngestionFailureEvent_CloudStorageFailure_) isIngestionFailureEvent_Failure() {}
+
+func (*IngestionFailureEvent_AwsMskFailure) isIngestionFailureEvent_Failure() {}
+
+func (*IngestionFailureEvent_AzureEventHubsFailure) isIngestionFailureEvent_Failure() {}
+
+func (*IngestionFailureEvent_ConfluentCloudFailure) isIngestionFailureEvent_Failure() {}
+
+func (*IngestionFailureEvent_AwsKinesisFailure) isIngestionFailureEvent_Failure() {}
+
+// User-defined JavaScript function that can transform or filter a Pub/Sub
+// message.
+type JavaScriptUDF struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. Name of the JavaScript function that should be applied to
+	// Pub/Sub messages.
+	FunctionName string `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"`
+	// Required. JavaScript code that contains a function `function_name` with the
+	// below signature:
+	//
+	// ```
+	//
+	//	/**
+	//	* Transforms a Pub/Sub message.
+	//
+	//	* @return {(Object<string, (string | Object<string, string>)>|null)} - To
+	//	* filter a message, return `null`. To transform a message return a map
+	//	* with the following keys:
+	//	*   - (required) 'data' : {string}
+	//	*   - (optional) 'attributes' : {Object<string, string>}
+	//	* Returning empty `attributes` will remove all attributes from the
+	//	* message.
+	//	*
+	//	* @param {(Object<string, (string | Object<string, string>)>)} Pub/Sub
+	//	* message. Keys:
+	//	*   - (required) 'data' : {string}
+	//	*   - (required) 'attributes' : {Object<string, string>}
+	//	*
+	//	* @param {Object<string, any>} metadata - Pub/Sub message metadata.
+	//	* Keys:
+	//	*   - (optional) 'message_id'  : {string}
+	//	*   - (optional) 'publish_time': {string} YYYY-MM-DDTHH:MM:SSZ format
+	//	*   - (optional) 'ordering_key': {string}
+	//	*/
+	//
+	//	function <function_name>(message, metadata) {
+	//	}
+	//
+	// ```
+	Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"`
+}
+
+func (x *JavaScriptUDF) Reset() {
+	*x = JavaScriptUDF{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *JavaScriptUDF) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JavaScriptUDF) ProtoMessage() {}
+
+func (x *JavaScriptUDF) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use JavaScriptUDF.ProtoReflect.Descriptor instead.
+func (*JavaScriptUDF) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *JavaScriptUDF) GetFunctionName() string {
+	if x != nil {
+		return x.FunctionName
+	}
+	return ""
+}
+
+func (x *JavaScriptUDF) GetCode() string {
+	if x != nil {
+		return x.Code
+	}
+	return ""
+}
+
+// All supported message transform types.
+type MessageTransform struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The type of transform to apply to messages.
+	//
+	// Types that are assignable to Transform:
+	//
+	//	*MessageTransform_JavascriptUdf
+	Transform isMessageTransform_Transform `protobuf_oneof:"transform"`
+	// Optional. This field is deprecated; use the `disabled` field to disable
+	// transforms.
+	//
+	// Deprecated: Marked as deprecated in google/pubsub/v1/pubsub.proto.
+	Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"`
+	// Optional. If true, the transform is disabled and will not be applied to
+	// messages. Defaults to `false`.
+	Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
+}
+
+func (x *MessageTransform) Reset() {
+	*x = MessageTransform{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *MessageTransform) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageTransform) ProtoMessage() {}
+
+func (x *MessageTransform) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageTransform.ProtoReflect.Descriptor instead.
+func (*MessageTransform) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{6}
+}
+
+func (m *MessageTransform) GetTransform() isMessageTransform_Transform {
+	if m != nil {
+		return m.Transform
+	}
+	return nil
+}
+
+func (x *MessageTransform) GetJavascriptUdf() *JavaScriptUDF {
+	if x, ok := x.GetTransform().(*MessageTransform_JavascriptUdf); ok {
+		return x.JavascriptUdf
+	}
+	return nil
+}
+
+// Deprecated: Marked as deprecated in google/pubsub/v1/pubsub.proto.
+func (x *MessageTransform) GetEnabled() bool {
+	if x != nil {
+		return x.Enabled
+	}
+	return false
+}
+
+func (x *MessageTransform) GetDisabled() bool {
+	if x != nil {
+		return x.Disabled
+	}
+	return false
+}
+
+type isMessageTransform_Transform interface {
+	isMessageTransform_Transform()
+}
+
+type MessageTransform_JavascriptUdf struct {
+	// Optional. JavaScript User Defined Function. If multiple JavaScriptUDFs
+	// are specified on a resource, each must have a unique `function_name`.
+	JavascriptUdf *JavaScriptUDF `protobuf:"bytes,2,opt,name=javascript_udf,json=javascriptUdf,proto3,oneof"`
+}
+
+func (*MessageTransform_JavascriptUdf) isMessageTransform_Transform() {}
+
+// A topic resource.
+type Topic struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The name of the topic. It must have the format
+	// `"projects/{project}/topics/{topic}"`. `{topic}` must start with a letter,
+	// and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
+	// underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent
+	// signs (`%`). It must be between 3 and 255 characters in length, and it
+	// must not start with `"goog"`.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Optional. See [Creating and managing labels]
+	// (https://cloud.google.com/pubsub/docs/labels).
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Optional. Policy constraining the set of Google Cloud Platform regions
+	// where messages published to the topic may be stored. If not present, then
+	// no constraints are in effect.
+ MessageStoragePolicy *MessageStoragePolicy `protobuf:"bytes,3,opt,name=message_storage_policy,json=messageStoragePolicy,proto3" json:"message_storage_policy,omitempty"` + // Optional. The resource name of the Cloud KMS CryptoKey to be used to + // protect access to messages published on this topic. + // + // The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + KmsKeyName string `protobuf:"bytes,5,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"` + // Optional. Settings for validating messages published against a schema. + SchemaSettings *SchemaSettings `protobuf:"bytes,6,opt,name=schema_settings,json=schemaSettings,proto3" json:"schema_settings,omitempty"` + // Optional. Reserved for future use. This field is set only in responses from + // the server; it is ignored if it is set in any requests. + SatisfiesPzs bool `protobuf:"varint,7,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"` + // Optional. Indicates the minimum duration to retain a message after it is + // published to the topic. If this field is set, messages published to the + // topic in the last `message_retention_duration` are always available to + // subscribers. For instance, it allows any attached subscription to [seek to + // a + // timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) + // that is up to `message_retention_duration` in the past. If this field is + // not set, message retention is controlled by settings on individual + // subscriptions. Cannot be more than 31 days or less than 10 minutes. + MessageRetentionDuration *durationpb.Duration `protobuf:"bytes,8,opt,name=message_retention_duration,json=messageRetentionDuration,proto3" json:"message_retention_duration,omitempty"` + // Output only. An output-only field indicating the state of the topic. + State Topic_State `protobuf:"varint,9,opt,name=state,proto3,enum=google.pubsub.v1.Topic_State" json:"state,omitempty"` + // Optional. Settings for ingestion from a data source into this topic. + IngestionDataSourceSettings *IngestionDataSourceSettings `protobuf:"bytes,10,opt,name=ingestion_data_source_settings,json=ingestionDataSourceSettings,proto3" json:"ingestion_data_source_settings,omitempty"` + // Optional. Transforms to be applied to messages published to the topic. + // Transforms are applied in the order specified. + MessageTransforms []*MessageTransform `protobuf:"bytes,13,rep,name=message_transforms,json=messageTransforms,proto3" json:"message_transforms,omitempty"` +} + +func (x *Topic) Reset() { + *x = Topic{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Topic) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Topic) ProtoMessage() {} + +func (x *Topic) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Topic.ProtoReflect.Descriptor instead. 
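+
+// Editor's note: an illustrative sketch, not generated code. It assembles a
+// Topic with the storage-policy and retention fields documented above and
+// wraps it in the UpdateTopicRequest defined later in this file; resource
+// names are placeholders. durationpb and fieldmaskpb are already imported
+// by this generated file.
+func exampleTopicUpdate() *UpdateTopicRequest {
+	topic := &Topic{
+		// Must be 3-255 chars, start with a letter, and not start with "goog".
+		Name: "projects/my-project/topics/my-topic",
+		MessageStoragePolicy: &MessageStoragePolicy{
+			// A non-empty allow-list; an empty list is not a valid config.
+			AllowedPersistenceRegions: []string{"europe-west1"},
+			// Also fail Publish and subscribe calls in non-allowed regions.
+			EnforceInTransit: true,
+		},
+		// Three days, within the documented 10 minutes .. 31 days window.
+		MessageRetentionDuration: &durationpb.Duration{Seconds: 3 * 24 * 60 * 60},
+	}
+	return &UpdateTopicRequest{
+		Topic: topic,
+		// Only the masked paths are updated; per the UpdateTopicRequest docs,
+		// masking message_storage_policy while leaving it unset falls back to
+		// the project- or organization-level policy.
+		UpdateMask: &fieldmaskpb.FieldMask{
+			Paths: []string{"message_storage_policy", "message_retention_duration"},
+		},
+	}
+}
+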
+func (*Topic) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{7} +} + +func (x *Topic) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Topic) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Topic) GetMessageStoragePolicy() *MessageStoragePolicy { + if x != nil { + return x.MessageStoragePolicy + } + return nil +} + +func (x *Topic) GetKmsKeyName() string { + if x != nil { + return x.KmsKeyName + } + return "" +} + +func (x *Topic) GetSchemaSettings() *SchemaSettings { + if x != nil { + return x.SchemaSettings + } + return nil +} + +func (x *Topic) GetSatisfiesPzs() bool { + if x != nil { + return x.SatisfiesPzs + } + return false +} + +func (x *Topic) GetMessageRetentionDuration() *durationpb.Duration { + if x != nil { + return x.MessageRetentionDuration + } + return nil +} + +func (x *Topic) GetState() Topic_State { + if x != nil { + return x.State + } + return Topic_STATE_UNSPECIFIED +} + +func (x *Topic) GetIngestionDataSourceSettings() *IngestionDataSourceSettings { + if x != nil { + return x.IngestionDataSourceSettings + } + return nil +} + +func (x *Topic) GetMessageTransforms() []*MessageTransform { + if x != nil { + return x.MessageTransforms + } + return nil +} + +// A message that is published by publishers and consumed by subscribers. The +// message must contain either a non-empty data field or at least one attribute. +// Note that client libraries represent this object differently +// depending on the language. See the corresponding [client library +// documentation](https://cloud.google.com/pubsub/docs/reference/libraries) for +// more information. See [quotas and limits] +// (https://cloud.google.com/pubsub/quotas) for more information about message +// limits. +type PubsubMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The message data field. If this field is empty, the message must + // contain at least one attribute. + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // Optional. Attributes for this message. If this field is empty, the message + // must contain non-empty data. This can be used to filter messages on the + // subscription. + Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of this message, assigned by the server when the message is published. + // Guaranteed to be unique within the topic. This value may be read by a + // subscriber that receives a `PubsubMessage` via a `Pull` call or a push + // delivery. It must not be populated by the publisher in a `Publish` call. + MessageId string `protobuf:"bytes,3,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` + // The time at which the message was published, populated by the server when + // it receives the `Publish` call. It must not be populated by the + // publisher in a `Publish` call. + PublishTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=publish_time,json=publishTime,proto3" json:"publish_time,omitempty"` + // Optional. If non-empty, identifies related messages for which publish order + // should be respected. 
If a `Subscription` has `enable_message_ordering` set + // to `true`, messages published with the same non-empty `ordering_key` value + // will be delivered to subscribers in the order in which they are received by + // the Pub/Sub system. All `PubsubMessage`s published in a given + // `PublishRequest` must specify the same `ordering_key` value. For more + // information, see [ordering + // messages](https://cloud.google.com/pubsub/docs/ordering). + OrderingKey string `protobuf:"bytes,5,opt,name=ordering_key,json=orderingKey,proto3" json:"ordering_key,omitempty"` +} + +func (x *PubsubMessage) Reset() { + *x = PubsubMessage{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PubsubMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PubsubMessage) ProtoMessage() {} + +func (x *PubsubMessage) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PubsubMessage.ProtoReflect.Descriptor instead. +func (*PubsubMessage) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{8} +} + +func (x *PubsubMessage) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *PubsubMessage) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *PubsubMessage) GetMessageId() string { + if x != nil { + return x.MessageId + } + return "" +} + +func (x *PubsubMessage) GetPublishTime() *timestamppb.Timestamp { + if x != nil { + return x.PublishTime + } + return nil +} + +func (x *PubsubMessage) GetOrderingKey() string { + if x != nil { + return x.OrderingKey + } + return "" +} + +// Request for the GetTopic method. +type GetTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the topic to get. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *GetTopicRequest) Reset() { + *x = GetTopicRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTopicRequest) ProtoMessage() {} + +func (x *GetTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTopicRequest.ProtoReflect.Descriptor instead. +func (*GetTopicRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{9} +} + +func (x *GetTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +// Request for the UpdateTopic method. +type UpdateTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The updated topic object. 
+ Topic *Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + // Required. Indicates which fields in the provided topic to update. Must be + // specified and non-empty. Note that if `update_mask` contains + // "message_storage_policy" but the `message_storage_policy` is not set in + // the `topic` provided above, then the updated value is determined by the + // policy configured at the project or organization level. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateTopicRequest) Reset() { + *x = UpdateTopicRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateTopicRequest) ProtoMessage() {} + +func (x *UpdateTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTopicRequest.ProtoReflect.Descriptor instead. +func (*UpdateTopicRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{10} +} + +func (x *UpdateTopicRequest) GetTopic() *Topic { + if x != nil { + return x.Topic + } + return nil +} + +func (x *UpdateTopicRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request for the Publish method. +type PublishRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The messages in the request will be published on this topic. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + // Required. The messages to publish. + Messages []*PubsubMessage `protobuf:"bytes,2,rep,name=messages,proto3" json:"messages,omitempty"` +} + +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest) ProtoMessage() {} + +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. +func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{11} +} + +func (x *PublishRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *PublishRequest) GetMessages() []*PubsubMessage { + if x != nil { + return x.Messages + } + return nil +} + +// Response for the `Publish` method. +type PublishResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. 
The server-assigned ID of each published message, in the same + // order as the messages in the request. IDs are guaranteed to be unique + // within the topic. + MessageIds []string `protobuf:"bytes,1,rep,name=message_ids,json=messageIds,proto3" json:"message_ids,omitempty"` +} + +func (x *PublishResponse) Reset() { + *x = PublishResponse{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublishResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse) ProtoMessage() {} + +func (x *PublishResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse.ProtoReflect.Descriptor instead. +func (*PublishResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{12} +} + +func (x *PublishResponse) GetMessageIds() []string { + if x != nil { + return x.MessageIds + } + return nil +} + +// Request for the `ListTopics` method. +type ListTopicsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the project in which to list topics. + // Format is `projects/{project-id}`. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // Optional. Maximum number of topics to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. The value returned by the last `ListTopicsResponse`; indicates + // that this is a continuation of a prior `ListTopics` call, and that the + // system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListTopicsRequest) Reset() { + *x = ListTopicsRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListTopicsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicsRequest) ProtoMessage() {} + +func (x *ListTopicsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicsRequest.ProtoReflect.Descriptor instead. +func (*ListTopicsRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{13} +} + +func (x *ListTopicsRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *ListTopicsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListTopicsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response for the `ListTopics` method. +type ListTopicsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The resulting topics. 
+ Topics []*Topic `protobuf:"bytes,1,rep,name=topics,proto3" json:"topics,omitempty"` + // Optional. If not empty, indicates that there may be more topics that match + // the request; this value should be passed in a new `ListTopicsRequest`. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListTopicsResponse) Reset() { + *x = ListTopicsResponse{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListTopicsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicsResponse) ProtoMessage() {} + +func (x *ListTopicsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicsResponse.ProtoReflect.Descriptor instead. +func (*ListTopicsResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{14} +} + +func (x *ListTopicsResponse) GetTopics() []*Topic { + if x != nil { + return x.Topics + } + return nil +} + +func (x *ListTopicsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request for the `ListTopicSubscriptions` method. +type ListTopicSubscriptionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the topic that subscriptions are attached to. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + // Optional. Maximum number of subscription names to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. The value returned by the last `ListTopicSubscriptionsResponse`; + // indicates that this is a continuation of a prior `ListTopicSubscriptions` + // call, and that the system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListTopicSubscriptionsRequest) Reset() { + *x = ListTopicSubscriptionsRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListTopicSubscriptionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicSubscriptionsRequest) ProtoMessage() {} + +func (x *ListTopicSubscriptionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicSubscriptionsRequest.ProtoReflect.Descriptor instead. 
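+
+// Editor's note: an illustrative sketch, not generated code. It drains all
+// pages of ListTopics using the request/response pair above; the RPC itself
+// is abstracted behind a caller-supplied function so the sketch stays
+// client-agnostic (in practice this would be a generated gRPC client call).
+func exampleListAllTopics(call func(*ListTopicsRequest) (*ListTopicsResponse, error)) ([]*Topic, error) {
+	req := &ListTopicsRequest{
+		Project:  "projects/my-project", // placeholder
+		PageSize: 100,
+	}
+	var all []*Topic
+	for {
+		resp, err := call(req)
+		if err != nil {
+			return nil, err
+		}
+		all = append(all, resp.GetTopics()...)
+		// An empty next_page_token means the listing is complete; otherwise
+		// feed it back as the page_token of the next request.
+		if resp.GetNextPageToken() == "" {
+			return all, nil
+		}
+		req.PageToken = resp.GetNextPageToken()
+	}
+}
+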
+func (*ListTopicSubscriptionsRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{15} +} + +func (x *ListTopicSubscriptionsRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *ListTopicSubscriptionsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListTopicSubscriptionsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response for the `ListTopicSubscriptions` method. +type ListTopicSubscriptionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The names of subscriptions attached to the topic specified in the + // request. + Subscriptions []string `protobuf:"bytes,1,rep,name=subscriptions,proto3" json:"subscriptions,omitempty"` + // Optional. If not empty, indicates that there may be more subscriptions that + // match the request; this value should be passed in a new + // `ListTopicSubscriptionsRequest` to get more subscriptions. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListTopicSubscriptionsResponse) Reset() { + *x = ListTopicSubscriptionsResponse{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListTopicSubscriptionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicSubscriptionsResponse) ProtoMessage() {} + +func (x *ListTopicSubscriptionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicSubscriptionsResponse.ProtoReflect.Descriptor instead. +func (*ListTopicSubscriptionsResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{16} +} + +func (x *ListTopicSubscriptionsResponse) GetSubscriptions() []string { + if x != nil { + return x.Subscriptions + } + return nil +} + +func (x *ListTopicSubscriptionsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request for the `ListTopicSnapshots` method. +type ListTopicSnapshotsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the topic that snapshots are attached to. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + // Optional. Maximum number of snapshot names to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. The value returned by the last `ListTopicSnapshotsResponse`; + // indicates that this is a continuation of a prior `ListTopicSnapshots` call, + // and that the system should return the next page of data. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListTopicSnapshotsRequest) Reset() { + *x = ListTopicSnapshotsRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListTopicSnapshotsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicSnapshotsRequest) ProtoMessage() {} + +func (x *ListTopicSnapshotsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicSnapshotsRequest.ProtoReflect.Descriptor instead. +func (*ListTopicSnapshotsRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{17} +} + +func (x *ListTopicSnapshotsRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *ListTopicSnapshotsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListTopicSnapshotsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response for the `ListTopicSnapshots` method. +type ListTopicSnapshotsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The names of the snapshots that match the request. + Snapshots []string `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` + // Optional. If not empty, indicates that there may be more snapshots that + // match the request; this value should be passed in a new + // `ListTopicSnapshotsRequest` to get more snapshots. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListTopicSnapshotsResponse) Reset() { + *x = ListTopicSnapshotsResponse{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListTopicSnapshotsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicSnapshotsResponse) ProtoMessage() {} + +func (x *ListTopicSnapshotsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicSnapshotsResponse.ProtoReflect.Descriptor instead. +func (*ListTopicSnapshotsResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{18} +} + +func (x *ListTopicSnapshotsResponse) GetSnapshots() []string { + if x != nil { + return x.Snapshots + } + return nil +} + +func (x *ListTopicSnapshotsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request for the `DeleteTopic` method. +type DeleteTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the topic to delete. + // Format is `projects/{project}/topics/{topic}`. 
+ Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *DeleteTopicRequest) Reset() { + *x = DeleteTopicRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteTopicRequest) ProtoMessage() {} + +func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTopicRequest.ProtoReflect.Descriptor instead. +func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{19} +} + +func (x *DeleteTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +// Request for the DetachSubscription method. +type DetachSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription to detach. + // Format is `projects/{project}/subscriptions/{subscription}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` +} + +func (x *DetachSubscriptionRequest) Reset() { + *x = DetachSubscriptionRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DetachSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DetachSubscriptionRequest) ProtoMessage() {} + +func (x *DetachSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DetachSubscriptionRequest.ProtoReflect.Descriptor instead. +func (*DetachSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20} +} + +func (x *DetachSubscriptionRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +// Response for the DetachSubscription method. +// Reserved for future use. +type DetachSubscriptionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DetachSubscriptionResponse) Reset() { + *x = DetachSubscriptionResponse{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DetachSubscriptionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DetachSubscriptionResponse) ProtoMessage() {} + +func (x *DetachSubscriptionResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DetachSubscriptionResponse.ProtoReflect.Descriptor instead. 
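+
+// Editor's note: an illustrative sketch, not generated code; the helper is
+// invented. It shows the resource-name formats the two request messages
+// above require. Deleting a topic does not delete its subscriptions: per
+// the Subscription docs below, their `topic` field becomes
+// `_deleted-topic_`, while a detached subscription stops receiving messages
+// and retains no backlog.
+func exampleTeardownRequests(project, topic, sub string) (*DeleteTopicRequest, *DetachSubscriptionRequest) {
+	del := &DeleteTopicRequest{
+		Topic: "projects/" + project + "/topics/" + topic,
+	}
+	detach := &DetachSubscriptionRequest{
+		Subscription: "projects/" + project + "/subscriptions/" + sub,
+	}
+	return del, detach
+}
+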
+func (*DetachSubscriptionResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{21} +} + +// A subscription resource. If none of `push_config`, `bigquery_config`, or +// `cloud_storage_config` is set, then the subscriber will pull and ack messages +// using API methods. At most one of these fields may be set. +type Subscription struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the subscription. It must have the format + // `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must + // start with a letter, and contain only letters (`[A-Za-z]`), numbers + // (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), + // plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters + // in length, and it must not start with `"goog"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The name of the topic from which this subscription is receiving + // messages. Format is `projects/{project}/topics/{topic}`. The value of this + // field will be `_deleted-topic_` if the topic has been deleted. + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + // Optional. If push delivery is used with this subscription, this field is + // used to configure it. + PushConfig *PushConfig `protobuf:"bytes,4,opt,name=push_config,json=pushConfig,proto3" json:"push_config,omitempty"` + // Optional. If delivery to BigQuery is used with this subscription, this + // field is used to configure it. + BigqueryConfig *BigQueryConfig `protobuf:"bytes,18,opt,name=bigquery_config,json=bigqueryConfig,proto3" json:"bigquery_config,omitempty"` + // Optional. If delivery to Google Cloud Storage is used with this + // subscription, this field is used to configure it. + CloudStorageConfig *CloudStorageConfig `protobuf:"bytes,22,opt,name=cloud_storage_config,json=cloudStorageConfig,proto3" json:"cloud_storage_config,omitempty"` + // Optional. The approximate amount of time (on a best-effort basis) Pub/Sub + // waits for the subscriber to acknowledge receipt before resending the + // message. In the interval after the message is delivered and before it is + // acknowledged, it is considered to be _outstanding_. During that time + // period, the message will not be redelivered (on a best-effort basis). + // + // For pull subscriptions, this value is used as the initial value for the ack + // deadline. To override this value for a given message, call + // `ModifyAckDeadline` with the corresponding `ack_id` if using + // non-streaming pull or send the `ack_id` in a + // `StreamingModifyAckDeadlineRequest` if using streaming pull. + // The minimum custom deadline you can specify is 10 seconds. + // The maximum custom deadline you can specify is 600 seconds (10 minutes). + // If this parameter is 0, a default value of 10 seconds is used. + // + // For push delivery, this value is also used to set the request timeout for + // the call to the push endpoint. + // + // If the subscriber never acknowledges the message, the Pub/Sub + // system will eventually redeliver the message. + AckDeadlineSeconds int32 `protobuf:"varint,5,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds,proto3" json:"ack_deadline_seconds,omitempty"` + // Optional. Indicates whether to retain acknowledged messages. 
If true, then
+	// messages are not expunged from the subscription's backlog, even if they are
+	// acknowledged, until they fall out of the `message_retention_duration`
+	// window. This must be true if you would like to [`Seek` to a timestamp]
+	// (https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) in
+	// the past to replay previously-acknowledged messages.
+	RetainAckedMessages bool `protobuf:"varint,7,opt,name=retain_acked_messages,json=retainAckedMessages,proto3" json:"retain_acked_messages,omitempty"`
+	// Optional. How long to retain unacknowledged messages in the subscription's
+	// backlog, from the moment a message is published. If `retain_acked_messages`
+	// is true, then this also configures the retention of acknowledged messages,
+	// and thus configures how far back in time a `Seek` can be done. Defaults to
+	// 7 days. Cannot be more than 31 days or less than 10 minutes.
+	MessageRetentionDuration *durationpb.Duration `protobuf:"bytes,8,opt,name=message_retention_duration,json=messageRetentionDuration,proto3" json:"message_retention_duration,omitempty"`
+	// Optional. See [Creating and managing
+	// labels](https://cloud.google.com/pubsub/docs/labels).
+	Labels map[string]string `protobuf:"bytes,9,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Optional. If true, messages published with the same `ordering_key` in
+	// `PubsubMessage` will be delivered to the subscribers in the order in which
+	// they are received by the Pub/Sub system. Otherwise, they may be delivered
+	// in any order.
+	EnableMessageOrdering bool `protobuf:"varint,10,opt,name=enable_message_ordering,json=enableMessageOrdering,proto3" json:"enable_message_ordering,omitempty"`
+	// Optional. A policy that specifies the conditions for this subscription's
+	// expiration. A subscription is considered active as long as any connected
+	// subscriber is successfully consuming messages from the subscription or is
+	// issuing operations on the subscription. If `expiration_policy` is not set,
+	// a *default policy* with `ttl` of 31 days will be used. The minimum allowed
+	// value for `expiration_policy.ttl` is 1 day. If `expiration_policy` is set,
+	// but `expiration_policy.ttl` is not set, the subscription never expires.
+	ExpirationPolicy *ExpirationPolicy `protobuf:"bytes,11,opt,name=expiration_policy,json=expirationPolicy,proto3" json:"expiration_policy,omitempty"`
+	// Optional. An expression written in the Pub/Sub [filter
+	// language](https://cloud.google.com/pubsub/docs/filtering). If non-empty,
+	// then only `PubsubMessage`s whose `attributes` field matches the filter are
+	// delivered on this subscription. If empty, then no messages are filtered
+	// out.
+	Filter string `protobuf:"bytes,12,opt,name=filter,proto3" json:"filter,omitempty"`
+	// Optional. A policy that specifies the conditions for dead lettering
+	// messages in this subscription. If dead_letter_policy is not set, dead
+	// lettering is disabled.
+	//
+	// The Pub/Sub service account associated with this subscription's
+	// parent project (i.e.,
+	// service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have
+	// permission to Acknowledge() messages on this subscription.
+	DeadLetterPolicy *DeadLetterPolicy `protobuf:"bytes,13,opt,name=dead_letter_policy,json=deadLetterPolicy,proto3" json:"dead_letter_policy,omitempty"`
+	// Optional. A policy that specifies how Pub/Sub retries message delivery for
A policy that specifies how Pub/Sub retries message delivery for + // this subscription. + // + // If not set, the default retry policy is applied. This generally implies + // that messages will be retried as soon as possible for healthy subscribers. + // RetryPolicy will be triggered on NACKs or acknowledgment deadline exceeded + // events for a given message. + RetryPolicy *RetryPolicy `protobuf:"bytes,14,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // Optional. Indicates whether the subscription is detached from its topic. + // Detached subscriptions don't receive messages from their topic and don't + // retain any backlog. `Pull` and `StreamingPull` requests will return + // FAILED_PRECONDITION. If the subscription is a push subscription, pushes to + // the endpoint will not be made. + Detached bool `protobuf:"varint,15,opt,name=detached,proto3" json:"detached,omitempty"` + // Optional. If true, Pub/Sub provides the following guarantees for the + // delivery of a message with a given value of `message_id` on this + // subscription: + // + // * The message sent to a subscriber is guaranteed not to be resent + // before the message's acknowledgment deadline expires. + // * An acknowledged message will not be resent to a subscriber. + // + // Note that subscribers may still receive multiple copies of a message + // when `enable_exactly_once_delivery` is true if the message was published + // multiple times by a publisher client. These copies are considered distinct + // by Pub/Sub and have distinct `message_id` values. + EnableExactlyOnceDelivery bool `protobuf:"varint,16,opt,name=enable_exactly_once_delivery,json=enableExactlyOnceDelivery,proto3" json:"enable_exactly_once_delivery,omitempty"` + // Output only. Indicates the minimum duration for which a message is retained + // after it is published to the subscription's topic. If this field is set, + // messages published to the subscription's topic in the last + // `topic_message_retention_duration` are always available to subscribers. See + // the `message_retention_duration` field in `Topic`. This field is set only + // in responses from the server; it is ignored if it is set in any requests. + TopicMessageRetentionDuration *durationpb.Duration `protobuf:"bytes,17,opt,name=topic_message_retention_duration,json=topicMessageRetentionDuration,proto3" json:"topic_message_retention_duration,omitempty"` + // Output only. An output-only field indicating whether or not the + // subscription can receive messages. + State Subscription_State `protobuf:"varint,19,opt,name=state,proto3,enum=google.pubsub.v1.Subscription_State" json:"state,omitempty"` + // Output only. Information about the associated Analytics Hub subscription. + // Only set if the subscription is created by Analytics Hub. + AnalyticsHubSubscriptionInfo *Subscription_AnalyticsHubSubscriptionInfo `protobuf:"bytes,23,opt,name=analytics_hub_subscription_info,json=analyticsHubSubscriptionInfo,proto3" json:"analytics_hub_subscription_info,omitempty"` + // Optional. Transforms to be applied to messages before they are delivered to + // subscribers. Transforms are applied in the order specified.
+ MessageTransforms []*MessageTransform `protobuf:"bytes,25,rep,name=message_transforms,json=messageTransforms,proto3" json:"message_transforms,omitempty"` +} + +func (x *Subscription) Reset() { + *x = Subscription{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Subscription) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Subscription) ProtoMessage() {} + +func (x *Subscription) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Subscription.ProtoReflect.Descriptor instead. +func (*Subscription) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22} +} + +func (x *Subscription) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Subscription) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *Subscription) GetPushConfig() *PushConfig { + if x != nil { + return x.PushConfig + } + return nil +} + +func (x *Subscription) GetBigqueryConfig() *BigQueryConfig { + if x != nil { + return x.BigqueryConfig + } + return nil +} + +func (x *Subscription) GetCloudStorageConfig() *CloudStorageConfig { + if x != nil { + return x.CloudStorageConfig + } + return nil +} + +func (x *Subscription) GetAckDeadlineSeconds() int32 { + if x != nil { + return x.AckDeadlineSeconds + } + return 0 +} + +func (x *Subscription) GetRetainAckedMessages() bool { + if x != nil { + return x.RetainAckedMessages + } + return false +} + +func (x *Subscription) GetMessageRetentionDuration() *durationpb.Duration { + if x != nil { + return x.MessageRetentionDuration + } + return nil +} + +func (x *Subscription) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Subscription) GetEnableMessageOrdering() bool { + if x != nil { + return x.EnableMessageOrdering + } + return false +} + +func (x *Subscription) GetExpirationPolicy() *ExpirationPolicy { + if x != nil { + return x.ExpirationPolicy + } + return nil +} + +func (x *Subscription) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *Subscription) GetDeadLetterPolicy() *DeadLetterPolicy { + if x != nil { + return x.DeadLetterPolicy + } + return nil +} + +func (x *Subscription) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +func (x *Subscription) GetDetached() bool { + if x != nil { + return x.Detached + } + return false +} + +func (x *Subscription) GetEnableExactlyOnceDelivery() bool { + if x != nil { + return x.EnableExactlyOnceDelivery + } + return false +} + +func (x *Subscription) GetTopicMessageRetentionDuration() *durationpb.Duration { + if x != nil { + return x.TopicMessageRetentionDuration + } + return nil +} + +func (x *Subscription) GetState() Subscription_State { + if x != nil { + return x.State + } + return Subscription_STATE_UNSPECIFIED +} + +func (x *Subscription) GetAnalyticsHubSubscriptionInfo() *Subscription_AnalyticsHubSubscriptionInfo { + if x != nil { + return x.AnalyticsHubSubscriptionInfo + } + return nil +} + +func (x *Subscription) GetMessageTransforms() []*MessageTransform { + if x != nil { + return x.MessageTransforms + } + return nil +} + +// A 
policy that specifies how Pub/Sub retries message delivery. +// +// Retry delay will be exponential based on provided minimum and maximum +// backoffs. https://en.wikipedia.org/wiki/Exponential_backoff. +// +// RetryPolicy will be triggered on NACKs or acknowledgment deadline exceeded +// events for a given message. +// +// Retry Policy is implemented on a best effort basis. At times, the delay +// between consecutive deliveries may not match the configuration. That is, +// delay can be more or less than configured backoff. +type RetryPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The minimum delay between consecutive deliveries of a given + // message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. + MinimumBackoff *durationpb.Duration `protobuf:"bytes,1,opt,name=minimum_backoff,json=minimumBackoff,proto3" json:"minimum_backoff,omitempty"` + // Optional. The maximum delay between consecutive deliveries of a given + // message. Value should be between 0 and 600 seconds. Defaults to 600 + // seconds. + MaximumBackoff *durationpb.Duration `protobuf:"bytes,2,opt,name=maximum_backoff,json=maximumBackoff,proto3" json:"maximum_backoff,omitempty"` +} + +func (x *RetryPolicy) Reset() { + *x = RetryPolicy{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RetryPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy) ProtoMessage() {} + +func (x *RetryPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead. +func (*RetryPolicy) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23} +} + +func (x *RetryPolicy) GetMinimumBackoff() *durationpb.Duration { + if x != nil { + return x.MinimumBackoff + } + return nil +} + +func (x *RetryPolicy) GetMaximumBackoff() *durationpb.Duration { + if x != nil { + return x.MaximumBackoff + } + return nil +} + +// Dead lettering is done on a best effort basis. The same message might be +// dead lettered multiple times. +// +// If validation on any of the fields fails at subscription creation or update, +// the create/update subscription request will fail. +type DeadLetterPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The name of the topic to which dead letter messages should be + // published. Format is `projects/{project}/topics/{topic}`. The Pub/Sub + // service account associated with the enclosing subscription's parent project + // (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must + // have permission to Publish() to this topic. + // + // The operation will fail if the topic does not exist. + // Users should ensure that there is a subscription attached to this topic + // since messages published to a topic with no subscriptions are lost. + DeadLetterTopic string `protobuf:"bytes,1,opt,name=dead_letter_topic,json=deadLetterTopic,proto3" json:"dead_letter_topic,omitempty"` + // Optional. The maximum number of delivery attempts for any message.
The + // value must be between 5 and 100. + // + // The number of delivery attempts is defined as 1 + (the sum of number of + // NACKs and number of times the acknowledgment deadline has been exceeded + // for the message). + // + // A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that + // client libraries may automatically extend ack_deadlines. + // + // This field will be honored on a best effort basis. + // + // If this parameter is 0, a default value of 5 is used. + MaxDeliveryAttempts int32 `protobuf:"varint,2,opt,name=max_delivery_attempts,json=maxDeliveryAttempts,proto3" json:"max_delivery_attempts,omitempty"` +} + +func (x *DeadLetterPolicy) Reset() { + *x = DeadLetterPolicy{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeadLetterPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeadLetterPolicy) ProtoMessage() {} + +func (x *DeadLetterPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeadLetterPolicy.ProtoReflect.Descriptor instead. +func (*DeadLetterPolicy) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24} +} + +func (x *DeadLetterPolicy) GetDeadLetterTopic() string { + if x != nil { + return x.DeadLetterTopic + } + return "" +} + +func (x *DeadLetterPolicy) GetMaxDeliveryAttempts() int32 { + if x != nil { + return x.MaxDeliveryAttempts + } + return 0 +} + +// A policy that specifies the conditions for resource expiration (i.e., +// automatic resource deletion). +type ExpirationPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. Specifies the "time-to-live" duration for an associated resource. + // The resource expires if it is not active for a period of `ttl`. The + // definition of "activity" depends on the type of the associated resource. + // The minimum and maximum allowed values for `ttl` depend on the type of the + // associated resource, as well. If `ttl` is not set, the associated resource + // never expires. + Ttl *durationpb.Duration `protobuf:"bytes,1,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (x *ExpirationPolicy) Reset() { + *x = ExpirationPolicy{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExpirationPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExpirationPolicy) ProtoMessage() {} + +func (x *ExpirationPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExpirationPolicy.ProtoReflect.Descriptor instead. +func (*ExpirationPolicy) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{25} +} + +func (x *ExpirationPolicy) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +// Configuration for a push delivery endpoint. 
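The retry, dead-letter, and expiration policies defined above are typically composed on a single Subscription. A minimal sketch of how these generated types fit together, assuming the usual import path cloud.google.com/go/pubsub/apiv1/pubsubpb for this package; all resource names and values below are illustrative, chosen to sit inside the documented bounds (backoffs within 0 to 600 seconds, max_delivery_attempts within 5 to 100, expiration ttl of at least one day):

package pubsubexample

import (
	"time"

	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

// newTunedSubscription builds a Subscription message wiring together the
// policy messages defined in this file. Names are placeholders.
func newTunedSubscription() *pubsubpb.Subscription {
	return &pubsubpb.Subscription{
		Name:               "projects/my-project/subscriptions/my-sub",
		Topic:              "projects/my-project/topics/my-topic",
		AckDeadlineSeconds: 30, // 10-600s; 0 falls back to the 10s default
		RetryPolicy: &pubsubpb.RetryPolicy{
			MinimumBackoff: durationpb.New(10 * time.Second),
			MaximumBackoff: durationpb.New(5 * time.Minute),
		},
		DeadLetterPolicy: &pubsubpb.DeadLetterPolicy{
			DeadLetterTopic:     "projects/my-project/topics/my-dead-letter",
			MaxDeliveryAttempts: 10, // must be between 5 and 100; 0 means the default of 5
		},
		ExpirationPolicy: &pubsubpb.ExpirationPolicy{
			Ttl: durationpb.New(48 * time.Hour), // minimum allowed ttl is 1 day
		},
	}
}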
+type PushConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. A URL locating the endpoint to which messages should be pushed. + // For example, a Webhook endpoint might use `https://example.com/push`. + PushEndpoint string `protobuf:"bytes,1,opt,name=push_endpoint,json=pushEndpoint,proto3" json:"push_endpoint,omitempty"` + // Optional. Endpoint configuration attributes that can be used to control + // different aspects of the message delivery. + // + // The only currently supported attribute is `x-goog-version`, which you can + // use to change the format of the pushed message. This attribute + // indicates the version of the data expected by the endpoint. This + // controls the shape of the pushed message (i.e., its fields and metadata). + // + // If not present during the `CreateSubscription` call, it will default to + // the version of the Pub/Sub API used to make such call. If not present in a + // `ModifyPushConfig` call, its value will not be changed. `GetSubscription` + // calls will always return a valid version, even if the subscription was + // created without this attribute. + // + // The only supported values for the `x-goog-version` attribute are: + // + // * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. + // * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API. + // + // For example: + // `attributes { "x-goog-version": "v1" }` + Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // An authentication method used by push endpoints to verify the source of + // push requests. This can be used with push endpoints that are private by + // default to allow requests only from the Pub/Sub system, for example. + // This field is optional and should be set only by users interested in + // authenticated push. + // + // Types that are assignable to AuthenticationMethod: + // + // *PushConfig_OidcToken_ + AuthenticationMethod isPushConfig_AuthenticationMethod `protobuf_oneof:"authentication_method"` + // The format of the delivered message to the push endpoint is defined by + // the chosen wrapper. When unset, `PubsubWrapper` is used. + // + // Types that are assignable to Wrapper: + // + // *PushConfig_PubsubWrapper_ + // *PushConfig_NoWrapper_ + Wrapper isPushConfig_Wrapper `protobuf_oneof:"wrapper"` +} + +func (x *PushConfig) Reset() { + *x = PushConfig{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PushConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushConfig) ProtoMessage() {} + +func (x *PushConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushConfig.ProtoReflect.Descriptor instead. 
+func (*PushConfig) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26} +} + +func (x *PushConfig) GetPushEndpoint() string { + if x != nil { + return x.PushEndpoint + } + return "" +} + +func (x *PushConfig) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +func (m *PushConfig) GetAuthenticationMethod() isPushConfig_AuthenticationMethod { + if m != nil { + return m.AuthenticationMethod + } + return nil +} + +func (x *PushConfig) GetOidcToken() *PushConfig_OidcToken { + if x, ok := x.GetAuthenticationMethod().(*PushConfig_OidcToken_); ok { + return x.OidcToken + } + return nil +} + +func (m *PushConfig) GetWrapper() isPushConfig_Wrapper { + if m != nil { + return m.Wrapper + } + return nil +} + +func (x *PushConfig) GetPubsubWrapper() *PushConfig_PubsubWrapper { + if x, ok := x.GetWrapper().(*PushConfig_PubsubWrapper_); ok { + return x.PubsubWrapper + } + return nil +} + +func (x *PushConfig) GetNoWrapper() *PushConfig_NoWrapper { + if x, ok := x.GetWrapper().(*PushConfig_NoWrapper_); ok { + return x.NoWrapper + } + return nil +} + +type isPushConfig_AuthenticationMethod interface { + isPushConfig_AuthenticationMethod() +} + +type PushConfig_OidcToken_ struct { + // Optional. If specified, Pub/Sub will generate and attach an OIDC JWT + // token as an `Authorization` header in the HTTP request for every pushed + // message. + OidcToken *PushConfig_OidcToken `protobuf:"bytes,3,opt,name=oidc_token,json=oidcToken,proto3,oneof"` +} + +func (*PushConfig_OidcToken_) isPushConfig_AuthenticationMethod() {} + +type isPushConfig_Wrapper interface { + isPushConfig_Wrapper() +} + +type PushConfig_PubsubWrapper_ struct { + // Optional. When set, the payload to the push endpoint is in the form of + // the JSON representation of a PubsubMessage + // (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage). + PubsubWrapper *PushConfig_PubsubWrapper `protobuf:"bytes,4,opt,name=pubsub_wrapper,json=pubsubWrapper,proto3,oneof"` +} + +type PushConfig_NoWrapper_ struct { + // Optional. When set, the payload to the push endpoint is not wrapped. + NoWrapper *PushConfig_NoWrapper `protobuf:"bytes,5,opt,name=no_wrapper,json=noWrapper,proto3,oneof"` +} + +func (*PushConfig_PubsubWrapper_) isPushConfig_Wrapper() {} + +func (*PushConfig_NoWrapper_) isPushConfig_Wrapper() {} + +// Configuration for a BigQuery subscription. +type BigQueryConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The name of the table to which to write data, of the form + // {projectId}.{datasetId}.{tableId} + Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` + // Optional. When true, use the topic's schema as the columns to write to in + // BigQuery, if it exists. `use_topic_schema` and `use_table_schema` cannot be + // enabled at the same time. + UseTopicSchema bool `protobuf:"varint,2,opt,name=use_topic_schema,json=useTopicSchema,proto3" json:"use_topic_schema,omitempty"` + // Optional. When true, write the subscription name, message_id, publish_time, + // attributes, and ordering_key to additional columns in the table. The + // subscription name, message_id, and publish_time fields are put in their own + // columns while all other message properties (other than data) are written to + // a JSON object in the attributes column. 
+ WriteMetadata bool `protobuf:"varint,3,opt,name=write_metadata,json=writeMetadata,proto3" json:"write_metadata,omitempty"` + // Optional. When true and use_topic_schema is true, any fields that are a + // part of the topic schema that are not part of the BigQuery table schema are + // dropped when writing to BigQuery. Otherwise, the schemas must be kept in + // sync and any messages with extra fields are not written and remain in the + // subscription's backlog. + DropUnknownFields bool `protobuf:"varint,4,opt,name=drop_unknown_fields,json=dropUnknownFields,proto3" json:"drop_unknown_fields,omitempty"` + // Output only. An output-only field that indicates whether or not the + // subscription can receive messages. + State BigQueryConfig_State `protobuf:"varint,5,opt,name=state,proto3,enum=google.pubsub.v1.BigQueryConfig_State" json:"state,omitempty"` + // Optional. When true, use the BigQuery table's schema as the columns to + // write to in BigQuery. `use_table_schema` and `use_topic_schema` cannot be + // enabled at the same time. + UseTableSchema bool `protobuf:"varint,6,opt,name=use_table_schema,json=useTableSchema,proto3" json:"use_table_schema,omitempty"` + // Optional. The service account to use to write to BigQuery. The subscription + // creator or updater that specifies this field must have + // `iam.serviceAccounts.actAs` permission on the service account. If not + // specified, the Pub/Sub [service + // agent](https://cloud.google.com/iam/docs/service-agents), + // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. + ServiceAccountEmail string `protobuf:"bytes,7,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` +} + +func (x *BigQueryConfig) Reset() { + *x = BigQueryConfig{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BigQueryConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BigQueryConfig) ProtoMessage() {} + +func (x *BigQueryConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BigQueryConfig.ProtoReflect.Descriptor instead. +func (*BigQueryConfig) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{27} +} + +func (x *BigQueryConfig) GetTable() string { + if x != nil { + return x.Table + } + return "" +} + +func (x *BigQueryConfig) GetUseTopicSchema() bool { + if x != nil { + return x.UseTopicSchema + } + return false +} + +func (x *BigQueryConfig) GetWriteMetadata() bool { + if x != nil { + return x.WriteMetadata + } + return false +} + +func (x *BigQueryConfig) GetDropUnknownFields() bool { + if x != nil { + return x.DropUnknownFields + } + return false +} + +func (x *BigQueryConfig) GetState() BigQueryConfig_State { + if x != nil { + return x.State + } + return BigQueryConfig_STATE_UNSPECIFIED +} + +func (x *BigQueryConfig) GetUseTableSchema() bool { + if x != nil { + return x.UseTableSchema + } + return false +} + +func (x *BigQueryConfig) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +// Configuration for a Cloud Storage subscription. 
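Because `use_topic_schema` and `use_table_schema` are mutually exclusive, a BigQuery delivery config picks exactly one schema source. A hedged sketch under the same assumed pubsubpb import path, with an illustrative table name:

package pubsubexample

import "cloud.google.com/go/pubsub/apiv1/pubsubpb"

// newBigQueryExport uses the BigQuery table's schema as the column source;
// setting UseTopicSchema as well would be rejected, since the two options
// cannot be enabled at the same time.
func newBigQueryExport() *pubsubpb.BigQueryConfig {
	return &pubsubpb.BigQueryConfig{
		Table:          "my-project.my_dataset.my_table", // {projectId}.{datasetId}.{tableId}
		UseTableSchema: true,
		WriteMetadata:  true, // also write message_id, publish_time, attributes, ...
	}
}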
+type CloudStorageConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. User-provided name for the Cloud Storage bucket. + // The bucket must be created by the user. The bucket name must be without + // any prefix like "gs://". See the [bucket naming + // requirements](https://cloud.google.com/storage/docs/buckets#naming). + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Optional. User-provided prefix for Cloud Storage filename. See the [object + // naming requirements](https://cloud.google.com/storage/docs/objects#naming). + FilenamePrefix string `protobuf:"bytes,2,opt,name=filename_prefix,json=filenamePrefix,proto3" json:"filename_prefix,omitempty"` + // Optional. User-provided suffix for Cloud Storage filename. See the [object + // naming requirements](https://cloud.google.com/storage/docs/objects#naming). + // Must not end in "/". + FilenameSuffix string `protobuf:"bytes,3,opt,name=filename_suffix,json=filenameSuffix,proto3" json:"filename_suffix,omitempty"` + // Optional. User-provided format string specifying how to represent datetimes + // in Cloud Storage filenames. See the [datetime format + // guidance](https://cloud.google.com/pubsub/docs/create-cloudstorage-subscription#file_names). + FilenameDatetimeFormat string `protobuf:"bytes,10,opt,name=filename_datetime_format,json=filenameDatetimeFormat,proto3" json:"filename_datetime_format,omitempty"` + // Defaults to text format. + // + // Types that are assignable to OutputFormat: + // + // *CloudStorageConfig_TextConfig_ + // *CloudStorageConfig_AvroConfig_ + OutputFormat isCloudStorageConfig_OutputFormat `protobuf_oneof:"output_format"` + // Optional. The maximum duration that can elapse before a new Cloud Storage + // file is created. Min 1 minute, max 10 minutes, default 5 minutes. May not + // exceed the subscription's acknowledgment deadline. + MaxDuration *durationpb.Duration `protobuf:"bytes,6,opt,name=max_duration,json=maxDuration,proto3" json:"max_duration,omitempty"` + // Optional. The maximum bytes that can be written to a Cloud Storage file + // before a new file is created. Min 1 KB, max 10 GiB. The max_bytes limit may + // be exceeded in cases where messages are larger than the limit. + MaxBytes int64 `protobuf:"varint,7,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` + // Optional. The maximum number of messages that can be written to a Cloud + // Storage file before a new file is created. Min 1000 messages. + MaxMessages int64 `protobuf:"varint,8,opt,name=max_messages,json=maxMessages,proto3" json:"max_messages,omitempty"` + // Output only. An output-only field that indicates whether or not the + // subscription can receive messages. + State CloudStorageConfig_State `protobuf:"varint,9,opt,name=state,proto3,enum=google.pubsub.v1.CloudStorageConfig_State" json:"state,omitempty"` + // Optional. The service account to use to write to Cloud Storage. The + // subscription creator or updater that specifies this field must have + // `iam.serviceAccounts.actAs` permission on the service account. If not + // specified, the Pub/Sub + // [service agent](https://cloud.google.com/iam/docs/service-agents), + // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used.
+ ServiceAccountEmail string `protobuf:"bytes,11,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` +} + +func (x *CloudStorageConfig) Reset() { + *x = CloudStorageConfig{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CloudStorageConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloudStorageConfig) ProtoMessage() {} + +func (x *CloudStorageConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloudStorageConfig.ProtoReflect.Descriptor instead. +func (*CloudStorageConfig) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28} +} + +func (x *CloudStorageConfig) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *CloudStorageConfig) GetFilenamePrefix() string { + if x != nil { + return x.FilenamePrefix + } + return "" +} + +func (x *CloudStorageConfig) GetFilenameSuffix() string { + if x != nil { + return x.FilenameSuffix + } + return "" +} + +func (x *CloudStorageConfig) GetFilenameDatetimeFormat() string { + if x != nil { + return x.FilenameDatetimeFormat + } + return "" +} + +func (m *CloudStorageConfig) GetOutputFormat() isCloudStorageConfig_OutputFormat { + if m != nil { + return m.OutputFormat + } + return nil +} + +func (x *CloudStorageConfig) GetTextConfig() *CloudStorageConfig_TextConfig { + if x, ok := x.GetOutputFormat().(*CloudStorageConfig_TextConfig_); ok { + return x.TextConfig + } + return nil +} + +func (x *CloudStorageConfig) GetAvroConfig() *CloudStorageConfig_AvroConfig { + if x, ok := x.GetOutputFormat().(*CloudStorageConfig_AvroConfig_); ok { + return x.AvroConfig + } + return nil +} + +func (x *CloudStorageConfig) GetMaxDuration() *durationpb.Duration { + if x != nil { + return x.MaxDuration + } + return nil +} + +func (x *CloudStorageConfig) GetMaxBytes() int64 { + if x != nil { + return x.MaxBytes + } + return 0 +} + +func (x *CloudStorageConfig) GetMaxMessages() int64 { + if x != nil { + return x.MaxMessages + } + return 0 +} + +func (x *CloudStorageConfig) GetState() CloudStorageConfig_State { + if x != nil { + return x.State + } + return CloudStorageConfig_STATE_UNSPECIFIED +} + +func (x *CloudStorageConfig) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +type isCloudStorageConfig_OutputFormat interface { + isCloudStorageConfig_OutputFormat() +} + +type CloudStorageConfig_TextConfig_ struct { + // Optional. If set, message data will be written to Cloud Storage in text + // format. + TextConfig *CloudStorageConfig_TextConfig `protobuf:"bytes,4,opt,name=text_config,json=textConfig,proto3,oneof"` +} + +type CloudStorageConfig_AvroConfig_ struct { + // Optional. If set, message data will be written to Cloud Storage in Avro + // format. + AvroConfig *CloudStorageConfig_AvroConfig `protobuf:"bytes,5,opt,name=avro_config,json=avroConfig,proto3,oneof"` +} + +func (*CloudStorageConfig_TextConfig_) isCloudStorageConfig_OutputFormat() {} + +func (*CloudStorageConfig_AvroConfig_) isCloudStorageConfig_OutputFormat() {} + +// A message and its corresponding acknowledgment ID. 
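The `output_format` oneof above is selected through its generated wrapper types. A sketch of an Avro export that stays inside the documented rotation bounds (duration 1 to 10 minutes, at most 10 GiB per file, at least 1000 messages); the pubsubpb import path, bucket name, and limits are assumptions for illustration:

package pubsubexample

import (
	"time"

	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

// newAvroExport picks the Avro branch of the output_format oneof by
// assigning its wrapper type to the OutputFormat interface field.
func newAvroExport() *pubsubpb.CloudStorageConfig {
	return &pubsubpb.CloudStorageConfig{
		Bucket:         "my-export-bucket", // no "gs://" prefix
		FilenamePrefix: "events/",
		FilenameSuffix: ".avro", // must not end in "/"
		OutputFormat: &pubsubpb.CloudStorageConfig_AvroConfig_{
			AvroConfig: &pubsubpb.CloudStorageConfig_AvroConfig{},
		},
		MaxDuration: durationpb.New(5 * time.Minute),
		MaxBytes:    1 << 30, // 1 GiB per object
		MaxMessages: 100000,
	}
}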
+type ReceivedMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. This ID can be used to acknowledge the received message. + AckId string `protobuf:"bytes,1,opt,name=ack_id,json=ackId,proto3" json:"ack_id,omitempty"` + // Optional. The message. + Message *PubsubMessage `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Optional. The approximate number of times that Pub/Sub has attempted to + // deliver the associated message to a subscriber. + // + // More precisely, this is 1 + (number of NACKs) + + // (number of ack_deadline exceeds) for this message. + // + // A NACK is any call to ModifyAckDeadline with a 0 deadline. An ack_deadline + // exceeds event is whenever a message is not acknowledged within + // ack_deadline. Note that ack_deadline is initially + // Subscription.ackDeadlineSeconds, but may get extended automatically by + // the client library. + // + // Upon the first delivery of a given message, `delivery_attempt` will have a + // value of 1. The value is calculated at best effort and is approximate. + // + // If a DeadLetterPolicy is not set on the subscription, this will be 0. + DeliveryAttempt int32 `protobuf:"varint,3,opt,name=delivery_attempt,json=deliveryAttempt,proto3" json:"delivery_attempt,omitempty"` +} + +func (x *ReceivedMessage) Reset() { + *x = ReceivedMessage{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReceivedMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReceivedMessage) ProtoMessage() {} + +func (x *ReceivedMessage) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReceivedMessage.ProtoReflect.Descriptor instead. +func (*ReceivedMessage) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{29} +} + +func (x *ReceivedMessage) GetAckId() string { + if x != nil { + return x.AckId + } + return "" +} + +func (x *ReceivedMessage) GetMessage() *PubsubMessage { + if x != nil { + return x.Message + } + return nil +} + +func (x *ReceivedMessage) GetDeliveryAttempt() int32 { + if x != nil { + return x.DeliveryAttempt + } + return 0 +} + +// Request for the GetSubscription method. +type GetSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the subscription to get. + // Format is `projects/{project}/subscriptions/{sub}`. 
+ Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` +} + +func (x *GetSubscriptionRequest) Reset() { + *x = GetSubscriptionRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionRequest) ProtoMessage() {} + +func (x *GetSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionRequest.ProtoReflect.Descriptor instead. +func (*GetSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{30} +} + +func (x *GetSubscriptionRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +// Request for the UpdateSubscription method. +type UpdateSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The updated subscription object. + Subscription *Subscription `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Required. Indicates which fields in the provided subscription to update. + // Must be specified and non-empty. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateSubscriptionRequest) Reset() { + *x = UpdateSubscriptionRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSubscriptionRequest) ProtoMessage() {} + +func (x *UpdateSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSubscriptionRequest.ProtoReflect.Descriptor instead. +func (*UpdateSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{31} +} + +func (x *UpdateSubscriptionRequest) GetSubscription() *Subscription { + if x != nil { + return x.Subscription + } + return nil +} + +func (x *UpdateSubscriptionRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request for the `ListSubscriptions` method. +type ListSubscriptionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the project in which to list subscriptions. + // Format is `projects/{project-id}`. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // Optional. Maximum number of subscriptions to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. 
The value returned by the last `ListSubscriptionsResponse`; + // indicates that this is a continuation of a prior `ListSubscriptions` call, + // and that the system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListSubscriptionsRequest) Reset() { + *x = ListSubscriptionsRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSubscriptionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSubscriptionsRequest) ProtoMessage() {} + +func (x *ListSubscriptionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSubscriptionsRequest.ProtoReflect.Descriptor instead. +func (*ListSubscriptionsRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{32} +} + +func (x *ListSubscriptionsRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *ListSubscriptionsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListSubscriptionsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response for the `ListSubscriptions` method. +type ListSubscriptionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The subscriptions that match the request. + Subscriptions []*Subscription `protobuf:"bytes,1,rep,name=subscriptions,proto3" json:"subscriptions,omitempty"` + // Optional. If not empty, indicates that there may be more subscriptions that + // match the request; this value should be passed in a new + // `ListSubscriptionsRequest` to get more subscriptions. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListSubscriptionsResponse) Reset() { + *x = ListSubscriptionsResponse{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSubscriptionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSubscriptionsResponse) ProtoMessage() {} + +func (x *ListSubscriptionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSubscriptionsResponse.ProtoReflect.Descriptor instead. +func (*ListSubscriptionsResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{33} +} + +func (x *ListSubscriptionsResponse) GetSubscriptions() []*Subscription { + if x != nil { + return x.Subscriptions + } + return nil +} + +func (x *ListSubscriptionsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request for the DeleteSubscription method. 
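The `page_token` / `next_page_token` pair above is the standard list-pagination handshake. If the companion client package cloud.google.com/go/pubsub/apiv1 is available (an assumption; this file only defines the request and response types), its iterator drives that handshake:

package pubsubexample

import (
	"context"
	"fmt"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/api/iterator"
)

// listAllSubscriptions prints every subscription in a project; the iterator
// feeds each response's next_page_token back as the next request's page_token.
func listAllSubscriptions(ctx context.Context, projectID string) error {
	client, err := pubsub.NewSubscriberClient(ctx)
	if err != nil {
		return err
	}
	defer client.Close()

	it := client.ListSubscriptions(ctx, &pubsubpb.ListSubscriptionsRequest{
		Project: "projects/" + projectID,
	})
	for {
		sub, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(sub.GetName())
	}
}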
+type DeleteSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription to delete. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` +} + +func (x *DeleteSubscriptionRequest) Reset() { + *x = DeleteSubscriptionRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteSubscriptionRequest) ProtoMessage() {} + +func (x *DeleteSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteSubscriptionRequest.ProtoReflect.Descriptor instead. +func (*DeleteSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34} +} + +func (x *DeleteSubscriptionRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +// Request for the ModifyPushConfig method. +type ModifyPushConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the subscription. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Required. The push configuration for future deliveries. + // + // An empty `pushConfig` indicates that the Pub/Sub system should + // stop pushing messages from the given subscription and allow + // messages to be pulled and acknowledged - effectively pausing + // the subscription if `Pull` or `StreamingPull` is not called. + PushConfig *PushConfig `protobuf:"bytes,2,opt,name=push_config,json=pushConfig,proto3" json:"push_config,omitempty"` +} + +func (x *ModifyPushConfigRequest) Reset() { + *x = ModifyPushConfigRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ModifyPushConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ModifyPushConfigRequest) ProtoMessage() {} + +func (x *ModifyPushConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ModifyPushConfigRequest.ProtoReflect.Descriptor instead. +func (*ModifyPushConfigRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35} +} + +func (x *ModifyPushConfigRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (x *ModifyPushConfigRequest) GetPushConfig() *PushConfig { + if x != nil { + return x.PushConfig + } + return nil +} + +// Request for the `Pull` method. 
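As the ModifyPushConfigRequest comment notes, an empty `pushConfig` pauses push delivery. A sketch using the apiv1 SubscriberClient (assumed available alongside these generated types; the subscription name is illustrative):

package pubsubexample

import (
	"context"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	"cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

// pausePushDelivery clears the push configuration so the subscription reverts
// to pull delivery; messages then accumulate until pulled and acknowledged.
func pausePushDelivery(ctx context.Context, client *pubsub.SubscriberClient, subName string) error {
	return client.ModifyPushConfig(ctx, &pubsubpb.ModifyPushConfigRequest{
		Subscription: subName,                 // e.g. "projects/my-project/subscriptions/my-sub"
		PushConfig:   &pubsubpb.PushConfig{}, // empty config stops pushes
	})
}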
+type PullRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription from which messages should be pulled. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Optional. If this field is set to true, the system will respond immediately + // even if there are no messages available to return in the `Pull` + // response. Otherwise, the system may wait (for a bounded amount of time) + // until at least one message is available, rather than returning no messages. + // Warning: setting this field to `true` is discouraged because it adversely + // impacts the performance of `Pull` operations. We recommend that users do + // not set this field. + // + // Deprecated: Marked as deprecated in google/pubsub/v1/pubsub.proto. + ReturnImmediately bool `protobuf:"varint,2,opt,name=return_immediately,json=returnImmediately,proto3" json:"return_immediately,omitempty"` + // Required. The maximum number of messages to return for this request. Must + // be a positive integer. The Pub/Sub system may return fewer than the number + // specified. + MaxMessages int32 `protobuf:"varint,3,opt,name=max_messages,json=maxMessages,proto3" json:"max_messages,omitempty"` +} + +func (x *PullRequest) Reset() { + *x = PullRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PullRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullRequest) ProtoMessage() {} + +func (x *PullRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullRequest.ProtoReflect.Descriptor instead. +func (*PullRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{36} +} + +func (x *PullRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +// Deprecated: Marked as deprecated in google/pubsub/v1/pubsub.proto. +func (x *PullRequest) GetReturnImmediately() bool { + if x != nil { + return x.ReturnImmediately + } + return false +} + +func (x *PullRequest) GetMaxMessages() int32 { + if x != nil { + return x.MaxMessages + } + return 0 +} + +// Response for the `Pull` method. +type PullResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. Received Pub/Sub messages. The list will be empty if there are no + // more messages available in the backlog, or if no messages could be returned + // before the request timeout. For JSON, the response can be entirely + // empty. The Pub/Sub system may return fewer than the `maxMessages` requested + // even if there are more messages available in the backlog.
+ ReceivedMessages []*ReceivedMessage `protobuf:"bytes,1,rep,name=received_messages,json=receivedMessages,proto3" json:"received_messages,omitempty"` +} + +func (x *PullResponse) Reset() { + *x = PullResponse{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PullResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullResponse) ProtoMessage() {} + +func (x *PullResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullResponse.ProtoReflect.Descriptor instead. +func (*PullResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37} +} + +func (x *PullResponse) GetReceivedMessages() []*ReceivedMessage { + if x != nil { + return x.ReceivedMessages + } + return nil +} + +// Request for the ModifyAckDeadline method. +type ModifyAckDeadlineRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the subscription. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Required. List of acknowledgment IDs. + AckIds []string `protobuf:"bytes,4,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"` + // Required. The new ack deadline with respect to the time this request was + // sent to the Pub/Sub system. For example, if the value is 10, the new ack + // deadline will expire 10 seconds after the `ModifyAckDeadline` call was + // made. Specifying zero might immediately make the message available for + // delivery to another subscriber client. This typically results in an + // increase in the rate of message redeliveries (that is, duplicates). + // The minimum deadline you can specify is 0 seconds. + // The maximum deadline you can specify in a single request is 600 seconds + // (10 minutes). + AckDeadlineSeconds int32 `protobuf:"varint,3,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds,proto3" json:"ack_deadline_seconds,omitempty"` +} + +func (x *ModifyAckDeadlineRequest) Reset() { + *x = ModifyAckDeadlineRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ModifyAckDeadlineRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ModifyAckDeadlineRequest) ProtoMessage() {} + +func (x *ModifyAckDeadlineRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ModifyAckDeadlineRequest.ProtoReflect.Descriptor instead. 
+func (*ModifyAckDeadlineRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{38} +} + +func (x *ModifyAckDeadlineRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (x *ModifyAckDeadlineRequest) GetAckIds() []string { + if x != nil { + return x.AckIds + } + return nil +} + +func (x *ModifyAckDeadlineRequest) GetAckDeadlineSeconds() int32 { + if x != nil { + return x.AckDeadlineSeconds + } + return 0 +} + +// Request for the Acknowledge method. +type AcknowledgeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription whose message is being acknowledged. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Required. The acknowledgment ID for the messages being acknowledged that + // was returned by the Pub/Sub system in the `Pull` response. Must not be + // empty. + AckIds []string `protobuf:"bytes,2,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"` +} + +func (x *AcknowledgeRequest) Reset() { + *x = AcknowledgeRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AcknowledgeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AcknowledgeRequest) ProtoMessage() {} + +func (x *AcknowledgeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AcknowledgeRequest.ProtoReflect.Descriptor instead. +func (*AcknowledgeRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39} +} + +func (x *AcknowledgeRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (x *AcknowledgeRequest) GetAckIds() []string { + if x != nil { + return x.AckIds + } + return nil +} + +// Request for the `StreamingPull` streaming RPC method. This request is used to +// establish the initial stream as well as to stream acknowledgments and ack +// deadline modifications from the client to the server. +type StreamingPullRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription for which to initialize the new stream. This + // must be provided in the first request on the stream, and must not be set in + // subsequent requests from client to server. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Optional. List of acknowledgment IDs for acknowledging previously received + // messages (received on this stream or a different stream). If an ack ID has + // expired, the corresponding message may be redelivered later. Acknowledging + // a message more than once will not result in an error. If the acknowledgment + // ID is malformed, the stream will be aborted with status `INVALID_ARGUMENT`. + AckIds []string `protobuf:"bytes,2,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"` + // Optional. 
The list of new ack deadlines for the IDs listed in + // `modify_deadline_ack_ids`. The size of this list must be the same as the + // size of `modify_deadline_ack_ids`. If it differs the stream will be aborted + // with `INVALID_ARGUMENT`. Each element in this list is applied to the + // element in the same position in `modify_deadline_ack_ids`. The new ack + // deadline is with respect to the time this request was sent to the Pub/Sub + // system. Must be >= 0. For example, if the value is 10, the new ack deadline + // will expire 10 seconds after this request is received. If the value is 0, + // the message is immediately made available for another streaming or + // non-streaming pull request. If the value is < 0 (an error), the stream will + // be aborted with status `INVALID_ARGUMENT`. + ModifyDeadlineSeconds []int32 `protobuf:"varint,3,rep,packed,name=modify_deadline_seconds,json=modifyDeadlineSeconds,proto3" json:"modify_deadline_seconds,omitempty"` + // Optional. List of acknowledgment IDs whose deadline will be modified based + // on the corresponding element in `modify_deadline_seconds`. This field can + // be used to indicate that more time is needed to process a message by the + // subscriber, or to make the message available for redelivery if the + // processing was interrupted. + ModifyDeadlineAckIds []string `protobuf:"bytes,4,rep,name=modify_deadline_ack_ids,json=modifyDeadlineAckIds,proto3" json:"modify_deadline_ack_ids,omitempty"` + // Required. The ack deadline to use for the stream. This must be provided in + // the first request on the stream, but it can also be updated on subsequent + // requests from client to server. The minimum deadline you can specify is 10 + // seconds. The maximum deadline you can specify is 600 seconds (10 minutes). + StreamAckDeadlineSeconds int32 `protobuf:"varint,5,opt,name=stream_ack_deadline_seconds,json=streamAckDeadlineSeconds,proto3" json:"stream_ack_deadline_seconds,omitempty"` + // Optional. A unique identifier that is used to distinguish client instances + // from each other. Only needs to be provided on the initial request. When a + // stream disconnects and reconnects for the same stream, the client_id should + // be set to the same value so that state associated with the old stream can + // be transferred to the new stream. The same client_id should not be used for + // different client instances. + ClientId string `protobuf:"bytes,6,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + // Optional. Flow control settings for the maximum number of outstanding + // messages. When there are `max_outstanding_messages` currently sent to the + // streaming pull client that have not yet been acked or nacked, the server + // stops sending more messages. The sending of messages resumes once the + // number of outstanding messages is less than this value. If the value is + // <= 0, there is no limit to the number of outstanding messages. This + // property can only be set on the initial StreamingPullRequest. If it is set + // on a subsequent request, the stream will be aborted with status + // `INVALID_ARGUMENT`. + MaxOutstandingMessages int64 `protobuf:"varint,7,opt,name=max_outstanding_messages,json=maxOutstandingMessages,proto3" json:"max_outstanding_messages,omitempty"` + // Optional. Flow control settings for the maximum number of outstanding + // bytes. 
When there are `max_outstanding_bytes` or more worth of messages + // currently sent to the streaming pull client that have not yet been acked or + // nacked, the server will stop sending more messages. The sending of messages + // resumes once the number of outstanding bytes is less than this value. If + // the value is <= 0, there is no limit to the number of outstanding bytes. + // This property can only be set on the initial StreamingPullRequest. If it is + // set on a subsequent request, the stream will be aborted with status + // `INVALID_ARGUMENT`. + MaxOutstandingBytes int64 `protobuf:"varint,8,opt,name=max_outstanding_bytes,json=maxOutstandingBytes,proto3" json:"max_outstanding_bytes,omitempty"` +} + +func (x *StreamingPullRequest) Reset() { + *x = StreamingPullRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamingPullRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamingPullRequest) ProtoMessage() {} + +func (x *StreamingPullRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamingPullRequest.ProtoReflect.Descriptor instead. +func (*StreamingPullRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{40} +} + +func (x *StreamingPullRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (x *StreamingPullRequest) GetAckIds() []string { + if x != nil { + return x.AckIds + } + return nil +} + +func (x *StreamingPullRequest) GetModifyDeadlineSeconds() []int32 { + if x != nil { + return x.ModifyDeadlineSeconds + } + return nil +} + +func (x *StreamingPullRequest) GetModifyDeadlineAckIds() []string { + if x != nil { + return x.ModifyDeadlineAckIds + } + return nil +} + +func (x *StreamingPullRequest) GetStreamAckDeadlineSeconds() int32 { + if x != nil { + return x.StreamAckDeadlineSeconds + } + return 0 +} + +func (x *StreamingPullRequest) GetClientId() string { + if x != nil { + return x.ClientId + } + return "" +} + +func (x *StreamingPullRequest) GetMaxOutstandingMessages() int64 { + if x != nil { + return x.MaxOutstandingMessages + } + return 0 +} + +func (x *StreamingPullRequest) GetMaxOutstandingBytes() int64 { + if x != nil { + return x.MaxOutstandingBytes + } + return 0 +} + +// Response for the `StreamingPull` method. This response is used to stream +// messages from the server to the client. +type StreamingPullResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. Received Pub/Sub messages. This will not be empty. + ReceivedMessages []*ReceivedMessage `protobuf:"bytes,1,rep,name=received_messages,json=receivedMessages,proto3" json:"received_messages,omitempty"` + // Optional. This field will only be set if `enable_exactly_once_delivery` is + // set to `true` and is not guaranteed to be populated. + AcknowledgeConfirmation *StreamingPullResponse_AcknowledgeConfirmation `protobuf:"bytes,5,opt,name=acknowledge_confirmation,json=acknowledgeConfirmation,proto3" json:"acknowledge_confirmation,omitempty"` + // Optional. 
This field will only be set if `enable_exactly_once_delivery` is + // set to `true` and is not guaranteed to be populated. + ModifyAckDeadlineConfirmation *StreamingPullResponse_ModifyAckDeadlineConfirmation `protobuf:"bytes,3,opt,name=modify_ack_deadline_confirmation,json=modifyAckDeadlineConfirmation,proto3" json:"modify_ack_deadline_confirmation,omitempty"` + // Optional. Properties associated with this subscription. + SubscriptionProperties *StreamingPullResponse_SubscriptionProperties `protobuf:"bytes,4,opt,name=subscription_properties,json=subscriptionProperties,proto3" json:"subscription_properties,omitempty"` +} + +func (x *StreamingPullResponse) Reset() { + *x = StreamingPullResponse{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamingPullResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamingPullResponse) ProtoMessage() {} + +func (x *StreamingPullResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamingPullResponse.ProtoReflect.Descriptor instead. +func (*StreamingPullResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41} +} + +func (x *StreamingPullResponse) GetReceivedMessages() []*ReceivedMessage { + if x != nil { + return x.ReceivedMessages + } + return nil +} + +func (x *StreamingPullResponse) GetAcknowledgeConfirmation() *StreamingPullResponse_AcknowledgeConfirmation { + if x != nil { + return x.AcknowledgeConfirmation + } + return nil +} + +func (x *StreamingPullResponse) GetModifyAckDeadlineConfirmation() *StreamingPullResponse_ModifyAckDeadlineConfirmation { + if x != nil { + return x.ModifyAckDeadlineConfirmation + } + return nil +} + +func (x *StreamingPullResponse) GetSubscriptionProperties() *StreamingPullResponse_SubscriptionProperties { + if x != nil { + return x.SubscriptionProperties + } + return nil +} + +// Request for the `CreateSnapshot` method. +type CreateSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. User-provided name for this snapshot. If the name is not provided + // in the request, the server will assign a random name for this snapshot on + // the same project as the subscription. Note that for REST API requests, you + // must specify a name. See the [resource name + // rules](https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). + // Format is `projects/{project}/snapshots/{snap}`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The subscription whose backlog the snapshot retains. + // Specifically, the created snapshot is guaranteed to retain: + // + // (a) The existing backlog on the subscription. More precisely, this is + // defined as the messages in the subscription's backlog that are + // unacknowledged upon the successful completion of the + // `CreateSnapshot` request; as well as: + // (b) Any messages published to the subscription's topic following the + // successful completion of the CreateSnapshot request. + // + // Format is `projects/{project}/subscriptions/{sub}`. 
+ Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Optional. See [Creating and managing + // labels](https://cloud.google.com/pubsub/docs/labels). + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CreateSnapshotRequest) Reset() { + *x = CreateSnapshotRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSnapshotRequest) ProtoMessage() {} + +func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSnapshotRequest.ProtoReflect.Descriptor instead. +func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{42} +} + +func (x *CreateSnapshotRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateSnapshotRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (x *CreateSnapshotRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// Request for the UpdateSnapshot method. +type UpdateSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The updated snapshot object. + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + // Required. Indicates which fields in the provided snapshot to update. + // Must be specified and non-empty. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateSnapshotRequest) Reset() { + *x = UpdateSnapshotRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSnapshotRequest) ProtoMessage() {} + +func (x *UpdateSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSnapshotRequest.ProtoReflect.Descriptor instead. +func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{43} +} + +func (x *UpdateSnapshotRequest) GetSnapshot() *Snapshot { + if x != nil { + return x.Snapshot + } + return nil +} + +func (x *UpdateSnapshotRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// A snapshot resource. Snapshots are used in +// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow you to manage message acknowledgments in bulk. 
That +// is, you can set the acknowledgment state of messages in an existing +// subscription to the state captured by a snapshot. +type Snapshot struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The name of the snapshot. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Optional. The name of the topic from which this snapshot is retaining + // messages. + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + // Optional. The snapshot is guaranteed to exist up until this time. + // A newly-created snapshot expires no later than 7 days from the time of its + // creation. Its exact lifetime is determined at creation by the existing + // backlog in the source subscription. Specifically, the lifetime of the + // snapshot is `7 days - (age of oldest unacked message in the subscription)`. + // For example, consider a subscription whose oldest unacked message is 3 days + // old. If a snapshot is created from this subscription, the snapshot -- which + // will always capture this 3-day-old backlog as long as the snapshot + // exists -- will expire in 4 days. The service will refuse to create a + // snapshot that would expire in less than 1 hour after creation. + ExpireTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + // Optional. See [Creating and managing labels] + // (https://cloud.google.com/pubsub/docs/labels). + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Snapshot) Reset() { + *x = Snapshot{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Snapshot) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Snapshot) ProtoMessage() {} + +func (x *Snapshot) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Snapshot.ProtoReflect.Descriptor instead. +func (*Snapshot) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{44} +} + +func (x *Snapshot) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Snapshot) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *Snapshot) GetExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireTime + } + return nil +} + +func (x *Snapshot) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// Request for the GetSnapshot method. +type GetSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the snapshot to get. + // Format is `projects/{project}/snapshots/{snap}`. 
+ Snapshot string `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` +} + +func (x *GetSnapshotRequest) Reset() { + *x = GetSnapshotRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSnapshotRequest) ProtoMessage() {} + +func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSnapshotRequest.ProtoReflect.Descriptor instead. +func (*GetSnapshotRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{45} +} + +func (x *GetSnapshotRequest) GetSnapshot() string { + if x != nil { + return x.Snapshot + } + return "" +} + +// Request for the `ListSnapshots` method. +type ListSnapshotsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the project in which to list snapshots. + // Format is `projects/{project-id}`. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // Optional. Maximum number of snapshots to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. The value returned by the last `ListSnapshotsResponse`; indicates + // that this is a continuation of a prior `ListSnapshots` call, and that the + // system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListSnapshotsRequest) Reset() { + *x = ListSnapshotsRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSnapshotsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnapshotsRequest) ProtoMessage() {} + +func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead. +func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{46} +} + +func (x *ListSnapshotsRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *ListSnapshotsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListSnapshotsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response for the `ListSnapshots` method. +type ListSnapshotsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The resulting snapshots. + Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` + // Optional. 
If not empty, indicates that there may be more snapshot that + // match the request; this value should be passed in a new + // `ListSnapshotsRequest`. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListSnapshotsResponse) Reset() { + *x = ListSnapshotsResponse{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSnapshotsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnapshotsResponse) ProtoMessage() {} + +func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead. +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{47} +} + +func (x *ListSnapshotsResponse) GetSnapshots() []*Snapshot { + if x != nil { + return x.Snapshots + } + return nil +} + +func (x *ListSnapshotsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request for the `DeleteSnapshot` method. +type DeleteSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the snapshot to delete. + // Format is `projects/{project}/snapshots/{snap}`. + Snapshot string `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` +} + +func (x *DeleteSnapshotRequest) Reset() { + *x = DeleteSnapshotRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteSnapshotRequest) ProtoMessage() {} + +func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[48] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteSnapshotRequest.ProtoReflect.Descriptor instead. +func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{48} +} + +func (x *DeleteSnapshotRequest) GetSnapshot() string { + if x != nil { + return x.Snapshot + } + return "" +} + +// Request for the `Seek` method. +type SeekRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription to affect. 
+ Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Types that are assignable to Target: + // + // *SeekRequest_Time + // *SeekRequest_Snapshot + Target isSeekRequest_Target `protobuf_oneof:"target"` +} + +func (x *SeekRequest) Reset() { + *x = SeekRequest{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SeekRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SeekRequest) ProtoMessage() {} + +func (x *SeekRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[49] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SeekRequest.ProtoReflect.Descriptor instead. +func (*SeekRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{49} +} + +func (x *SeekRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (m *SeekRequest) GetTarget() isSeekRequest_Target { + if m != nil { + return m.Target + } + return nil +} + +func (x *SeekRequest) GetTime() *timestamppb.Timestamp { + if x, ok := x.GetTarget().(*SeekRequest_Time); ok { + return x.Time + } + return nil +} + +func (x *SeekRequest) GetSnapshot() string { + if x, ok := x.GetTarget().(*SeekRequest_Snapshot); ok { + return x.Snapshot + } + return "" +} + +type isSeekRequest_Target interface { + isSeekRequest_Target() +} + +type SeekRequest_Time struct { + // Optional. The time to seek to. + // Messages retained in the subscription that were published before this + // time are marked as acknowledged, and messages retained in the + // subscription that were published after this time are marked as + // unacknowledged. Note that this operation affects only those messages + // retained in the subscription (configured by the combination of + // `message_retention_duration` and `retain_acked_messages`). For example, + // if `time` corresponds to a point before the message retention + // window (or to a point before the system's notion of the subscription + // creation time), only retained messages will be marked as unacknowledged, + // and already-expunged messages will not be restored. + Time *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=time,proto3,oneof"` +} + +type SeekRequest_Snapshot struct { + // Optional. The snapshot to seek to. The snapshot's topic must be the same + // as that of the provided subscription. Format is + // `projects/{project}/snapshots/{snap}`. + Snapshot string `protobuf:"bytes,3,opt,name=snapshot,proto3,oneof"` +} + +func (*SeekRequest_Time) isSeekRequest_Target() {} + +func (*SeekRequest_Snapshot) isSeekRequest_Target() {} + +// Response for the `Seek` method (this response is empty). 
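// Editorial sketch (not generated code): the Target oneof above is populated
// by assigning one of its wrapper types. Assuming placeholder resource names,
// seeking back to a snapshot looks like this; seeking to a point in time
// would instead set Target to &SeekRequest_Time{Time: timestamppb.Now()}.
func exampleSeekToSnapshot() *SeekRequest {
	return &SeekRequest{
		Subscription: "projects/my-project/subscriptions/my-sub", // hypothetical
		Target: &SeekRequest_Snapshot{
			Snapshot: "projects/my-project/snapshots/my-snap", // hypothetical
		},
	}
}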
+type SeekResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SeekResponse) Reset() { + *x = SeekResponse{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SeekResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SeekResponse) ProtoMessage() {} + +func (x *SeekResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[50] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SeekResponse.ProtoReflect.Descriptor instead. +func (*SeekResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{50} +} + +// Ingestion settings for Amazon Kinesis Data Streams. +type IngestionDataSourceSettings_AwsKinesis struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. An output-only field that indicates the state of the Kinesis + // ingestion source. + State IngestionDataSourceSettings_AwsKinesis_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.pubsub.v1.IngestionDataSourceSettings_AwsKinesis_State" json:"state,omitempty"` + // Required. The Kinesis stream ARN to ingest data from. + StreamArn string `protobuf:"bytes,2,opt,name=stream_arn,json=streamArn,proto3" json:"stream_arn,omitempty"` + // Required. The Kinesis consumer ARN to used for ingestion in Enhanced + // Fan-Out mode. The consumer must be already created and ready to be used. + ConsumerArn string `protobuf:"bytes,3,opt,name=consumer_arn,json=consumerArn,proto3" json:"consumer_arn,omitempty"` + // Required. AWS role ARN to be used for Federated Identity authentication + // with Kinesis. Check the Pub/Sub docs for how to set up this role and the + // required permissions that need to be attached to it. + AwsRoleArn string `protobuf:"bytes,4,opt,name=aws_role_arn,json=awsRoleArn,proto3" json:"aws_role_arn,omitempty"` + // Required. The GCP service account to be used for Federated Identity + // authentication with Kinesis (via a `AssumeRoleWithWebIdentity` call for + // the provided role). The `aws_role_arn` must be set up with + // `accounts.google.com:sub` equals to this service account number. + GcpServiceAccount string `protobuf:"bytes,5,opt,name=gcp_service_account,json=gcpServiceAccount,proto3" json:"gcp_service_account,omitempty"` +} + +func (x *IngestionDataSourceSettings_AwsKinesis) Reset() { + *x = IngestionDataSourceSettings_AwsKinesis{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionDataSourceSettings_AwsKinesis) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings_AwsKinesis) ProtoMessage() {} + +func (x *IngestionDataSourceSettings_AwsKinesis) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings_AwsKinesis.ProtoReflect.Descriptor instead. 
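// Editorial sketch (not generated code): populating the Kinesis ingestion
// settings described above. Every ARN and the service account here are
// hypothetical placeholders; State is output only and therefore left unset.
func exampleAwsKinesisSettings() *IngestionDataSourceSettings_AwsKinesis {
	return &IngestionDataSourceSettings_AwsKinesis{
		StreamArn:         "arn:aws:kinesis:us-east-1:111111111111:stream/my-stream",
		ConsumerArn:       "arn:aws:kinesis:us-east-1:111111111111:stream/my-stream/consumer/my-consumer:1234567890",
		AwsRoleArn:        "arn:aws:iam::111111111111:role/pubsub-ingestion",
		GcpServiceAccount: "ingestion@my-project.iam.gserviceaccount.com",
	}
}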
+func (*IngestionDataSourceSettings_AwsKinesis) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *IngestionDataSourceSettings_AwsKinesis) GetState() IngestionDataSourceSettings_AwsKinesis_State { + if x != nil { + return x.State + } + return IngestionDataSourceSettings_AwsKinesis_STATE_UNSPECIFIED +} + +func (x *IngestionDataSourceSettings_AwsKinesis) GetStreamArn() string { + if x != nil { + return x.StreamArn + } + return "" +} + +func (x *IngestionDataSourceSettings_AwsKinesis) GetConsumerArn() string { + if x != nil { + return x.ConsumerArn + } + return "" +} + +func (x *IngestionDataSourceSettings_AwsKinesis) GetAwsRoleArn() string { + if x != nil { + return x.AwsRoleArn + } + return "" +} + +func (x *IngestionDataSourceSettings_AwsKinesis) GetGcpServiceAccount() string { + if x != nil { + return x.GcpServiceAccount + } + return "" +} + +// Ingestion settings for Cloud Storage. +type IngestionDataSourceSettings_CloudStorage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. An output-only field that indicates the state of the Cloud + // Storage ingestion source. + State IngestionDataSourceSettings_CloudStorage_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.pubsub.v1.IngestionDataSourceSettings_CloudStorage_State" json:"state,omitempty"` + // Optional. Cloud Storage bucket. The bucket name must be without any + // prefix like "gs://". See the [bucket naming requirements] + // (https://cloud.google.com/storage/docs/buckets#naming). + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Defaults to text format. + // + // Types that are assignable to InputFormat: + // + // *IngestionDataSourceSettings_CloudStorage_TextFormat_ + // *IngestionDataSourceSettings_CloudStorage_AvroFormat_ + // *IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat + InputFormat isIngestionDataSourceSettings_CloudStorage_InputFormat `protobuf_oneof:"input_format"` + // Optional. Only objects with a larger or equal creation timestamp will be + // ingested. + MinimumObjectCreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=minimum_object_create_time,json=minimumObjectCreateTime,proto3" json:"minimum_object_create_time,omitempty"` + // Optional. Glob pattern used to match objects that will be ingested. If + // unset, all objects will be ingested. See the [supported + // patterns](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob). 
+ MatchGlob string `protobuf:"bytes,9,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"` +} + +func (x *IngestionDataSourceSettings_CloudStorage) Reset() { + *x = IngestionDataSourceSettings_CloudStorage{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionDataSourceSettings_CloudStorage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings_CloudStorage) ProtoMessage() {} + +func (x *IngestionDataSourceSettings_CloudStorage) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings_CloudStorage.ProtoReflect.Descriptor instead. +func (*IngestionDataSourceSettings_CloudStorage) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *IngestionDataSourceSettings_CloudStorage) GetState() IngestionDataSourceSettings_CloudStorage_State { + if x != nil { + return x.State + } + return IngestionDataSourceSettings_CloudStorage_STATE_UNSPECIFIED +} + +func (x *IngestionDataSourceSettings_CloudStorage) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (m *IngestionDataSourceSettings_CloudStorage) GetInputFormat() isIngestionDataSourceSettings_CloudStorage_InputFormat { + if m != nil { + return m.InputFormat + } + return nil +} + +func (x *IngestionDataSourceSettings_CloudStorage) GetTextFormat() *IngestionDataSourceSettings_CloudStorage_TextFormat { + if x, ok := x.GetInputFormat().(*IngestionDataSourceSettings_CloudStorage_TextFormat_); ok { + return x.TextFormat + } + return nil +} + +func (x *IngestionDataSourceSettings_CloudStorage) GetAvroFormat() *IngestionDataSourceSettings_CloudStorage_AvroFormat { + if x, ok := x.GetInputFormat().(*IngestionDataSourceSettings_CloudStorage_AvroFormat_); ok { + return x.AvroFormat + } + return nil +} + +func (x *IngestionDataSourceSettings_CloudStorage) GetPubsubAvroFormat() *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat { + if x, ok := x.GetInputFormat().(*IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat); ok { + return x.PubsubAvroFormat + } + return nil +} + +func (x *IngestionDataSourceSettings_CloudStorage) GetMinimumObjectCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.MinimumObjectCreateTime + } + return nil +} + +func (x *IngestionDataSourceSettings_CloudStorage) GetMatchGlob() string { + if x != nil { + return x.MatchGlob + } + return "" +} + +type isIngestionDataSourceSettings_CloudStorage_InputFormat interface { + isIngestionDataSourceSettings_CloudStorage_InputFormat() +} + +type IngestionDataSourceSettings_CloudStorage_TextFormat_ struct { + // Optional. Data from Cloud Storage will be interpreted as text. + TextFormat *IngestionDataSourceSettings_CloudStorage_TextFormat `protobuf:"bytes,3,opt,name=text_format,json=textFormat,proto3,oneof"` +} + +type IngestionDataSourceSettings_CloudStorage_AvroFormat_ struct { + // Optional. Data from Cloud Storage will be interpreted in Avro format. 
+ AvroFormat *IngestionDataSourceSettings_CloudStorage_AvroFormat `protobuf:"bytes,4,opt,name=avro_format,json=avroFormat,proto3,oneof"` +} + +type IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat struct { + // Optional. It will be assumed data from Cloud Storage was written via + // [Cloud Storage + // subscriptions](https://cloud.google.com/pubsub/docs/cloudstorage). + PubsubAvroFormat *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat `protobuf:"bytes,5,opt,name=pubsub_avro_format,json=pubsubAvroFormat,proto3,oneof"` +} + +func (*IngestionDataSourceSettings_CloudStorage_TextFormat_) isIngestionDataSourceSettings_CloudStorage_InputFormat() { +} + +func (*IngestionDataSourceSettings_CloudStorage_AvroFormat_) isIngestionDataSourceSettings_CloudStorage_InputFormat() { +} + +func (*IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat) isIngestionDataSourceSettings_CloudStorage_InputFormat() { +} + +// Ingestion settings for Azure Event Hubs. +type IngestionDataSourceSettings_AzureEventHubs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. An output-only field that indicates the state of the Event + // Hubs ingestion source. + State IngestionDataSourceSettings_AzureEventHubs_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.pubsub.v1.IngestionDataSourceSettings_AzureEventHubs_State" json:"state,omitempty"` + // Optional. Name of the resource group within the azure subscription. + ResourceGroup string `protobuf:"bytes,2,opt,name=resource_group,json=resourceGroup,proto3" json:"resource_group,omitempty"` + // Optional. The name of the Event Hubs namespace. + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + // Optional. The name of the Event Hub. + EventHub string `protobuf:"bytes,4,opt,name=event_hub,json=eventHub,proto3" json:"event_hub,omitempty"` + // Optional. The client id of the Azure application that is being used to + // authenticate Pub/Sub. + ClientId string `protobuf:"bytes,5,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + // Optional. The tenant id of the Azure application that is being used to + // authenticate Pub/Sub. + TenantId string `protobuf:"bytes,6,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` + // Optional. The Azure subscription id. + SubscriptionId string `protobuf:"bytes,7,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` + // Optional. The GCP service account to be used for Federated Identity + // authentication. 
+ GcpServiceAccount string `protobuf:"bytes,8,opt,name=gcp_service_account,json=gcpServiceAccount,proto3" json:"gcp_service_account,omitempty"` +} + +func (x *IngestionDataSourceSettings_AzureEventHubs) Reset() { + *x = IngestionDataSourceSettings_AzureEventHubs{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionDataSourceSettings_AzureEventHubs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings_AzureEventHubs) ProtoMessage() {} + +func (x *IngestionDataSourceSettings_AzureEventHubs) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings_AzureEventHubs.ProtoReflect.Descriptor instead. +func (*IngestionDataSourceSettings_AzureEventHubs) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 2} +} + +func (x *IngestionDataSourceSettings_AzureEventHubs) GetState() IngestionDataSourceSettings_AzureEventHubs_State { + if x != nil { + return x.State + } + return IngestionDataSourceSettings_AzureEventHubs_STATE_UNSPECIFIED +} + +func (x *IngestionDataSourceSettings_AzureEventHubs) GetResourceGroup() string { + if x != nil { + return x.ResourceGroup + } + return "" +} + +func (x *IngestionDataSourceSettings_AzureEventHubs) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *IngestionDataSourceSettings_AzureEventHubs) GetEventHub() string { + if x != nil { + return x.EventHub + } + return "" +} + +func (x *IngestionDataSourceSettings_AzureEventHubs) GetClientId() string { + if x != nil { + return x.ClientId + } + return "" +} + +func (x *IngestionDataSourceSettings_AzureEventHubs) GetTenantId() string { + if x != nil { + return x.TenantId + } + return "" +} + +func (x *IngestionDataSourceSettings_AzureEventHubs) GetSubscriptionId() string { + if x != nil { + return x.SubscriptionId + } + return "" +} + +func (x *IngestionDataSourceSettings_AzureEventHubs) GetGcpServiceAccount() string { + if x != nil { + return x.GcpServiceAccount + } + return "" +} + +// Ingestion settings for Amazon MSK. +type IngestionDataSourceSettings_AwsMsk struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. An output-only field that indicates the state of the Amazon + // MSK ingestion source. + State IngestionDataSourceSettings_AwsMsk_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.pubsub.v1.IngestionDataSourceSettings_AwsMsk_State" json:"state,omitempty"` + // Required. The Amazon Resource Name (ARN) that uniquely identifies the + // cluster. + ClusterArn string `protobuf:"bytes,2,opt,name=cluster_arn,json=clusterArn,proto3" json:"cluster_arn,omitempty"` + // Required. The name of the topic in the Amazon MSK cluster that Pub/Sub + // will import from. + Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` + // Required. AWS role ARN to be used for Federated Identity authentication + // with Amazon MSK. Check the Pub/Sub docs for how to set up this role and + // the required permissions that need to be attached to it. 
+ AwsRoleArn string `protobuf:"bytes,4,opt,name=aws_role_arn,json=awsRoleArn,proto3" json:"aws_role_arn,omitempty"` + // Required. The GCP service account to be used for Federated Identity + // authentication with Amazon MSK (via a `AssumeRoleWithWebIdentity` call + // for the provided role). The `aws_role_arn` must be set up with + // `accounts.google.com:sub` equals to this service account number. + GcpServiceAccount string `protobuf:"bytes,5,opt,name=gcp_service_account,json=gcpServiceAccount,proto3" json:"gcp_service_account,omitempty"` +} + +func (x *IngestionDataSourceSettings_AwsMsk) Reset() { + *x = IngestionDataSourceSettings_AwsMsk{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionDataSourceSettings_AwsMsk) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings_AwsMsk) ProtoMessage() {} + +func (x *IngestionDataSourceSettings_AwsMsk) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[54] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings_AwsMsk.ProtoReflect.Descriptor instead. +func (*IngestionDataSourceSettings_AwsMsk) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 3} +} + +func (x *IngestionDataSourceSettings_AwsMsk) GetState() IngestionDataSourceSettings_AwsMsk_State { + if x != nil { + return x.State + } + return IngestionDataSourceSettings_AwsMsk_STATE_UNSPECIFIED +} + +func (x *IngestionDataSourceSettings_AwsMsk) GetClusterArn() string { + if x != nil { + return x.ClusterArn + } + return "" +} + +func (x *IngestionDataSourceSettings_AwsMsk) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *IngestionDataSourceSettings_AwsMsk) GetAwsRoleArn() string { + if x != nil { + return x.AwsRoleArn + } + return "" +} + +func (x *IngestionDataSourceSettings_AwsMsk) GetGcpServiceAccount() string { + if x != nil { + return x.GcpServiceAccount + } + return "" +} + +// Ingestion settings for Confluent Cloud. +type IngestionDataSourceSettings_ConfluentCloud struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. An output-only field that indicates the state of the + // Confluent Cloud ingestion source. + State IngestionDataSourceSettings_ConfluentCloud_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.pubsub.v1.IngestionDataSourceSettings_ConfluentCloud_State" json:"state,omitempty"` + // Required. The address of the bootstrap server. The format is url:port. + BootstrapServer string `protobuf:"bytes,2,opt,name=bootstrap_server,json=bootstrapServer,proto3" json:"bootstrap_server,omitempty"` + // Required. The id of the cluster. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Required. The name of the topic in the Confluent Cloud cluster that + // Pub/Sub will import from. + Topic string `protobuf:"bytes,4,opt,name=topic,proto3" json:"topic,omitempty"` + // Required. The id of the identity pool to be used for Federated Identity + // authentication with Confluent Cloud. 
See + // https://docs.confluent.io/cloud/current/security/authenticate/workload-identities/identity-providers/oauth/identity-pools.html#add-oauth-identity-pools. + IdentityPoolId string `protobuf:"bytes,5,opt,name=identity_pool_id,json=identityPoolId,proto3" json:"identity_pool_id,omitempty"` + // Required. The GCP service account to be used for Federated Identity + // authentication with `identity_pool_id`. + GcpServiceAccount string `protobuf:"bytes,6,opt,name=gcp_service_account,json=gcpServiceAccount,proto3" json:"gcp_service_account,omitempty"` +} + +func (x *IngestionDataSourceSettings_ConfluentCloud) Reset() { + *x = IngestionDataSourceSettings_ConfluentCloud{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionDataSourceSettings_ConfluentCloud) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings_ConfluentCloud) ProtoMessage() {} + +func (x *IngestionDataSourceSettings_ConfluentCloud) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings_ConfluentCloud.ProtoReflect.Descriptor instead. +func (*IngestionDataSourceSettings_ConfluentCloud) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 4} +} + +func (x *IngestionDataSourceSettings_ConfluentCloud) GetState() IngestionDataSourceSettings_ConfluentCloud_State { + if x != nil { + return x.State + } + return IngestionDataSourceSettings_ConfluentCloud_STATE_UNSPECIFIED +} + +func (x *IngestionDataSourceSettings_ConfluentCloud) GetBootstrapServer() string { + if x != nil { + return x.BootstrapServer + } + return "" +} + +func (x *IngestionDataSourceSettings_ConfluentCloud) GetClusterId() string { + if x != nil { + return x.ClusterId + } + return "" +} + +func (x *IngestionDataSourceSettings_ConfluentCloud) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *IngestionDataSourceSettings_ConfluentCloud) GetIdentityPoolId() string { + if x != nil { + return x.IdentityPoolId + } + return "" +} + +func (x *IngestionDataSourceSettings_ConfluentCloud) GetGcpServiceAccount() string { + if x != nil { + return x.GcpServiceAccount + } + return "" +} + +// Configuration for reading Cloud Storage data in text format. Each line of +// text as specified by the delimiter will be set to the `data` field of a +// Pub/Sub message. +type IngestionDataSourceSettings_CloudStorage_TextFormat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. When unset, '\n' is used. 
+ Delimiter *string `protobuf:"bytes,1,opt,name=delimiter,proto3,oneof" json:"delimiter,omitempty"` +} + +func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) Reset() { + *x = IngestionDataSourceSettings_CloudStorage_TextFormat{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings_CloudStorage_TextFormat) ProtoMessage() {} + +func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings_CloudStorage_TextFormat.ProtoReflect.Descriptor instead. +func (*IngestionDataSourceSettings_CloudStorage_TextFormat) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 0} +} + +func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) GetDelimiter() string { + if x != nil && x.Delimiter != nil { + return *x.Delimiter + } + return "" +} + +// Configuration for reading Cloud Storage data in Avro binary format. The +// bytes of each object will be set to the `data` field of a Pub/Sub +// message. +type IngestionDataSourceSettings_CloudStorage_AvroFormat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IngestionDataSourceSettings_CloudStorage_AvroFormat) Reset() { + *x = IngestionDataSourceSettings_CloudStorage_AvroFormat{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionDataSourceSettings_CloudStorage_AvroFormat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings_CloudStorage_AvroFormat) ProtoMessage() {} + +func (x *IngestionDataSourceSettings_CloudStorage_AvroFormat) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[57] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings_CloudStorage_AvroFormat.ProtoReflect.Descriptor instead. +func (*IngestionDataSourceSettings_CloudStorage_AvroFormat) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 1} +} + +// Configuration for reading Cloud Storage data written via [Cloud Storage +// subscriptions](https://cloud.google.com/pubsub/docs/cloudstorage). The +// data and attributes fields of the originally exported Pub/Sub message +// will be restored when publishing. 
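// Editorial sketch (not generated code): selecting the text input format for
// Cloud Storage ingestion via the InputFormat oneof. Delimiter is a proto3
// optional field set through a pointer, so leaving it nil keeps the '\n'
// default documented above; the bucket name is a hypothetical placeholder.
func exampleCloudStorageTextSource() *IngestionDataSourceSettings_CloudStorage {
	delimiter := "\r\n" // override the default '\n'
	return &IngestionDataSourceSettings_CloudStorage{
		Bucket: "my-ingestion-bucket",
		InputFormat: &IngestionDataSourceSettings_CloudStorage_TextFormat_{
			TextFormat: &IngestionDataSourceSettings_CloudStorage_TextFormat{
				Delimiter: &delimiter,
			},
		},
	}
}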
+type IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) Reset() { + *x = IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) ProtoMessage() {} + +func (x *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[58] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat.ProtoReflect.Descriptor instead. +func (*IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 2} +} + +// Specifies the reason why some data may have been left out of +// the desired Pub/Sub message due to the API message limits +// (https://cloud.google.com/pubsub/quotas#resource_limits). For example, +// when the number of attributes is larger than 100, the number of +// attributes is truncated to 100 to respect the limit on the attribute count. +// Other attribute limits are treated similarly. When the size of the desired +// message would've been larger than 10MB, the message won't be published at +// all, and ingestion of the subsequent messages will proceed as normal. +type IngestionFailureEvent_ApiViolationReason struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IngestionFailureEvent_ApiViolationReason) Reset() { + *x = IngestionFailureEvent_ApiViolationReason{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionFailureEvent_ApiViolationReason) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionFailureEvent_ApiViolationReason) ProtoMessage() {} + +func (x *IngestionFailureEvent_ApiViolationReason) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionFailureEvent_ApiViolationReason.ProtoReflect.Descriptor instead. +func (*IngestionFailureEvent_ApiViolationReason) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 0} +} + +// Set when an Avro file is unsupported or its format is not valid. When this +// occurs, one or more Avro objects won't be ingested. 
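// Editorial sketch (not generated code): a rough client-side check against
// the API limits documented above (100 attributes, 10MB per message).
// PubsubMessage is the message type defined earlier in this file; len(Data)
// is used as a simplification of the full serialized message size.
func exampleExceedsApiLimits(msg *PubsubMessage) bool {
	const maxAttributes = 100
	const maxBytes = 10 * 1024 * 1024 // 10MB
	return len(msg.GetAttributes()) > maxAttributes || len(msg.GetData()) > maxBytes
}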
+type IngestionFailureEvent_AvroFailureReason struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IngestionFailureEvent_AvroFailureReason) Reset() { + *x = IngestionFailureEvent_AvroFailureReason{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionFailureEvent_AvroFailureReason) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionFailureEvent_AvroFailureReason) ProtoMessage() {} + +func (x *IngestionFailureEvent_AvroFailureReason) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[60] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionFailureEvent_AvroFailureReason.ProtoReflect.Descriptor instead. +func (*IngestionFailureEvent_AvroFailureReason) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 1} +} + +// Set when a Pub/Sub message fails to get published due to a schema +// validation violation. +type IngestionFailureEvent_SchemaViolationReason struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IngestionFailureEvent_SchemaViolationReason) Reset() { + *x = IngestionFailureEvent_SchemaViolationReason{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionFailureEvent_SchemaViolationReason) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionFailureEvent_SchemaViolationReason) ProtoMessage() {} + +func (x *IngestionFailureEvent_SchemaViolationReason) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[61] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionFailureEvent_SchemaViolationReason.ProtoReflect.Descriptor instead. +func (*IngestionFailureEvent_SchemaViolationReason) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 2} +} + +// Set when a Pub/Sub message fails to get published due to a message +// transformation error. 
+type IngestionFailureEvent_MessageTransformationFailureReason struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IngestionFailureEvent_MessageTransformationFailureReason) Reset() { + *x = IngestionFailureEvent_MessageTransformationFailureReason{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionFailureEvent_MessageTransformationFailureReason) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionFailureEvent_MessageTransformationFailureReason) ProtoMessage() {} + +func (x *IngestionFailureEvent_MessageTransformationFailureReason) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[62] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionFailureEvent_MessageTransformationFailureReason.ProtoReflect.Descriptor instead. +func (*IngestionFailureEvent_MessageTransformationFailureReason) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 3} +} + +// Failure when ingesting from a Cloud Storage source. +type IngestionFailureEvent_CloudStorageFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. Name of the Cloud Storage bucket used for ingestion. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Optional. Name of the Cloud Storage object which contained the section + // that couldn't be ingested. + ObjectName string `protobuf:"bytes,2,opt,name=object_name,json=objectName,proto3" json:"object_name,omitempty"` + // Optional. Generation of the Cloud Storage object which contained the + // section that couldn't be ingested. + ObjectGeneration int64 `protobuf:"varint,3,opt,name=object_generation,json=objectGeneration,proto3" json:"object_generation,omitempty"` + // Reason why ingestion failed for the specified object. + // + // Types that are assignable to Reason: + // + // *IngestionFailureEvent_CloudStorageFailure_AvroFailureReason + // *IngestionFailureEvent_CloudStorageFailure_ApiViolationReason + // *IngestionFailureEvent_CloudStorageFailure_SchemaViolationReason + // *IngestionFailureEvent_CloudStorageFailure_MessageTransformationFailureReason + Reason isIngestionFailureEvent_CloudStorageFailure_Reason `protobuf_oneof:"reason"` +} + +func (x *IngestionFailureEvent_CloudStorageFailure) Reset() { + *x = IngestionFailureEvent_CloudStorageFailure{} + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IngestionFailureEvent_CloudStorageFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionFailureEvent_CloudStorageFailure) ProtoMessage() {} + +func (x *IngestionFailureEvent_CloudStorageFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[63] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionFailureEvent_CloudStorageFailure.ProtoReflect.Descriptor instead. 
+func (*IngestionFailureEvent_CloudStorageFailure) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 4}
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetBucket() string {
+	if x != nil {
+		return x.Bucket
+	}
+	return ""
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetObjectName() string {
+	if x != nil {
+		return x.ObjectName
+	}
+	return ""
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetObjectGeneration() int64 {
+	if x != nil {
+		return x.ObjectGeneration
+	}
+	return 0
+}
+
+func (m *IngestionFailureEvent_CloudStorageFailure) GetReason() isIngestionFailureEvent_CloudStorageFailure_Reason {
+	if m != nil {
+		return m.Reason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetAvroFailureReason() *IngestionFailureEvent_AvroFailureReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_CloudStorageFailure_AvroFailureReason); ok {
+		return x.AvroFailureReason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetApiViolationReason() *IngestionFailureEvent_ApiViolationReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_CloudStorageFailure_ApiViolationReason); ok {
+		return x.ApiViolationReason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetSchemaViolationReason() *IngestionFailureEvent_SchemaViolationReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_CloudStorageFailure_SchemaViolationReason); ok {
+		return x.SchemaViolationReason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetMessageTransformationFailureReason() *IngestionFailureEvent_MessageTransformationFailureReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_CloudStorageFailure_MessageTransformationFailureReason); ok {
+		return x.MessageTransformationFailureReason
+	}
+	return nil
+}
+
+type isIngestionFailureEvent_CloudStorageFailure_Reason interface {
+	isIngestionFailureEvent_CloudStorageFailure_Reason()
+}
+
+type IngestionFailureEvent_CloudStorageFailure_AvroFailureReason struct {
+	// Optional. Failure encountered when parsing an Avro file.
+	AvroFailureReason *IngestionFailureEvent_AvroFailureReason `protobuf:"bytes,5,opt,name=avro_failure_reason,json=avroFailureReason,proto3,oneof"`
+}
+
+type IngestionFailureEvent_CloudStorageFailure_ApiViolationReason struct {
+	// Optional. The Pub/Sub API limits prevented the desired message from
+	// being published.
+	ApiViolationReason *IngestionFailureEvent_ApiViolationReason `protobuf:"bytes,6,opt,name=api_violation_reason,json=apiViolationReason,proto3,oneof"`
+}
+
+type IngestionFailureEvent_CloudStorageFailure_SchemaViolationReason struct {
+	// Optional. The Pub/Sub message failed schema validation.
+	SchemaViolationReason *IngestionFailureEvent_SchemaViolationReason `protobuf:"bytes,7,opt,name=schema_violation_reason,json=schemaViolationReason,proto3,oneof"`
+}
+
+type IngestionFailureEvent_CloudStorageFailure_MessageTransformationFailureReason struct {
+	// Optional. Failure encountered when applying a message transformation to
+	// the Pub/Sub message.
+	MessageTransformationFailureReason *IngestionFailureEvent_MessageTransformationFailureReason `protobuf:"bytes,8,opt,name=message_transformation_failure_reason,json=messageTransformationFailureReason,proto3,oneof"`
+}
+
+func (*IngestionFailureEvent_CloudStorageFailure_AvroFailureReason) isIngestionFailureEvent_CloudStorageFailure_Reason() {
+}
+
+func (*IngestionFailureEvent_CloudStorageFailure_ApiViolationReason) isIngestionFailureEvent_CloudStorageFailure_Reason() {
+}
+
+func (*IngestionFailureEvent_CloudStorageFailure_SchemaViolationReason) isIngestionFailureEvent_CloudStorageFailure_Reason() {
+}
+
+func (*IngestionFailureEvent_CloudStorageFailure_MessageTransformationFailureReason) isIngestionFailureEvent_CloudStorageFailure_Reason() {
+}
+
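The `reason` oneof above is read back either through the typed getters (each returns nil unless its wrapper variant is set) or with a type switch on `GetReason()`. A minimal sketch of the latter, assuming these types are imported from the usual `cloud.google.com/go/pubsub/apiv1/pubsubpb` path; the helper name is illustrative, not part of this diff:

	package ingestlog

	import (
		"fmt"

		"cloud.google.com/go/pubsub/apiv1/pubsubpb"
	)

	// describeCloudStorageFailure renders a CloudStorageFailure, including
	// which oneof variant the service populated, into a log-friendly string.
	// (Hypothetical helper; not part of the generated package.)
	func describeCloudStorageFailure(f *pubsubpb.IngestionFailureEvent_CloudStorageFailure) string {
		where := fmt.Sprintf("gs://%s/%s@%d", f.GetBucket(), f.GetObjectName(), f.GetObjectGeneration())
		switch f.GetReason().(type) {
		case *pubsubpb.IngestionFailureEvent_CloudStorageFailure_AvroFailureReason:
			return where + ": Avro parse failure"
		case *pubsubpb.IngestionFailureEvent_CloudStorageFailure_ApiViolationReason:
			return where + ": Pub/Sub API limits violated"
		case *pubsubpb.IngestionFailureEvent_CloudStorageFailure_SchemaViolationReason:
			return where + ": schema validation failed"
		case *pubsubpb.IngestionFailureEvent_CloudStorageFailure_MessageTransformationFailureReason:
			return where + ": message transformation failed"
		default:
			return where + ": reason not set"
		}
	}

The same access pattern applies to every per-source failure type that follows (MSK, Event Hubs, Confluent Cloud, Kinesis); only the set of variants differs.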
+// Failure when ingesting from an Amazon MSK source.
+type IngestionFailureEvent_AwsMskFailureReason struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional. The ARN of the cluster of the topic being ingested from.
+	ClusterArn string `protobuf:"bytes,1,opt,name=cluster_arn,json=clusterArn,proto3" json:"cluster_arn,omitempty"`
+	// Optional. The name of the Kafka topic being ingested from.
+	KafkaTopic string `protobuf:"bytes,2,opt,name=kafka_topic,json=kafkaTopic,proto3" json:"kafka_topic,omitempty"`
+	// Optional. The partition ID of the message that failed to be ingested.
+	PartitionId int64 `protobuf:"varint,3,opt,name=partition_id,json=partitionId,proto3" json:"partition_id,omitempty"`
+	// Optional. The offset within the partition of the message that failed to
+	// be ingested.
+	Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+	// Reason why ingestion failed for the specified message.
+	//
+	// Types that are assignable to Reason:
+	//
+	//	*IngestionFailureEvent_AwsMskFailureReason_ApiViolationReason
+	//	*IngestionFailureEvent_AwsMskFailureReason_SchemaViolationReason
+	//	*IngestionFailureEvent_AwsMskFailureReason_MessageTransformationFailureReason
+	Reason isIngestionFailureEvent_AwsMskFailureReason_Reason `protobuf_oneof:"reason"`
+}
+
+func (x *IngestionFailureEvent_AwsMskFailureReason) Reset() {
+	*x = IngestionFailureEvent_AwsMskFailureReason{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[64]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *IngestionFailureEvent_AwsMskFailureReason) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IngestionFailureEvent_AwsMskFailureReason) ProtoMessage() {}
+
+func (x *IngestionFailureEvent_AwsMskFailureReason) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[64]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use IngestionFailureEvent_AwsMskFailureReason.ProtoReflect.Descriptor instead.
+func (*IngestionFailureEvent_AwsMskFailureReason) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 5}
+}
+
+func (x *IngestionFailureEvent_AwsMskFailureReason) GetClusterArn() string {
+	if x != nil {
+		return x.ClusterArn
+	}
+	return ""
+}
+
+func (x *IngestionFailureEvent_AwsMskFailureReason) GetKafkaTopic() string {
+	if x != nil {
+		return x.KafkaTopic
+	}
+	return ""
+}
+
+func (x *IngestionFailureEvent_AwsMskFailureReason) GetPartitionId() int64 {
+	if x != nil {
+		return x.PartitionId
+	}
+	return 0
+}
+
+func (x *IngestionFailureEvent_AwsMskFailureReason) GetOffset() int64 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (m *IngestionFailureEvent_AwsMskFailureReason) GetReason() isIngestionFailureEvent_AwsMskFailureReason_Reason {
+	if m != nil {
+		return m.Reason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_AwsMskFailureReason) GetApiViolationReason() *IngestionFailureEvent_ApiViolationReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_AwsMskFailureReason_ApiViolationReason); ok {
+		return x.ApiViolationReason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_AwsMskFailureReason) GetSchemaViolationReason() *IngestionFailureEvent_SchemaViolationReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_AwsMskFailureReason_SchemaViolationReason); ok {
+		return x.SchemaViolationReason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_AwsMskFailureReason) GetMessageTransformationFailureReason() *IngestionFailureEvent_MessageTransformationFailureReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_AwsMskFailureReason_MessageTransformationFailureReason); ok {
+		return x.MessageTransformationFailureReason
+	}
+	return nil
+}
+
+type isIngestionFailureEvent_AwsMskFailureReason_Reason interface {
+	isIngestionFailureEvent_AwsMskFailureReason_Reason()
+}
+
+type IngestionFailureEvent_AwsMskFailureReason_ApiViolationReason struct {
+	// Optional. The Pub/Sub API limits prevented the desired message from
+	// being published.
+	ApiViolationReason *IngestionFailureEvent_ApiViolationReason `protobuf:"bytes,5,opt,name=api_violation_reason,json=apiViolationReason,proto3,oneof"`
+}
+
+type IngestionFailureEvent_AwsMskFailureReason_SchemaViolationReason struct {
+	// Optional. The Pub/Sub message failed schema validation.
+	SchemaViolationReason *IngestionFailureEvent_SchemaViolationReason `protobuf:"bytes,6,opt,name=schema_violation_reason,json=schemaViolationReason,proto3,oneof"`
+}
+
+type IngestionFailureEvent_AwsMskFailureReason_MessageTransformationFailureReason struct {
+	// Optional. Failure encountered when applying a message transformation to
+	// the Pub/Sub message.
+	MessageTransformationFailureReason *IngestionFailureEvent_MessageTransformationFailureReason `protobuf:"bytes,7,opt,name=message_transformation_failure_reason,json=messageTransformationFailureReason,proto3,oneof"`
+}
+
+func (*IngestionFailureEvent_AwsMskFailureReason_ApiViolationReason) isIngestionFailureEvent_AwsMskFailureReason_Reason() {
+}
+
+func (*IngestionFailureEvent_AwsMskFailureReason_SchemaViolationReason) isIngestionFailureEvent_AwsMskFailureReason_Reason() {
+}
+
+func (*IngestionFailureEvent_AwsMskFailureReason_MessageTransformationFailureReason) isIngestionFailureEvent_AwsMskFailureReason_Reason() {
+}
+
+// Failure when ingesting from an Azure Event Hubs source.
+type IngestionFailureEvent_AzureEventHubsFailureReason struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional. The namespace containing the event hub being ingested from.
+	Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
+	// Optional. The name of the event hub being ingested from.
+	EventHub string `protobuf:"bytes,2,opt,name=event_hub,json=eventHub,proto3" json:"event_hub,omitempty"`
+	// Optional. The partition ID of the message that failed to be ingested.
+	PartitionId int64 `protobuf:"varint,3,opt,name=partition_id,json=partitionId,proto3" json:"partition_id,omitempty"`
+	// Optional. The offset within the partition of the message that failed to
+	// be ingested.
+	Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+	// Reason why ingestion failed for the specified message.
+	//
+	// Types that are assignable to Reason:
+	//
+	//	*IngestionFailureEvent_AzureEventHubsFailureReason_ApiViolationReason
+	//	*IngestionFailureEvent_AzureEventHubsFailureReason_SchemaViolationReason
+	//	*IngestionFailureEvent_AzureEventHubsFailureReason_MessageTransformationFailureReason
+	Reason isIngestionFailureEvent_AzureEventHubsFailureReason_Reason `protobuf_oneof:"reason"`
+}
+
+func (x *IngestionFailureEvent_AzureEventHubsFailureReason) Reset() {
+	*x = IngestionFailureEvent_AzureEventHubsFailureReason{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[65]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *IngestionFailureEvent_AzureEventHubsFailureReason) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IngestionFailureEvent_AzureEventHubsFailureReason) ProtoMessage() {}
+
+func (x *IngestionFailureEvent_AzureEventHubsFailureReason) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[65]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use IngestionFailureEvent_AzureEventHubsFailureReason.ProtoReflect.Descriptor instead.
+func (*IngestionFailureEvent_AzureEventHubsFailureReason) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 6}
+}
+
+func (x *IngestionFailureEvent_AzureEventHubsFailureReason) GetNamespace() string {
+	if x != nil {
+		return x.Namespace
+	}
+	return ""
+}
+
+func (x *IngestionFailureEvent_AzureEventHubsFailureReason) GetEventHub() string {
+	if x != nil {
+		return x.EventHub
+	}
+	return ""
+}
+
+func (x *IngestionFailureEvent_AzureEventHubsFailureReason) GetPartitionId() int64 {
+	if x != nil {
+		return x.PartitionId
+	}
+	return 0
+}
+
+func (x *IngestionFailureEvent_AzureEventHubsFailureReason) GetOffset() int64 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (m *IngestionFailureEvent_AzureEventHubsFailureReason) GetReason() isIngestionFailureEvent_AzureEventHubsFailureReason_Reason {
+	if m != nil {
+		return m.Reason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_AzureEventHubsFailureReason) GetApiViolationReason() *IngestionFailureEvent_ApiViolationReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_AzureEventHubsFailureReason_ApiViolationReason); ok {
+		return x.ApiViolationReason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_AzureEventHubsFailureReason) GetSchemaViolationReason() *IngestionFailureEvent_SchemaViolationReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_AzureEventHubsFailureReason_SchemaViolationReason); ok {
+		return x.SchemaViolationReason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_AzureEventHubsFailureReason) GetMessageTransformationFailureReason() *IngestionFailureEvent_MessageTransformationFailureReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_AzureEventHubsFailureReason_MessageTransformationFailureReason); ok {
+		return x.MessageTransformationFailureReason
+	}
+	return nil
+}
+
+type isIngestionFailureEvent_AzureEventHubsFailureReason_Reason interface {
+	isIngestionFailureEvent_AzureEventHubsFailureReason_Reason()
+}
+
+type IngestionFailureEvent_AzureEventHubsFailureReason_ApiViolationReason struct {
+	// Optional. The Pub/Sub API limits prevented the desired message from
+	// being published.
+	ApiViolationReason *IngestionFailureEvent_ApiViolationReason `protobuf:"bytes,5,opt,name=api_violation_reason,json=apiViolationReason,proto3,oneof"`
+}
+
+type IngestionFailureEvent_AzureEventHubsFailureReason_SchemaViolationReason struct {
+	// Optional. The Pub/Sub message failed schema validation.
+	SchemaViolationReason *IngestionFailureEvent_SchemaViolationReason `protobuf:"bytes,6,opt,name=schema_violation_reason,json=schemaViolationReason,proto3,oneof"`
+}
+
+type IngestionFailureEvent_AzureEventHubsFailureReason_MessageTransformationFailureReason struct {
+	// Optional. Failure encountered when applying a message transformation to
+	// the Pub/Sub message.
+	MessageTransformationFailureReason *IngestionFailureEvent_MessageTransformationFailureReason `protobuf:"bytes,7,opt,name=message_transformation_failure_reason,json=messageTransformationFailureReason,proto3,oneof"`
+}
+
+func (*IngestionFailureEvent_AzureEventHubsFailureReason_ApiViolationReason) isIngestionFailureEvent_AzureEventHubsFailureReason_Reason() {
+}
+
+func (*IngestionFailureEvent_AzureEventHubsFailureReason_SchemaViolationReason) isIngestionFailureEvent_AzureEventHubsFailureReason_Reason() {
+}
+
+func (*IngestionFailureEvent_AzureEventHubsFailureReason_MessageTransformationFailureReason) isIngestionFailureEvent_AzureEventHubsFailureReason_Reason() {
+}
+
+// Failure when ingesting from a Confluent Cloud source.
+type IngestionFailureEvent_ConfluentCloudFailureReason struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional. The cluster ID containing the topic being ingested from.
+	ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+	// Optional. The name of the Kafka topic being ingested from.
+	KafkaTopic string `protobuf:"bytes,2,opt,name=kafka_topic,json=kafkaTopic,proto3" json:"kafka_topic,omitempty"`
+	// Optional. The partition ID of the message that failed to be ingested.
+	PartitionId int64 `protobuf:"varint,3,opt,name=partition_id,json=partitionId,proto3" json:"partition_id,omitempty"`
+	// Optional. The offset within the partition of the message that failed to
+	// be ingested.
+	Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+	// Reason why ingestion failed for the specified message.
+	//
+	// Types that are assignable to Reason:
+	//
+	//	*IngestionFailureEvent_ConfluentCloudFailureReason_ApiViolationReason
+	//	*IngestionFailureEvent_ConfluentCloudFailureReason_SchemaViolationReason
+	//	*IngestionFailureEvent_ConfluentCloudFailureReason_MessageTransformationFailureReason
+	Reason isIngestionFailureEvent_ConfluentCloudFailureReason_Reason `protobuf_oneof:"reason"`
+}
+
+func (x *IngestionFailureEvent_ConfluentCloudFailureReason) Reset() {
+	*x = IngestionFailureEvent_ConfluentCloudFailureReason{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[66]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *IngestionFailureEvent_ConfluentCloudFailureReason) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IngestionFailureEvent_ConfluentCloudFailureReason) ProtoMessage() {}
+
+func (x *IngestionFailureEvent_ConfluentCloudFailureReason) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[66]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use IngestionFailureEvent_ConfluentCloudFailureReason.ProtoReflect.Descriptor instead.
+func (*IngestionFailureEvent_ConfluentCloudFailureReason) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 7}
+}
+
+func (x *IngestionFailureEvent_ConfluentCloudFailureReason) GetClusterId() string {
+	if x != nil {
+		return x.ClusterId
+	}
+	return ""
+}
+
+func (x *IngestionFailureEvent_ConfluentCloudFailureReason) GetKafkaTopic() string {
+	if x != nil {
+		return x.KafkaTopic
+	}
+	return ""
+}
+
+func (x *IngestionFailureEvent_ConfluentCloudFailureReason) GetPartitionId() int64 {
+	if x != nil {
+		return x.PartitionId
+	}
+	return 0
+}
+
+func (x *IngestionFailureEvent_ConfluentCloudFailureReason) GetOffset() int64 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (m *IngestionFailureEvent_ConfluentCloudFailureReason) GetReason() isIngestionFailureEvent_ConfluentCloudFailureReason_Reason {
+	if m != nil {
+		return m.Reason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_ConfluentCloudFailureReason) GetApiViolationReason() *IngestionFailureEvent_ApiViolationReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_ConfluentCloudFailureReason_ApiViolationReason); ok {
+		return x.ApiViolationReason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_ConfluentCloudFailureReason) GetSchemaViolationReason() *IngestionFailureEvent_SchemaViolationReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_ConfluentCloudFailureReason_SchemaViolationReason); ok {
+		return x.SchemaViolationReason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_ConfluentCloudFailureReason) GetMessageTransformationFailureReason() *IngestionFailureEvent_MessageTransformationFailureReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_ConfluentCloudFailureReason_MessageTransformationFailureReason); ok {
+		return x.MessageTransformationFailureReason
+	}
+	return nil
+}
+
+type isIngestionFailureEvent_ConfluentCloudFailureReason_Reason interface {
+	isIngestionFailureEvent_ConfluentCloudFailureReason_Reason()
+}
+
+type IngestionFailureEvent_ConfluentCloudFailureReason_ApiViolationReason struct {
+	// Optional. The Pub/Sub API limits prevented the desired message from
+	// being published.
+	ApiViolationReason *IngestionFailureEvent_ApiViolationReason `protobuf:"bytes,5,opt,name=api_violation_reason,json=apiViolationReason,proto3,oneof"`
+}
+
+type IngestionFailureEvent_ConfluentCloudFailureReason_SchemaViolationReason struct {
+	// Optional. The Pub/Sub message failed schema validation.
+	SchemaViolationReason *IngestionFailureEvent_SchemaViolationReason `protobuf:"bytes,6,opt,name=schema_violation_reason,json=schemaViolationReason,proto3,oneof"`
+}
+
+type IngestionFailureEvent_ConfluentCloudFailureReason_MessageTransformationFailureReason struct {
+	// Optional. Failure encountered when applying a message transformation to
+	// the Pub/Sub message.
+	MessageTransformationFailureReason *IngestionFailureEvent_MessageTransformationFailureReason `protobuf:"bytes,7,opt,name=message_transformation_failure_reason,json=messageTransformationFailureReason,proto3,oneof"`
+}
+
+func (*IngestionFailureEvent_ConfluentCloudFailureReason_ApiViolationReason) isIngestionFailureEvent_ConfluentCloudFailureReason_Reason() {
+}
+
+func (*IngestionFailureEvent_ConfluentCloudFailureReason_SchemaViolationReason) isIngestionFailureEvent_ConfluentCloudFailureReason_Reason() {
+}
+
+func (*IngestionFailureEvent_ConfluentCloudFailureReason_MessageTransformationFailureReason) isIngestionFailureEvent_ConfluentCloudFailureReason_Reason() {
+}
+
+// Failure when ingesting from an AWS Kinesis source.
+type IngestionFailureEvent_AwsKinesisFailureReason struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional. The stream ARN of the Kinesis stream being ingested from.
+	StreamArn string `protobuf:"bytes,1,opt,name=stream_arn,json=streamArn,proto3" json:"stream_arn,omitempty"`
+	// Optional. The partition key of the message that failed to be ingested.
+	PartitionKey string `protobuf:"bytes,2,opt,name=partition_key,json=partitionKey,proto3" json:"partition_key,omitempty"`
+	// Optional. The sequence number of the message that failed to be ingested.
+	SequenceNumber string `protobuf:"bytes,3,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"`
+	// Reason why ingestion failed for the specified message.
+	//
+	// Types that are assignable to Reason:
+	//
+	//	*IngestionFailureEvent_AwsKinesisFailureReason_SchemaViolationReason
+	//	*IngestionFailureEvent_AwsKinesisFailureReason_MessageTransformationFailureReason
+	Reason isIngestionFailureEvent_AwsKinesisFailureReason_Reason `protobuf_oneof:"reason"`
+}
+
+func (x *IngestionFailureEvent_AwsKinesisFailureReason) Reset() {
+	*x = IngestionFailureEvent_AwsKinesisFailureReason{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[67]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *IngestionFailureEvent_AwsKinesisFailureReason) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IngestionFailureEvent_AwsKinesisFailureReason) ProtoMessage() {}
+
+func (x *IngestionFailureEvent_AwsKinesisFailureReason) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[67]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use IngestionFailureEvent_AwsKinesisFailureReason.ProtoReflect.Descriptor instead.
+func (*IngestionFailureEvent_AwsKinesisFailureReason) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 8}
+}
+
+func (x *IngestionFailureEvent_AwsKinesisFailureReason) GetStreamArn() string {
+	if x != nil {
+		return x.StreamArn
+	}
+	return ""
+}
+
+func (x *IngestionFailureEvent_AwsKinesisFailureReason) GetPartitionKey() string {
+	if x != nil {
+		return x.PartitionKey
+	}
+	return ""
+}
+
+func (x *IngestionFailureEvent_AwsKinesisFailureReason) GetSequenceNumber() string {
+	if x != nil {
+		return x.SequenceNumber
+	}
+	return ""
+}
+
+func (m *IngestionFailureEvent_AwsKinesisFailureReason) GetReason() isIngestionFailureEvent_AwsKinesisFailureReason_Reason {
+	if m != nil {
+		return m.Reason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_AwsKinesisFailureReason) GetSchemaViolationReason() *IngestionFailureEvent_SchemaViolationReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_AwsKinesisFailureReason_SchemaViolationReason); ok {
+		return x.SchemaViolationReason
+	}
+	return nil
+}
+
+func (x *IngestionFailureEvent_AwsKinesisFailureReason) GetMessageTransformationFailureReason() *IngestionFailureEvent_MessageTransformationFailureReason {
+	if x, ok := x.GetReason().(*IngestionFailureEvent_AwsKinesisFailureReason_MessageTransformationFailureReason); ok {
+		return x.MessageTransformationFailureReason
+	}
+	return nil
+}
+
+type isIngestionFailureEvent_AwsKinesisFailureReason_Reason interface {
+	isIngestionFailureEvent_AwsKinesisFailureReason_Reason()
+}
+
+type IngestionFailureEvent_AwsKinesisFailureReason_SchemaViolationReason struct {
+	// Optional. The Pub/Sub message failed schema validation.
+	SchemaViolationReason *IngestionFailureEvent_SchemaViolationReason `protobuf:"bytes,4,opt,name=schema_violation_reason,json=schemaViolationReason,proto3,oneof"`
+}
+
+type IngestionFailureEvent_AwsKinesisFailureReason_MessageTransformationFailureReason struct {
+	// Optional. Failure encountered when applying a message transformation to
+	// the Pub/Sub message.
+	MessageTransformationFailureReason *IngestionFailureEvent_MessageTransformationFailureReason `protobuf:"bytes,5,opt,name=message_transformation_failure_reason,json=messageTransformationFailureReason,proto3,oneof"`
+}
+
+func (*IngestionFailureEvent_AwsKinesisFailureReason_SchemaViolationReason) isIngestionFailureEvent_AwsKinesisFailureReason_Reason() {
+}
+
+func (*IngestionFailureEvent_AwsKinesisFailureReason_MessageTransformationFailureReason) isIngestionFailureEvent_AwsKinesisFailureReason_Reason() {
+}
+
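These failure-reason messages are emitted by the service rather than built by clients; per the PlatformLogsSettings reference later in this file, they typically surface as JSON payloads in platform logs. A sketch of decoding such a payload back into the generated parent type, assuming `protojson` from google.golang.org/protobuf and the same `pubsubpb` import as above (the delivery-as-JSON assumption and the helper name are illustrative):

	package ingestlog

	import (
		"cloud.google.com/go/pubsub/apiv1/pubsubpb"
		"google.golang.org/protobuf/encoding/protojson"
	)

	// decodeFailureEvent parses a platform-log jsonPayload back into the
	// generated IngestionFailureEvent. DiscardUnknown keeps the decode
	// tolerant of fields introduced by newer API revisions.
	func decodeFailureEvent(raw []byte) (*pubsubpb.IngestionFailureEvent, error) {
		var ev pubsubpb.IngestionFailureEvent
		opts := protojson.UnmarshalOptions{DiscardUnknown: true}
		if err := opts.Unmarshal(raw, &ev); err != nil {
			return nil, err
		}
		return &ev, nil
	}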
+// Information about an associated [Analytics Hub
+// subscription](https://cloud.google.com/bigquery/docs/analytics-hub-manage-subscriptions).
+type Subscription_AnalyticsHubSubscriptionInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional. The name of the associated Analytics Hub listing resource.
+	// Pattern:
+	// "projects/{project}/locations/{location}/dataExchanges/{data_exchange}/listings/{listing}"
+	Listing string `protobuf:"bytes,1,opt,name=listing,proto3" json:"listing,omitempty"`
+	// Optional. The name of the associated Analytics Hub subscription resource.
+	// Pattern:
+	// "projects/{project}/locations/{location}/subscriptions/{subscription}"
+	Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"`
+}
+
+func (x *Subscription_AnalyticsHubSubscriptionInfo) Reset() {
+	*x = Subscription_AnalyticsHubSubscriptionInfo{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[70]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Subscription_AnalyticsHubSubscriptionInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Subscription_AnalyticsHubSubscriptionInfo) ProtoMessage() {}
+
+func (x *Subscription_AnalyticsHubSubscriptionInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[70]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Subscription_AnalyticsHubSubscriptionInfo.ProtoReflect.Descriptor instead.
+func (*Subscription_AnalyticsHubSubscriptionInfo) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 0}
+}
+
+func (x *Subscription_AnalyticsHubSubscriptionInfo) GetListing() string {
+	if x != nil {
+		return x.Listing
+	}
+	return ""
+}
+
+func (x *Subscription_AnalyticsHubSubscriptionInfo) GetSubscription() string {
+	if x != nil {
+		return x.Subscription
+	}
+	return ""
+}
+
+// Contains information needed for generating an
+// [OpenID Connect
+// token](https://developers.google.com/identity/protocols/OpenIDConnect).
+type PushConfig_OidcToken struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional. [Service account
+	// email](https://cloud.google.com/iam/docs/service-accounts)
+	// used for generating the OIDC token. For more information
+	// on setting up authentication, see
+	// [Push subscriptions](https://cloud.google.com/pubsub/docs/push).
+	ServiceAccountEmail string `protobuf:"bytes,1,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
+	// Optional. Audience to be used when generating OIDC token. The audience
+	// claim identifies the recipients that the JWT is intended for. The
+	// audience value is a single case-sensitive string. Having multiple values
+	// (array) for the audience field is not supported. More info about the OIDC
+	// JWT token audience here:
+	// https://tools.ietf.org/html/rfc7519#section-4.1.3 Note: if not specified,
+	// the Push endpoint URL will be used.
+	Audience string `protobuf:"bytes,2,opt,name=audience,proto3" json:"audience,omitempty"`
+}
+
+func (x *PushConfig_OidcToken) Reset() {
+	*x = PushConfig_OidcToken{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[72]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *PushConfig_OidcToken) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PushConfig_OidcToken) ProtoMessage() {}
+
+func (x *PushConfig_OidcToken) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[72]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PushConfig_OidcToken.ProtoReflect.Descriptor instead.
+func (*PushConfig_OidcToken) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26, 0}
+}
+
+func (x *PushConfig_OidcToken) GetServiceAccountEmail() string {
+	if x != nil {
+		return x.ServiceAccountEmail
+	}
+	return ""
+}
+
+func (x *PushConfig_OidcToken) GetAudience() string {
+	if x != nil {
+		return x.Audience
+	}
+	return ""
+}
+
+// The payload to the push endpoint is in the form of the JSON representation
+// of a PubsubMessage
+// (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage).
+type PushConfig_PubsubWrapper struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *PushConfig_PubsubWrapper) Reset() {
+	*x = PushConfig_PubsubWrapper{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[73]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *PushConfig_PubsubWrapper) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PushConfig_PubsubWrapper) ProtoMessage() {}
+
+func (x *PushConfig_PubsubWrapper) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[73]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PushConfig_PubsubWrapper.ProtoReflect.Descriptor instead.
+func (*PushConfig_PubsubWrapper) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26, 1}
+}
+
+// Sets the `data` field as the HTTP body for delivery.
+type PushConfig_NoWrapper struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional. When true, writes the Pub/Sub message metadata to
+	// `x-goog-pubsub-<KEY>:<VAL>` headers of the HTTP request. Writes the
+	// Pub/Sub message attributes to `<KEY>:<VAL>` headers of the HTTP request.
+	WriteMetadata bool `protobuf:"varint,1,opt,name=write_metadata,json=writeMetadata,proto3" json:"write_metadata,omitempty"`
+}
+
+func (x *PushConfig_NoWrapper) Reset() {
+	*x = PushConfig_NoWrapper{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[74]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *PushConfig_NoWrapper) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PushConfig_NoWrapper) ProtoMessage() {}
+
+func (x *PushConfig_NoWrapper) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[74]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PushConfig_NoWrapper.ProtoReflect.Descriptor instead.
+func (*PushConfig_NoWrapper) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26, 2}
+}
+
+func (x *PushConfig_NoWrapper) GetWriteMetadata() bool {
+	if x != nil {
+		return x.WriteMetadata
+	}
+	return false
+}
+
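On the write side, `PushConfig_OidcToken` and `PushConfig_NoWrapper` are set through their oneof wrapper types, which by protoc-gen-go convention carry a trailing underscore (`PushConfig_OidcToken_`, `PushConfig_NoWrapper_`) to avoid colliding with the message names above; those wrappers sit outside this hunk, so treat the exact names as assumptions. A minimal sketch:

	package pushcfg

	import "cloud.google.com/go/pubsub/apiv1/pubsubpb"

	// oidcPushConfig builds a PushConfig that authenticates pushes with an
	// OIDC token for the given service account and, via NoWrapper, delivers
	// the raw message data as the HTTP body with metadata promoted to headers.
	// (Hypothetical helper; wrapper type names assumed from protoc-gen-go.)
	func oidcPushConfig(endpoint, serviceAccount string) *pubsubpb.PushConfig {
		return &pubsubpb.PushConfig{
			PushEndpoint: endpoint,
			AuthenticationMethod: &pubsubpb.PushConfig_OidcToken_{
				OidcToken: &pubsubpb.PushConfig_OidcToken{
					ServiceAccountEmail: serviceAccount,
					// Audience is omitted: per the comment above, it
					// defaults to the push endpoint URL.
				},
			},
			Wrapper: &pubsubpb.PushConfig_NoWrapper_{
				NoWrapper: &pubsubpb.PushConfig_NoWrapper{WriteMetadata: true},
			},
		}
	}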
+// Configuration for writing message data in text format.
+// Message payloads will be written to files as raw text, separated by a
+// newline.
+type CloudStorageConfig_TextConfig struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *CloudStorageConfig_TextConfig) Reset() {
+	*x = CloudStorageConfig_TextConfig{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[76]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *CloudStorageConfig_TextConfig) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CloudStorageConfig_TextConfig) ProtoMessage() {}
+
+func (x *CloudStorageConfig_TextConfig) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[76]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CloudStorageConfig_TextConfig.ProtoReflect.Descriptor instead.
+func (*CloudStorageConfig_TextConfig) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28, 0}
+}
+
+// Configuration for writing message data in Avro format.
+// Message payloads and metadata will be written to files as an Avro binary.
+type CloudStorageConfig_AvroConfig struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional. When true, write the subscription name, message_id,
+	// publish_time, attributes, and ordering_key as additional fields in the
+	// output. The subscription name, message_id, and publish_time fields are
+	// put in their own fields while all other message properties other than
+	// data (for example, an ordering_key, if present) are added as entries in
+	// the attributes map.
+	WriteMetadata bool `protobuf:"varint,1,opt,name=write_metadata,json=writeMetadata,proto3" json:"write_metadata,omitempty"`
+	// Optional. When true, the output Cloud Storage file will be serialized
+	// using the topic schema, if it exists.
+	UseTopicSchema bool `protobuf:"varint,2,opt,name=use_topic_schema,json=useTopicSchema,proto3" json:"use_topic_schema,omitempty"`
+}
+
+func (x *CloudStorageConfig_AvroConfig) Reset() {
+	*x = CloudStorageConfig_AvroConfig{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[77]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *CloudStorageConfig_AvroConfig) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CloudStorageConfig_AvroConfig) ProtoMessage() {}
+
+func (x *CloudStorageConfig_AvroConfig) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[77]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CloudStorageConfig_AvroConfig.ProtoReflect.Descriptor instead.
+func (*CloudStorageConfig_AvroConfig) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28, 1}
+}
+
+func (x *CloudStorageConfig_AvroConfig) GetWriteMetadata() bool {
+	if x != nil {
+		return x.WriteMetadata
+	}
+	return false
+}
+
+func (x *CloudStorageConfig_AvroConfig) GetUseTopicSchema() bool {
+	if x != nil {
+		return x.UseTopicSchema
+	}
+	return false
+}
+
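`CloudStorageConfig_TextConfig` and `CloudStorageConfig_AvroConfig` plug into the `output_format` oneof of `CloudStorageConfig` the same way; a sketch, again assuming the trailing-underscore wrapper name (`CloudStorageConfig_AvroConfig_`) from the generated package:

	package storagecfg

	import "cloud.google.com/go/pubsub/apiv1/pubsubpb"

	// avroStorageConfig selects Avro output for a Cloud Storage subscription
	// on the given bucket; write_metadata folds attributes and ordering keys
	// into each record, and use_topic_schema reuses the topic schema when
	// present. (Hypothetical helper; wrapper name assumed from protoc-gen-go.)
	func avroStorageConfig(bucket string) *pubsubpb.CloudStorageConfig {
		return &pubsubpb.CloudStorageConfig{
			Bucket: bucket,
			OutputFormat: &pubsubpb.CloudStorageConfig_AvroConfig_{
				AvroConfig: &pubsubpb.CloudStorageConfig_AvroConfig{
					WriteMetadata:  true,
					UseTopicSchema: true,
				},
			},
		}
	}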
+// Acknowledgment IDs sent in one or more previous requests to acknowledge a
+// previously received message.
+type StreamingPullResponse_AcknowledgeConfirmation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional. Successfully processed acknowledgment IDs.
+	AckIds []string `protobuf:"bytes,1,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"`
+	// Optional. List of acknowledgment IDs that were malformed or whose
+	// acknowledgment deadline has expired.
+	InvalidAckIds []string `protobuf:"bytes,2,rep,name=invalid_ack_ids,json=invalidAckIds,proto3" json:"invalid_ack_ids,omitempty"`
+	// Optional. List of acknowledgment IDs that were out of order.
+	UnorderedAckIds []string `protobuf:"bytes,3,rep,name=unordered_ack_ids,json=unorderedAckIds,proto3" json:"unordered_ack_ids,omitempty"`
+	// Optional. List of acknowledgment IDs that failed processing with
+	// temporary issues.
+	TemporaryFailedAckIds []string `protobuf:"bytes,4,rep,name=temporary_failed_ack_ids,json=temporaryFailedAckIds,proto3" json:"temporary_failed_ack_ids,omitempty"`
+}
+
+func (x *StreamingPullResponse_AcknowledgeConfirmation) Reset() {
+	*x = StreamingPullResponse_AcknowledgeConfirmation{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[78]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *StreamingPullResponse_AcknowledgeConfirmation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StreamingPullResponse_AcknowledgeConfirmation) ProtoMessage() {}
+
+func (x *StreamingPullResponse_AcknowledgeConfirmation) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[78]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use StreamingPullResponse_AcknowledgeConfirmation.ProtoReflect.Descriptor instead.
+func (*StreamingPullResponse_AcknowledgeConfirmation) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41, 0}
+}
+
+func (x *StreamingPullResponse_AcknowledgeConfirmation) GetAckIds() []string {
+	if x != nil {
+		return x.AckIds
+	}
+	return nil
+}
+
+func (x *StreamingPullResponse_AcknowledgeConfirmation) GetInvalidAckIds() []string {
+	if x != nil {
+		return x.InvalidAckIds
+	}
+	return nil
+}
+
+func (x *StreamingPullResponse_AcknowledgeConfirmation) GetUnorderedAckIds() []string {
+	if x != nil {
+		return x.UnorderedAckIds
+	}
+	return nil
+}
+
+func (x *StreamingPullResponse_AcknowledgeConfirmation) GetTemporaryFailedAckIds() []string {
+	if x != nil {
+		return x.TemporaryFailedAckIds
+	}
+	return nil
+}
+
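For exactly-once delivery, the confirmation lists above partition naturally by what a subscriber should do next: `temporary_failed` IDs can be re-sent, while `invalid` and `unordered` IDs are terminal. A small helper sketch (name illustrative) using only the getters from this hunk:

	package exactlyonce

	import "cloud.google.com/go/pubsub/apiv1/pubsubpb"

	// splitAckResults partitions an AcknowledgeConfirmation into acks that
	// are settled and acks worth re-sending.
	// (Hypothetical helper built on the generated getters above.)
	func splitAckResults(c *pubsubpb.StreamingPullResponse_AcknowledgeConfirmation) (settled, retry []string) {
		settled = append(settled, c.GetAckIds()...)
		retry = append(retry, c.GetTemporaryFailedAckIds()...)
		// c.GetInvalidAckIds() and c.GetUnorderedAckIds() are terminal;
		// log them rather than retrying.
		return settled, retry
	}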
+// Acknowledgment IDs sent in one or more previous requests to modify the
+// deadline for a specific message.
+type StreamingPullResponse_ModifyAckDeadlineConfirmation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional. Successfully processed acknowledgment IDs.
+	AckIds []string `protobuf:"bytes,1,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"`
+	// Optional. List of acknowledgment IDs that were malformed or whose
+	// acknowledgment deadline has expired.
+	InvalidAckIds []string `protobuf:"bytes,2,rep,name=invalid_ack_ids,json=invalidAckIds,proto3" json:"invalid_ack_ids,omitempty"`
+	// Optional. List of acknowledgment IDs that failed processing with
+	// temporary issues.
+	TemporaryFailedAckIds []string `protobuf:"bytes,3,rep,name=temporary_failed_ack_ids,json=temporaryFailedAckIds,proto3" json:"temporary_failed_ack_ids,omitempty"`
+}
+
+func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) Reset() {
+	*x = StreamingPullResponse_ModifyAckDeadlineConfirmation{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[79]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoMessage() {}
+
+func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[79]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use StreamingPullResponse_ModifyAckDeadlineConfirmation.ProtoReflect.Descriptor instead.
+func (*StreamingPullResponse_ModifyAckDeadlineConfirmation) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41, 1}
+}
+
+func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) GetAckIds() []string {
+	if x != nil {
+		return x.AckIds
+	}
+	return nil
+}
+
+func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) GetInvalidAckIds() []string {
+	if x != nil {
+		return x.InvalidAckIds
+	}
+	return nil
+}
+
+func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) GetTemporaryFailedAckIds() []string {
+	if x != nil {
+		return x.TemporaryFailedAckIds
+	}
+	return nil
+}
+
+// Subscription properties sent as part of the response.
+type StreamingPullResponse_SubscriptionProperties struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional. True iff exactly once delivery is enabled for this
+	// subscription.
+	ExactlyOnceDeliveryEnabled bool `protobuf:"varint,1,opt,name=exactly_once_delivery_enabled,json=exactlyOnceDeliveryEnabled,proto3" json:"exactly_once_delivery_enabled,omitempty"`
+	// Optional. True iff message ordering is enabled for this subscription.
+	MessageOrderingEnabled bool `protobuf:"varint,2,opt,name=message_ordering_enabled,json=messageOrderingEnabled,proto3" json:"message_ordering_enabled,omitempty"`
+}
+
+func (x *StreamingPullResponse_SubscriptionProperties) Reset() {
+	*x = StreamingPullResponse_SubscriptionProperties{}
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[80]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *StreamingPullResponse_SubscriptionProperties) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StreamingPullResponse_SubscriptionProperties) ProtoMessage() {}
+
+func (x *StreamingPullResponse_SubscriptionProperties) ProtoReflect() protoreflect.Message {
+	mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[80]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use StreamingPullResponse_SubscriptionProperties.ProtoReflect.Descriptor instead.
+func (*StreamingPullResponse_SubscriptionProperties) Descriptor() ([]byte, []int) {
+	return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41, 2}
+}
+
+func (x *StreamingPullResponse_SubscriptionProperties) GetExactlyOnceDeliveryEnabled() bool {
+	if x != nil {
+		return x.ExactlyOnceDeliveryEnabled
+	}
+	return false
+}
+
+func (x *StreamingPullResponse_SubscriptionProperties) GetMessageOrderingEnabled() bool {
+	if x != nil {
+		return x.MessageOrderingEnabled
+	}
+	return false
+}
+
+var File_google_pubsub_v1_pubsub_proto protoreflect.FileDescriptor
+
+var file_google_pubsub_v1_pubsub_proto_rawDesc = []byte{
+ 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2f, + 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x10, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x01, 0x0a, 0x14, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x1b, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x50, + 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x31, 0x0a, 0x12, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x5f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x10, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x69, 0x74, 0x22, 0xeb, 0x01, 0x0a, 0x0e, 0x53, 0x63, 0x68, 0x65,
0x6d, 0x61, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1e, 0x0a, + 0x1c, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3b, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x11, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x0f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x22, 0xaf, 0x1b, 0x0a, 0x1b, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x60, 0x0a, 0x0b, 0x61, 0x77, 0x73, 0x5f, 0x6b, 0x69, 0x6e, 0x65, 0x73, 0x69, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, + 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x77, 0x73, 0x4b, 0x69, 0x6e, + 0x65, 0x73, 0x69, 0x73, 0x12, 0x66, 0x0a, 0x0d, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0c, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x6d, 0x0a, 0x10, + 0x61, 0x7a, 0x75, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x75, 0x62, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x41, 0x7a, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x48, 0x75, 0x62, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x7a, 0x75, + 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x75, 0x62, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x61, + 0x77, 
0x73, 0x5f, 0x6d, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x41, 0x77, 0x73, 0x4d, + 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x06, 0x61, 0x77, 0x73, 0x4d, 0x73, + 0x6b, 0x12, 0x6c, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x75, 0x65, 0x6e, 0x74, 0x5f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, + 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x75, + 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, + 0x0e, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x75, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x12, + 0x61, 0x0a, 0x16, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x6c, 0x6f, 0x67, 0x73, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x4c, 0x6f, 0x67, 0x73, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x1a, 0xa8, 0x03, 0x0a, 0x0a, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, + 0x73, 0x12, 0x59, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x22, 0x0a, 0x0a, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x72, 0x6e, + 0x12, 0x26, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x5f, 0x61, 0x72, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6d, 0x65, 0x72, 0x41, 0x72, 0x6e, 0x12, 0x25, 0x0a, 0x0c, 0x61, 0x77, 0x73, 0x5f, + 0x72, 0x6f, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x61, 0x77, 0x73, 0x52, 0x6f, 0x6c, 0x65, 0x41, 0x72, 0x6e, 0x12, + 0x33, 0x0a, 0x13, 0x67, 0x63, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x11, 0x67, 0x63, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x96, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, + 0x0a, 0x11, 0x53, 0x54, 0x41, 
0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, + 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x4b, 0x49, 0x4e, 0x45, 0x53, 0x49, 0x53, 0x5f, 0x50, 0x45, 0x52, + 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, + 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x50, 0x45, 0x52, 0x4d, + 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x14, 0x0a, 0x10, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, + 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, + 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x1a, 0xfe, 0x06, + 0x0a, 0x0c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x5b, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x6d, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x78, + 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x6d, 0x0a, 0x0b, 0x61, 0x76, 0x72, 0x6f, 0x5f, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x76, 0x72, 0x6f, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x80, 0x01, 0x0a, 0x12, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x5f, 0x61, 0x76, 0x72, 0x6f, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x10, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x41, + 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x17, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x1a, 0x42, 0x0a, 0x0a, 0x54, + 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x26, 0x0a, 0x09, 0x64, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x48, 0x00, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x88, 0x01, + 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x1a, + 0x0c, 0x0a, 0x0a, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x12, 0x0a, + 0x10, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x22, 0x9a, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, + 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, + 0x0a, 0x1f, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, + 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x50, + 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, + 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4e, 0x4f, 0x54, + 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x4f, 0x4f, 0x5f, + 0x4d, 0x41, 0x4e, 0x59, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x10, 0x05, 0x42, 0x0e, + 0x0a, 0x0c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0xe1, + 0x04, 0x0a, 0x0e, 0x41, 0x7a, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x75, 0x62, + 0x73, 0x12, 0x5d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x41, 0x7a, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x75, 0x62, 0x73, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x2a, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 
0x67, 0x72, 0x6f, + 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x21, 0x0a, 0x09, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x20, 0x0a, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x75, 0x62, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x75, + 0x62, 0x12, 0x20, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x74, 0x65, 0x6e, + 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x13, 0x67, 0x63, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x67, 0x63, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xd7, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, + 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x20, 0x0a, 0x1c, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x48, + 0x55, 0x42, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, + 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, + 0x53, 0x48, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, + 0x4e, 0x49, 0x45, 0x44, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x50, + 0x41, 0x43, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, + 0x17, 0x0a, 0x13, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x55, 0x42, 0x5f, 0x4e, 0x4f, 0x54, + 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x55, 0x42, 0x53, + 0x43, 0x52, 0x49, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, + 0x4e, 0x44, 0x10, 0x06, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, + 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, + 0x10, 0x07, 0x1a, 0x8f, 0x03, 0x0a, 0x06, 0x41, 0x77, 0x73, 0x4d, 0x73, 0x6b, 0x12, 0x55, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x41, 0x77, 0x73, 0x4d, + 0x73, 0x6b, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, + 0x74, 
0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x61, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x72, 0x6e, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x25, 0x0a, 0x0c, 0x61, 0x77, 0x73, 0x5f, 0x72, 0x6f, 0x6c, + 0x65, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0a, 0x61, 0x77, 0x73, 0x52, 0x6f, 0x6c, 0x65, 0x41, 0x72, 0x6e, 0x12, 0x33, 0x0a, 0x13, + 0x67, 0x63, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, + 0x67, 0x63, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0x90, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, + 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x19, + 0x0a, 0x15, 0x4d, 0x53, 0x4b, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, + 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x42, + 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, + 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4c, 0x55, 0x53, + 0x54, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, + 0x13, 0x0a, 0x0f, 0x54, 0x4f, 0x50, 0x49, 0x43, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, + 0x4e, 0x44, 0x10, 0x05, 0x1a, 0x83, 0x04, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x75, 0x65, + 0x6e, 0x74, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x12, 0x5d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x75, 0x65, 0x6e, 0x74, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2e, 0x0a, 0x10, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2d, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, 0x6f, + 0x6f, 0x6c, 0x49, 0x64, 0x12, 
0x33, 0x0a, 0x13, 0x67, 0x63, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x67, 0x63, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xbe, 0x01, 0x0a, 0x05, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, + 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x25, 0x0a, 0x21, 0x43, 0x4f, 0x4e, 0x46, 0x4c, 0x55, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, + 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x03, 0x12, 0x20, 0x0a, 0x1c, + 0x55, 0x4e, 0x52, 0x45, 0x41, 0x43, 0x48, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x54, + 0x53, 0x54, 0x52, 0x41, 0x50, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x04, 0x12, 0x15, + 0x0a, 0x11, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, + 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4f, 0x50, 0x49, 0x43, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x06, 0x42, 0x08, 0x0a, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x50, 0x0a, + 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x4c, 0x6f, 0x67, 0x73, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x22, + 0x5f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, + 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x02, 0x12, 0x08, + 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, + 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, + 0x22, 0x90, 0x1d, 0x0a, 0x15, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x76, 0x0a, 0x15, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, + 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x48, 0x00, 0x52, 0x13, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x6a, 0x0a, 0x0f, 0x61, 0x77, 0x73, 0x5f, 0x6d, + 0x73, 0x6b, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x77, 0x73, 0x4d, 0x73, 0x6b, + 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x77, 0x73, 0x4d, 0x73, 0x6b, 0x46, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x18, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x5f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x75, 0x62, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x41, 0x7a, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x75, 0x62, 0x73, 0x46, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x48, 0x00, 0x52, 0x15, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x75, + 0x62, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x17, 0x63, 0x6f, + 0x6e, 0x66, 0x6c, 0x75, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x66, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x75, 0x65, 0x6e, 0x74, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x15, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x75, 0x65, + 0x6e, 0x74, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x76, + 0x0a, 0x13, 0x61, 0x77, 0x73, 0x5f, 0x6b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x5f, 0x66, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x48, 0x00, 0x52, 0x11, 0x61, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 
0x69, 0x73, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x1a, 0x14, 0x0a, 0x12, 0x41, 0x70, 0x69, 0x56, 0x69, 0x6f, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x1a, 0x13, 0x0a, 0x11, + 0x41, 0x76, 0x72, 0x6f, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x1a, 0x17, 0x0a, 0x15, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x69, 0x6f, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x1a, 0x24, 0x0a, 0x22, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x1a, 0xa0, 0x05, 0x0a, 0x13, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x11, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x70, 0x0a, + 0x13, 0x61, 0x76, 0x72, 0x6f, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, + 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x11, 0x61, 0x76, + 0x72, 0x6f, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x73, 0x0a, 0x14, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, + 0x52, 0x12, 0x61, 0x70, 0x69, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x7c, 0x0a, 0x17, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, + 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x61, 
0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x15, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x12, 0xa4, 0x01, 0x0a, 0x25, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x22, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x1a, 0xca, 0x04, 0x0a, 0x13, 0x41, 0x77, 0x73, 0x4d, 0x73, 0x6b, 0x46, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0b, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x72, + 0x6e, 0x12, 0x24, 0x0a, 0x0b, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6b, 0x61, 0x66, + 0x6b, 0x61, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, + 0x1b, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x73, 0x0a, 0x14, + 0x61, 0x70, 0x69, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, + 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x12, 0x61, + 0x70, 0x69, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x12, 0x7c, 0x0a, 0x17, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x69, 0x6f, 0x6c, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x56, 0x69, 0x6f, 
0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x15, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0xa4, 0x01, 0x0a, 0x25, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, + 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x48, 0x00, 0x52, 0x22, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x1a, 0xcb, 0x04, 0x0a, 0x1b, 0x41, 0x7a, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, + 0x75, 0x62, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x21, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x75, 0x62, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x48, 0x75, 0x62, 0x12, 0x26, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, + 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x73, 0x0a, 0x14, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, + 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x12, 0x61, 0x70, 0x69, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x7c, 0x0a, 0x17, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 
0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x15, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x69, + 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0xa4, 0x01, + 0x0a, 0x25, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, + 0x52, 0x22, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x1a, 0xd0, + 0x04, 0x0a, 0x1b, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x75, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x22, + 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0b, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x5f, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6b, 0x61, + 0x66, 0x6b, 0x61, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x73, 0x0a, + 0x14, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x12, + 0x61, 0x70, 0x69, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x12, 0x7c, 0x0a, 0x17, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x69, 0x6f, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 
0x69, 0x6f, 0x6e, + 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x15, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0xa4, 0x01, 0x0a, 0x25, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x48, 0x00, 0x52, 0x22, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x1a, 0xc4, 0x03, 0x0a, 0x17, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, + 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x22, 0x0a, + 0x0a, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x72, + 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x70, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x0f, 0x73, + 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x7c, 0x0a, 0x17, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, + 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, + 0x52, 0x15, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0xa4, 0x01, 0x0a, 0x25, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 
0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x22, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x08, + 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x22, 0x52, 0x0a, 0x0d, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x55, 0x44, 0x46, 0x12, 0x28, 0x0a, 0x0d, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0c, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x17, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0xb0, 0x01, 0x0a, 0x10, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x4d, 0x0a, 0x0e, + 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x5f, 0x75, 0x64, 0x66, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x55, 0x44, 0x46, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x61, + 0x76, 0x61, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x55, 0x64, 0x66, 0x12, 0x1f, 0x0a, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x05, 0xe0, 0x41, + 0x01, 0x18, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x08, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x0b, 0x0a, + 0x09, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x22, 0xb9, 0x07, 0x0a, 0x05, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x61, 0x0a, 0x16, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 
0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x25, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6b, + 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x61, 0x74, + 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, + 0x50, 0x7a, 0x73, 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x38, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x77, 0x0a, 0x1e, 0x69, + 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, + 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x56, 0x0a, 0x12, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x1a, 0x39, 0x0a, 0x0b, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x48, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, + 0x45, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x49, 0x4e, 0x47, 0x45, 0x53, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, + 0x02, 0x3a, 0x63, 0xea, 0x41, 0x60, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, + 0x70, 0x69, 0x63, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x7b, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x7d, 0x12, 0x0f, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x2d, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x2a, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x32, + 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0xc3, 0x02, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x54, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, + 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, + 0x68, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x1a, 0x3d, 0x0a, + 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x0f, + 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 
0x2f, 0x54, 0x6f, + 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x8a, 0x01, 0x0a, 0x12, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x32, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x8d, 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x37, 0x0a, 0x0f, 0x50, 0x75, 0x62, 0x6c, 0x69, + 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0b, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x73, + 0x22, 0xa8, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, + 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, + 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x77, 0x0a, 0x12, 0x4c, + 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x34, 0x0a, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 
0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa0, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9f, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0d, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x0a, 0x0f, + 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, + 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x4c, 0x69, + 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x6c, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x21, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4f, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x6b, 0x0a, 0x19, 0x44, 0x65, 0x74, 0x61, 0x63, + 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x85, 0x0e, 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x42, 0x0a, 0x0b, 0x70, 0x75, 0x73, 0x68, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x0a, 0x70, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0f, 0x62, + 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x62, 0x69, 0x67, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5b, 0x0a, 0x14, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x14, 0x61, 0x63, 0x6b, 0x5f, + 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x61, 0x63, 0x6b, + 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, + 0x37, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x5f, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x13, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x41, 0x63, 0x6b, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x3b, 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x11, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x10, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x55, 0x0a, 0x12, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 
0x63, 0x79, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1f, 0x0a, + 0x08, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x12, 0x44, + 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, + 0x5f, 0x6f, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x45, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x69, + 0x76, 0x65, 0x72, 0x79, 0x12, 0x67, 0x0a, 0x20, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1d, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x87, + 0x01, 0x0a, 0x1f, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x5f, 0x68, 0x75, 0x62, + 0x5f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, + 0x63, 0x73, 0x48, 0x75, 0x62, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1c, 0x61, 0x6e, 0x61, 0x6c, + 0x79, 0x74, 0x69, 0x63, 0x73, 0x48, 0x75, 0x62, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x56, 0x0a, 0x12, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x19, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x6d, + 0x65, 
0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x73, + 0x1a, 0x66, 0x0a, 0x1c, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x48, 0x75, 0x62, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x1d, 0x0a, 0x07, 0x6c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x12, + 0x27, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x3e, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x02, 0x3a, 0x75, 0xea, 0x41, 0x72, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, + 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2a, 0x0d, 0x73, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x0c, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x9f, 0x01, 0x0a, 0x0b, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x47, 0x0a, 0x0f, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, + 0x6f, 0x66, 0x66, 0x12, 0x47, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, + 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x22, 0x7c, 0x0a, 0x10, + 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x2f, 0x0a, 0x11, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x5f, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x0f, 0x64, 0x65, 0x61, 
0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x12, 0x37, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, + 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, + 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x22, 0x44, 0x0a, 0x10, 0x45, 0x78, + 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x22, 0x93, 0x05, 0x0a, 0x0a, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x28, 0x0a, 0x0d, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x70, 0x75, 0x73, + 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x0a, + 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, + 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, + 0x09, 0x6f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x58, 0x0a, 0x0e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x12, 0x4c, 0x0a, 0x0a, 0x6e, 0x6f, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4e, 0x6f, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x09, 0x6e, 0x6f, 0x57, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x72, 0x1a, 0x65, 0x0a, 0x09, 0x4f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1f, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x1a, 0x0f, 0x0a, 0x0d, 0x50, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x09, 0x4e, 0x6f, + 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x09, 0x0a, 0x07, 0x77, + 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x22, 0xf3, 0x03, 0x0a, 0x0e, 0x42, 0x69, 0x67, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x05, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x2a, 0x0a, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x33, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 
0x69, 0x6c, 0x22, + 0x8a, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, + 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, + 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, + 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x4d, 0x49, 0x53, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x04, 0x12, 0x23, 0x0a, 0x1f, 0x49, 0x4e, 0x5f, 0x54, 0x52, + 0x41, 0x4e, 0x53, 0x49, 0x54, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, + 0x45, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x22, 0xa0, 0x07, 0x0a, + 0x12, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x2c, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, + 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2c, + 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, + 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x66, 0x69, + 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x3d, 0x0a, 0x18, + 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x44, 0x61, 0x74, + 0x65, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x57, 0x0a, 0x0b, 0x74, + 0x65, 0x78, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x0b, 0x61, 0x76, 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x41, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, + 0x00, 0x52, 0x0a, 0x61, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, + 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, + 0xe0, 
0x41, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6d, + 0x61, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x1a, 0x0c, 0x0a, 0x0a, 0x54, 0x65, + 0x78, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x67, 0x0a, 0x0a, 0x41, 0x76, 0x72, 0x6f, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x22, 0x8a, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, + 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x15, + 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, + 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, + 0x4e, 0x44, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x49, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, + 0x49, 0x54, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x53, 0x54, + 0x52, 0x49, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x43, 0x48, + 0x45, 0x4d, 0x41, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x05, 0x42, 0x0f, + 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, + 0x9d, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x06, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x05, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x12, + 0x3e, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 
0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x2e, 0x0a, 0x10, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, + 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, + 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x22, + 0x68, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x19, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, + 0x73, 0x6b, 0x22, 0xaf, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, + 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x0a, + 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x6b, 0x0a, 0x19, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x4d, 0x6f, 0x64, 0x69, + 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x0b, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x70, 0x75, 0x73, + 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01, 0x0a, 0x0b, 0x50, 0x75, 0x6c, 0x6c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x18, 0x01, 0x52, 0x11, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, 0x12, 0x26, 0x0a, + 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x63, 0x0a, 0x0c, 0x50, 0x75, 0x6c, 0x6c, 
0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, + 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xbf, 0x01, 0x0a, 0x18, 0x4d, + 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, + 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x61, + 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x35, 0x0a, 0x14, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, + 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x61, + 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x82, 0x01, 0x0a, + 0x12, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, + 0x73, 0x22, 0xdb, 0x03, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, + 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, + 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3b, 0x0a, 0x17, 0x6d, 0x6f, 0x64, 0x69, + 0x66, 0x79, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 
0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, + 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x3a, 0x0a, 0x17, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x5f, + 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x6d, 0x6f, 0x64, + 0x69, 0x66, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x41, 0x63, 0x6b, 0x49, 0x64, + 0x73, 0x12, 0x42, 0x0a, 0x1b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x61, 0x63, 0x6b, 0x5f, + 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x18, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x6f, + 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, + 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, + 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x4f, + 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, + 0xa4, 0x08, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, + 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x72, 0x65, 0x63, + 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x72, 0x65, + 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x7f, + 0x0a, 0x18, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, + 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, + 0x6c, 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x17, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x93, 0x01, 0x0a, 0x20, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x64, + 0x65, 0x61, 0x64, 0x6c, 0x69, 
0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, + 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, + 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7c, 0x0a, 0x17, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x1a, 0xd3, 0x01, 0x0a, 0x17, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x2b, 0x0a, + 0x0f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x69, 0x6e, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x11, 0x75, 0x6e, + 0x6f, 0x72, 0x64, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x75, 0x6e, 0x6f, 0x72, + 0x64, 0x65, 0x72, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x18, 0x74, + 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, + 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x15, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x46, 0x61, 0x69, + 0x6c, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x1a, 0xa8, 0x01, 0x0a, 0x1d, 0x4d, 0x6f, + 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, + 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x69, 0x6e, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 
0x18, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, + 0x61, 0x72, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, + 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x74, + 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, 0x63, + 0x6b, 0x49, 0x64, 0x73, 0x1a, 0x9f, 0x01, 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x46, 0x0a, 0x1d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x5f, 0x6f, 0x6e, 0x63, 0x65, 0x5f, + 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1a, 0x65, 0x78, 0x61, + 0x63, 0x74, 0x6c, 0x79, 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x45, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xb0, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x0c, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x06, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x15, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, + 0x73, 0x6b, 0x22, 0x83, 0x03, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, + 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x1d, 0x0a, + 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x61, 0xea, 0x41, 0x5e, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x7d, 0x2a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x32, 0x08, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x58, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, + 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 
0x74, 0x22, 0xab, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x83, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x42, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x22, 0xe4, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x48, 0x00, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x08, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, + 0x01, 0xfa, 0x41, 0x20, 0x0a, 
0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x42, 0x08, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x53, 0x65, + 0x65, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb8, 0x0b, 0x0a, 0x09, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x72, 0x12, 0x71, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x30, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x91, 0x01, 0x0a, 0x0b, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x43, 0xda, 0x41, 0x11, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x01, 0x2a, 0x32, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, + 0x93, 0x01, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x20, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x43, 0xda, 0x41, 0x0e, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x01, 0x2a, 0x22, 0x27, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x77, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x2f, 0xda, + 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 
0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8a, + 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x23, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x16, + 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, + 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0xda, 0x41, 0x05, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xaa, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, + 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0xda, 0x41, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x12, 0x29, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 
0x65, 0x54, 0x6f, + 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x2f, 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x21, 0x2a, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, + 0x2f, 0x2a, 0x7d, 0x12, 0xad, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, + 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x22, 0x34, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x1a, 0x70, 0xca, 0x41, 0x15, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x55, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x32, 0xd2, 0x15, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x62, 0x65, 0x72, 0x12, 0xb4, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5e, 0xda, 0x41, 0x2b, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x70, 0x75, 0x73, 0x68, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x2a, 0x3a, 0x01, 0x2a, 0x1a, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa1, 0x01, 0x0a, 0x0f, + 0x47, 
0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0xda, 0x41, 0x0c, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, + 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, + 0xbb, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x58, 0xda, 0x41, 0x18, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x32, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa6, 0x01, + 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0xda, 0x41, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x9f, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x44, 0xda, 0x41, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x2a, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xcf, 0x01, 0x0a, 0x11, 0x4d, 0x6f, 0x64, + 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x76, 0xda, 0x41, 0x29, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x2c, 0x61, 0x63, 0x6b, 0x5f, + 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x3a, 0x01, 0x2a, 0x22, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, + 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x41, + 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, + 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x5b, 0xda, 0x41, 0x14, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, + 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x3a, 0x01, 0x2a, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, + 0x6c, 0x65, 0x64, 0x67, 0x65, 0x12, 0xd0, 0x01, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, + 0xda, 0x41, 0x2c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x69, 0x6d, 
0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, + 0x6c, 0x79, 0x2c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0xda, + 0x41, 0x19, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x6d, + 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x75, 0x6c, 0x6c, 0x12, 0x66, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, + 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, + 0x12, 0xbb, 0x01, 0x0a, 0x10, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, + 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x64, 0xda, 0x41, 0x18, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x43, 0x3a, 0x01, 0x2a, 0x22, 0x3e, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, + 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x89, + 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x22, 0x38, 0xda, 0x41, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x96, 0x01, 0x0a, 0x0d, 0x4c, + 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 
0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0xda, + 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x12, + 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x12, 0x97, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x40, 0xda, 0x41, 0x11, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x1a, 0x21, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa3, 0x01, + 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x4c, 0xda, 0x41, 0x14, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2f, 0x3a, 0x01, 0x2a, 0x32, 0x2a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x38, 0xda, 0x41, 0x08, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 
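+	// The tail of the descriptor (below) encodes the Subscriber service's
+	// Seek RPC, the service-level options 0xca, 0x41 (google.api.default_host,
+	// here "pubsub.googleapis.com") and 0xd2, 0x41 (google.api.oauth_scopes,
+	// the cloud-platform and pubsub scopes), and finally the file options,
+	// including the Go import path
+	// "cloud.google.com/go/pubsub/v2/apiv1/pubsubpb". rawDescGZIP, defined
+	// after the closing brace, compresses these bytes lazily on first use.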
0x12, 0x84, 0x01, 0x0a, 0x04, 0x53, 0x65, 0x65, 0x6b, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, + 0x65, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x65, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x65, 0x6b, 0x1a, 0x70, 0xca, 0x41, 0x15, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0xd2, 0x41, 0x55, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x42, 0xaa, 0x01, 0x0a, 0x14, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x35, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2f, 0x76, + 0x32, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x70, 0x62, + 0x3b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x70, 0x62, 0xaa, 0x02, 0x16, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x2e, + 0x56, 0x31, 0xca, 0x02, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x5c, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x19, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x50, 0x75, 0x62, + 0x53, 0x75, 0x62, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_pubsub_v1_pubsub_proto_rawDescOnce sync.Once + file_google_pubsub_v1_pubsub_proto_rawDescData = file_google_pubsub_v1_pubsub_proto_rawDesc +) + +func file_google_pubsub_v1_pubsub_proto_rawDescGZIP() []byte { + file_google_pubsub_v1_pubsub_proto_rawDescOnce.Do(func() { + file_google_pubsub_v1_pubsub_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_pubsub_v1_pubsub_proto_rawDescData) + }) + return file_google_pubsub_v1_pubsub_proto_rawDescData +} + +var file_google_pubsub_v1_pubsub_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_pubsub_v1_pubsub_proto_msgTypes = make([]protoimpl.MessageInfo, 83) +var file_google_pubsub_v1_pubsub_proto_goTypes = []any{ + (IngestionDataSourceSettings_AwsKinesis_State)(0), // 0: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.State + (IngestionDataSourceSettings_CloudStorage_State)(0), // 1: 
google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.State + (IngestionDataSourceSettings_AzureEventHubs_State)(0), // 2: google.pubsub.v1.IngestionDataSourceSettings.AzureEventHubs.State + (IngestionDataSourceSettings_AwsMsk_State)(0), // 3: google.pubsub.v1.IngestionDataSourceSettings.AwsMsk.State + (IngestionDataSourceSettings_ConfluentCloud_State)(0), // 4: google.pubsub.v1.IngestionDataSourceSettings.ConfluentCloud.State + (PlatformLogsSettings_Severity)(0), // 5: google.pubsub.v1.PlatformLogsSettings.Severity + (Topic_State)(0), // 6: google.pubsub.v1.Topic.State + (Subscription_State)(0), // 7: google.pubsub.v1.Subscription.State + (BigQueryConfig_State)(0), // 8: google.pubsub.v1.BigQueryConfig.State + (CloudStorageConfig_State)(0), // 9: google.pubsub.v1.CloudStorageConfig.State + (*MessageStoragePolicy)(nil), // 10: google.pubsub.v1.MessageStoragePolicy + (*SchemaSettings)(nil), // 11: google.pubsub.v1.SchemaSettings + (*IngestionDataSourceSettings)(nil), // 12: google.pubsub.v1.IngestionDataSourceSettings + (*PlatformLogsSettings)(nil), // 13: google.pubsub.v1.PlatformLogsSettings + (*IngestionFailureEvent)(nil), // 14: google.pubsub.v1.IngestionFailureEvent + (*JavaScriptUDF)(nil), // 15: google.pubsub.v1.JavaScriptUDF + (*MessageTransform)(nil), // 16: google.pubsub.v1.MessageTransform + (*Topic)(nil), // 17: google.pubsub.v1.Topic + (*PubsubMessage)(nil), // 18: google.pubsub.v1.PubsubMessage + (*GetTopicRequest)(nil), // 19: google.pubsub.v1.GetTopicRequest + (*UpdateTopicRequest)(nil), // 20: google.pubsub.v1.UpdateTopicRequest + (*PublishRequest)(nil), // 21: google.pubsub.v1.PublishRequest + (*PublishResponse)(nil), // 22: google.pubsub.v1.PublishResponse + (*ListTopicsRequest)(nil), // 23: google.pubsub.v1.ListTopicsRequest + (*ListTopicsResponse)(nil), // 24: google.pubsub.v1.ListTopicsResponse + (*ListTopicSubscriptionsRequest)(nil), // 25: google.pubsub.v1.ListTopicSubscriptionsRequest + (*ListTopicSubscriptionsResponse)(nil), // 26: google.pubsub.v1.ListTopicSubscriptionsResponse + (*ListTopicSnapshotsRequest)(nil), // 27: google.pubsub.v1.ListTopicSnapshotsRequest + (*ListTopicSnapshotsResponse)(nil), // 28: google.pubsub.v1.ListTopicSnapshotsResponse + (*DeleteTopicRequest)(nil), // 29: google.pubsub.v1.DeleteTopicRequest + (*DetachSubscriptionRequest)(nil), // 30: google.pubsub.v1.DetachSubscriptionRequest + (*DetachSubscriptionResponse)(nil), // 31: google.pubsub.v1.DetachSubscriptionResponse + (*Subscription)(nil), // 32: google.pubsub.v1.Subscription + (*RetryPolicy)(nil), // 33: google.pubsub.v1.RetryPolicy + (*DeadLetterPolicy)(nil), // 34: google.pubsub.v1.DeadLetterPolicy + (*ExpirationPolicy)(nil), // 35: google.pubsub.v1.ExpirationPolicy + (*PushConfig)(nil), // 36: google.pubsub.v1.PushConfig + (*BigQueryConfig)(nil), // 37: google.pubsub.v1.BigQueryConfig + (*CloudStorageConfig)(nil), // 38: google.pubsub.v1.CloudStorageConfig + (*ReceivedMessage)(nil), // 39: google.pubsub.v1.ReceivedMessage + (*GetSubscriptionRequest)(nil), // 40: google.pubsub.v1.GetSubscriptionRequest + (*UpdateSubscriptionRequest)(nil), // 41: google.pubsub.v1.UpdateSubscriptionRequest + (*ListSubscriptionsRequest)(nil), // 42: google.pubsub.v1.ListSubscriptionsRequest + (*ListSubscriptionsResponse)(nil), // 43: google.pubsub.v1.ListSubscriptionsResponse + (*DeleteSubscriptionRequest)(nil), // 44: google.pubsub.v1.DeleteSubscriptionRequest + (*ModifyPushConfigRequest)(nil), // 45: google.pubsub.v1.ModifyPushConfigRequest + (*PullRequest)(nil), // 46: 
google.pubsub.v1.PullRequest + (*PullResponse)(nil), // 47: google.pubsub.v1.PullResponse + (*ModifyAckDeadlineRequest)(nil), // 48: google.pubsub.v1.ModifyAckDeadlineRequest + (*AcknowledgeRequest)(nil), // 49: google.pubsub.v1.AcknowledgeRequest + (*StreamingPullRequest)(nil), // 50: google.pubsub.v1.StreamingPullRequest + (*StreamingPullResponse)(nil), // 51: google.pubsub.v1.StreamingPullResponse + (*CreateSnapshotRequest)(nil), // 52: google.pubsub.v1.CreateSnapshotRequest + (*UpdateSnapshotRequest)(nil), // 53: google.pubsub.v1.UpdateSnapshotRequest + (*Snapshot)(nil), // 54: google.pubsub.v1.Snapshot + (*GetSnapshotRequest)(nil), // 55: google.pubsub.v1.GetSnapshotRequest + (*ListSnapshotsRequest)(nil), // 56: google.pubsub.v1.ListSnapshotsRequest + (*ListSnapshotsResponse)(nil), // 57: google.pubsub.v1.ListSnapshotsResponse + (*DeleteSnapshotRequest)(nil), // 58: google.pubsub.v1.DeleteSnapshotRequest + (*SeekRequest)(nil), // 59: google.pubsub.v1.SeekRequest + (*SeekResponse)(nil), // 60: google.pubsub.v1.SeekResponse + (*IngestionDataSourceSettings_AwsKinesis)(nil), // 61: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis + (*IngestionDataSourceSettings_CloudStorage)(nil), // 62: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage + (*IngestionDataSourceSettings_AzureEventHubs)(nil), // 63: google.pubsub.v1.IngestionDataSourceSettings.AzureEventHubs + (*IngestionDataSourceSettings_AwsMsk)(nil), // 64: google.pubsub.v1.IngestionDataSourceSettings.AwsMsk + (*IngestionDataSourceSettings_ConfluentCloud)(nil), // 65: google.pubsub.v1.IngestionDataSourceSettings.ConfluentCloud + (*IngestionDataSourceSettings_CloudStorage_TextFormat)(nil), // 66: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.TextFormat + (*IngestionDataSourceSettings_CloudStorage_AvroFormat)(nil), // 67: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.AvroFormat + (*IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat)(nil), // 68: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.PubSubAvroFormat + (*IngestionFailureEvent_ApiViolationReason)(nil), // 69: google.pubsub.v1.IngestionFailureEvent.ApiViolationReason + (*IngestionFailureEvent_AvroFailureReason)(nil), // 70: google.pubsub.v1.IngestionFailureEvent.AvroFailureReason + (*IngestionFailureEvent_SchemaViolationReason)(nil), // 71: google.pubsub.v1.IngestionFailureEvent.SchemaViolationReason + (*IngestionFailureEvent_MessageTransformationFailureReason)(nil), // 72: google.pubsub.v1.IngestionFailureEvent.MessageTransformationFailureReason + (*IngestionFailureEvent_CloudStorageFailure)(nil), // 73: google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure + (*IngestionFailureEvent_AwsMskFailureReason)(nil), // 74: google.pubsub.v1.IngestionFailureEvent.AwsMskFailureReason + (*IngestionFailureEvent_AzureEventHubsFailureReason)(nil), // 75: google.pubsub.v1.IngestionFailureEvent.AzureEventHubsFailureReason + (*IngestionFailureEvent_ConfluentCloudFailureReason)(nil), // 76: google.pubsub.v1.IngestionFailureEvent.ConfluentCloudFailureReason + (*IngestionFailureEvent_AwsKinesisFailureReason)(nil), // 77: google.pubsub.v1.IngestionFailureEvent.AwsKinesisFailureReason + nil, // 78: google.pubsub.v1.Topic.LabelsEntry + nil, // 79: google.pubsub.v1.PubsubMessage.AttributesEntry + (*Subscription_AnalyticsHubSubscriptionInfo)(nil), // 80: google.pubsub.v1.Subscription.AnalyticsHubSubscriptionInfo + nil, // 81: google.pubsub.v1.Subscription.LabelsEntry + (*PushConfig_OidcToken)(nil), // 82: google.pubsub.v1.PushConfig.OidcToken + 
(*PushConfig_PubsubWrapper)(nil), // 83: google.pubsub.v1.PushConfig.PubsubWrapper + (*PushConfig_NoWrapper)(nil), // 84: google.pubsub.v1.PushConfig.NoWrapper + nil, // 85: google.pubsub.v1.PushConfig.AttributesEntry + (*CloudStorageConfig_TextConfig)(nil), // 86: google.pubsub.v1.CloudStorageConfig.TextConfig + (*CloudStorageConfig_AvroConfig)(nil), // 87: google.pubsub.v1.CloudStorageConfig.AvroConfig + (*StreamingPullResponse_AcknowledgeConfirmation)(nil), // 88: google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation + (*StreamingPullResponse_ModifyAckDeadlineConfirmation)(nil), // 89: google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation + (*StreamingPullResponse_SubscriptionProperties)(nil), // 90: google.pubsub.v1.StreamingPullResponse.SubscriptionProperties + nil, // 91: google.pubsub.v1.CreateSnapshotRequest.LabelsEntry + nil, // 92: google.pubsub.v1.Snapshot.LabelsEntry + (Encoding)(0), // 93: google.pubsub.v1.Encoding + (*durationpb.Duration)(nil), // 94: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 95: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 96: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 97: google.protobuf.Empty +} +var file_google_pubsub_v1_pubsub_proto_depIdxs = []int32{ + 93, // 0: google.pubsub.v1.SchemaSettings.encoding:type_name -> google.pubsub.v1.Encoding + 61, // 1: google.pubsub.v1.IngestionDataSourceSettings.aws_kinesis:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis + 62, // 2: google.pubsub.v1.IngestionDataSourceSettings.cloud_storage:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage + 63, // 3: google.pubsub.v1.IngestionDataSourceSettings.azure_event_hubs:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AzureEventHubs + 64, // 4: google.pubsub.v1.IngestionDataSourceSettings.aws_msk:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsMsk + 65, // 5: google.pubsub.v1.IngestionDataSourceSettings.confluent_cloud:type_name -> google.pubsub.v1.IngestionDataSourceSettings.ConfluentCloud + 13, // 6: google.pubsub.v1.IngestionDataSourceSettings.platform_logs_settings:type_name -> google.pubsub.v1.PlatformLogsSettings + 5, // 7: google.pubsub.v1.PlatformLogsSettings.severity:type_name -> google.pubsub.v1.PlatformLogsSettings.Severity + 73, // 8: google.pubsub.v1.IngestionFailureEvent.cloud_storage_failure:type_name -> google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure + 74, // 9: google.pubsub.v1.IngestionFailureEvent.aws_msk_failure:type_name -> google.pubsub.v1.IngestionFailureEvent.AwsMskFailureReason + 75, // 10: google.pubsub.v1.IngestionFailureEvent.azure_event_hubs_failure:type_name -> google.pubsub.v1.IngestionFailureEvent.AzureEventHubsFailureReason + 76, // 11: google.pubsub.v1.IngestionFailureEvent.confluent_cloud_failure:type_name -> google.pubsub.v1.IngestionFailureEvent.ConfluentCloudFailureReason + 77, // 12: google.pubsub.v1.IngestionFailureEvent.aws_kinesis_failure:type_name -> google.pubsub.v1.IngestionFailureEvent.AwsKinesisFailureReason + 15, // 13: google.pubsub.v1.MessageTransform.javascript_udf:type_name -> google.pubsub.v1.JavaScriptUDF + 78, // 14: google.pubsub.v1.Topic.labels:type_name -> google.pubsub.v1.Topic.LabelsEntry + 10, // 15: google.pubsub.v1.Topic.message_storage_policy:type_name -> google.pubsub.v1.MessageStoragePolicy + 11, // 16: google.pubsub.v1.Topic.schema_settings:type_name -> google.pubsub.v1.SchemaSettings + 94, // 17: google.pubsub.v1.Topic.message_retention_duration:type_name -> 
google.protobuf.Duration + 6, // 18: google.pubsub.v1.Topic.state:type_name -> google.pubsub.v1.Topic.State + 12, // 19: google.pubsub.v1.Topic.ingestion_data_source_settings:type_name -> google.pubsub.v1.IngestionDataSourceSettings + 16, // 20: google.pubsub.v1.Topic.message_transforms:type_name -> google.pubsub.v1.MessageTransform + 79, // 21: google.pubsub.v1.PubsubMessage.attributes:type_name -> google.pubsub.v1.PubsubMessage.AttributesEntry + 95, // 22: google.pubsub.v1.PubsubMessage.publish_time:type_name -> google.protobuf.Timestamp + 17, // 23: google.pubsub.v1.UpdateTopicRequest.topic:type_name -> google.pubsub.v1.Topic + 96, // 24: google.pubsub.v1.UpdateTopicRequest.update_mask:type_name -> google.protobuf.FieldMask + 18, // 25: google.pubsub.v1.PublishRequest.messages:type_name -> google.pubsub.v1.PubsubMessage + 17, // 26: google.pubsub.v1.ListTopicsResponse.topics:type_name -> google.pubsub.v1.Topic + 36, // 27: google.pubsub.v1.Subscription.push_config:type_name -> google.pubsub.v1.PushConfig + 37, // 28: google.pubsub.v1.Subscription.bigquery_config:type_name -> google.pubsub.v1.BigQueryConfig + 38, // 29: google.pubsub.v1.Subscription.cloud_storage_config:type_name -> google.pubsub.v1.CloudStorageConfig + 94, // 30: google.pubsub.v1.Subscription.message_retention_duration:type_name -> google.protobuf.Duration + 81, // 31: google.pubsub.v1.Subscription.labels:type_name -> google.pubsub.v1.Subscription.LabelsEntry + 35, // 32: google.pubsub.v1.Subscription.expiration_policy:type_name -> google.pubsub.v1.ExpirationPolicy + 34, // 33: google.pubsub.v1.Subscription.dead_letter_policy:type_name -> google.pubsub.v1.DeadLetterPolicy + 33, // 34: google.pubsub.v1.Subscription.retry_policy:type_name -> google.pubsub.v1.RetryPolicy + 94, // 35: google.pubsub.v1.Subscription.topic_message_retention_duration:type_name -> google.protobuf.Duration + 7, // 36: google.pubsub.v1.Subscription.state:type_name -> google.pubsub.v1.Subscription.State + 80, // 37: google.pubsub.v1.Subscription.analytics_hub_subscription_info:type_name -> google.pubsub.v1.Subscription.AnalyticsHubSubscriptionInfo + 16, // 38: google.pubsub.v1.Subscription.message_transforms:type_name -> google.pubsub.v1.MessageTransform + 94, // 39: google.pubsub.v1.RetryPolicy.minimum_backoff:type_name -> google.protobuf.Duration + 94, // 40: google.pubsub.v1.RetryPolicy.maximum_backoff:type_name -> google.protobuf.Duration + 94, // 41: google.pubsub.v1.ExpirationPolicy.ttl:type_name -> google.protobuf.Duration + 85, // 42: google.pubsub.v1.PushConfig.attributes:type_name -> google.pubsub.v1.PushConfig.AttributesEntry + 82, // 43: google.pubsub.v1.PushConfig.oidc_token:type_name -> google.pubsub.v1.PushConfig.OidcToken + 83, // 44: google.pubsub.v1.PushConfig.pubsub_wrapper:type_name -> google.pubsub.v1.PushConfig.PubsubWrapper + 84, // 45: google.pubsub.v1.PushConfig.no_wrapper:type_name -> google.pubsub.v1.PushConfig.NoWrapper + 8, // 46: google.pubsub.v1.BigQueryConfig.state:type_name -> google.pubsub.v1.BigQueryConfig.State + 86, // 47: google.pubsub.v1.CloudStorageConfig.text_config:type_name -> google.pubsub.v1.CloudStorageConfig.TextConfig + 87, // 48: google.pubsub.v1.CloudStorageConfig.avro_config:type_name -> google.pubsub.v1.CloudStorageConfig.AvroConfig + 94, // 49: google.pubsub.v1.CloudStorageConfig.max_duration:type_name -> google.protobuf.Duration + 9, // 50: google.pubsub.v1.CloudStorageConfig.state:type_name -> google.pubsub.v1.CloudStorageConfig.State + 18, // 51: 
google.pubsub.v1.ReceivedMessage.message:type_name -> google.pubsub.v1.PubsubMessage + 32, // 52: google.pubsub.v1.UpdateSubscriptionRequest.subscription:type_name -> google.pubsub.v1.Subscription + 96, // 53: google.pubsub.v1.UpdateSubscriptionRequest.update_mask:type_name -> google.protobuf.FieldMask + 32, // 54: google.pubsub.v1.ListSubscriptionsResponse.subscriptions:type_name -> google.pubsub.v1.Subscription + 36, // 55: google.pubsub.v1.ModifyPushConfigRequest.push_config:type_name -> google.pubsub.v1.PushConfig + 39, // 56: google.pubsub.v1.PullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage + 39, // 57: google.pubsub.v1.StreamingPullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage + 88, // 58: google.pubsub.v1.StreamingPullResponse.acknowledge_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation + 89, // 59: google.pubsub.v1.StreamingPullResponse.modify_ack_deadline_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation + 90, // 60: google.pubsub.v1.StreamingPullResponse.subscription_properties:type_name -> google.pubsub.v1.StreamingPullResponse.SubscriptionProperties + 91, // 61: google.pubsub.v1.CreateSnapshotRequest.labels:type_name -> google.pubsub.v1.CreateSnapshotRequest.LabelsEntry + 54, // 62: google.pubsub.v1.UpdateSnapshotRequest.snapshot:type_name -> google.pubsub.v1.Snapshot + 96, // 63: google.pubsub.v1.UpdateSnapshotRequest.update_mask:type_name -> google.protobuf.FieldMask + 95, // 64: google.pubsub.v1.Snapshot.expire_time:type_name -> google.protobuf.Timestamp + 92, // 65: google.pubsub.v1.Snapshot.labels:type_name -> google.pubsub.v1.Snapshot.LabelsEntry + 54, // 66: google.pubsub.v1.ListSnapshotsResponse.snapshots:type_name -> google.pubsub.v1.Snapshot + 95, // 67: google.pubsub.v1.SeekRequest.time:type_name -> google.protobuf.Timestamp + 0, // 68: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.state:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.State + 1, // 69: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.state:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.State + 66, // 70: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.text_format:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.TextFormat + 67, // 71: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.avro_format:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.AvroFormat + 68, // 72: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.pubsub_avro_format:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.PubSubAvroFormat + 95, // 73: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.minimum_object_create_time:type_name -> google.protobuf.Timestamp + 2, // 74: google.pubsub.v1.IngestionDataSourceSettings.AzureEventHubs.state:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AzureEventHubs.State + 3, // 75: google.pubsub.v1.IngestionDataSourceSettings.AwsMsk.state:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsMsk.State + 4, // 76: google.pubsub.v1.IngestionDataSourceSettings.ConfluentCloud.state:type_name -> google.pubsub.v1.IngestionDataSourceSettings.ConfluentCloud.State + 70, // 77: google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure.avro_failure_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.AvroFailureReason + 69, // 78: 
google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure.api_violation_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.ApiViolationReason + 71, // 79: google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure.schema_violation_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.SchemaViolationReason + 72, // 80: google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure.message_transformation_failure_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.MessageTransformationFailureReason + 69, // 81: google.pubsub.v1.IngestionFailureEvent.AwsMskFailureReason.api_violation_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.ApiViolationReason + 71, // 82: google.pubsub.v1.IngestionFailureEvent.AwsMskFailureReason.schema_violation_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.SchemaViolationReason + 72, // 83: google.pubsub.v1.IngestionFailureEvent.AwsMskFailureReason.message_transformation_failure_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.MessageTransformationFailureReason + 69, // 84: google.pubsub.v1.IngestionFailureEvent.AzureEventHubsFailureReason.api_violation_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.ApiViolationReason + 71, // 85: google.pubsub.v1.IngestionFailureEvent.AzureEventHubsFailureReason.schema_violation_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.SchemaViolationReason + 72, // 86: google.pubsub.v1.IngestionFailureEvent.AzureEventHubsFailureReason.message_transformation_failure_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.MessageTransformationFailureReason + 69, // 87: google.pubsub.v1.IngestionFailureEvent.ConfluentCloudFailureReason.api_violation_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.ApiViolationReason + 71, // 88: google.pubsub.v1.IngestionFailureEvent.ConfluentCloudFailureReason.schema_violation_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.SchemaViolationReason + 72, // 89: google.pubsub.v1.IngestionFailureEvent.ConfluentCloudFailureReason.message_transformation_failure_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.MessageTransformationFailureReason + 71, // 90: google.pubsub.v1.IngestionFailureEvent.AwsKinesisFailureReason.schema_violation_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.SchemaViolationReason + 72, // 91: google.pubsub.v1.IngestionFailureEvent.AwsKinesisFailureReason.message_transformation_failure_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.MessageTransformationFailureReason + 17, // 92: google.pubsub.v1.Publisher.CreateTopic:input_type -> google.pubsub.v1.Topic + 20, // 93: google.pubsub.v1.Publisher.UpdateTopic:input_type -> google.pubsub.v1.UpdateTopicRequest + 21, // 94: google.pubsub.v1.Publisher.Publish:input_type -> google.pubsub.v1.PublishRequest + 19, // 95: google.pubsub.v1.Publisher.GetTopic:input_type -> google.pubsub.v1.GetTopicRequest + 23, // 96: google.pubsub.v1.Publisher.ListTopics:input_type -> google.pubsub.v1.ListTopicsRequest + 25, // 97: google.pubsub.v1.Publisher.ListTopicSubscriptions:input_type -> google.pubsub.v1.ListTopicSubscriptionsRequest + 27, // 98: google.pubsub.v1.Publisher.ListTopicSnapshots:input_type -> google.pubsub.v1.ListTopicSnapshotsRequest + 29, // 99: google.pubsub.v1.Publisher.DeleteTopic:input_type -> google.pubsub.v1.DeleteTopicRequest + 30, // 100: google.pubsub.v1.Publisher.DetachSubscription:input_type -> google.pubsub.v1.DetachSubscriptionRequest + 32, // 101: google.pubsub.v1.Subscriber.CreateSubscription:input_type -> 
google.pubsub.v1.Subscription + 40, // 102: google.pubsub.v1.Subscriber.GetSubscription:input_type -> google.pubsub.v1.GetSubscriptionRequest + 41, // 103: google.pubsub.v1.Subscriber.UpdateSubscription:input_type -> google.pubsub.v1.UpdateSubscriptionRequest + 42, // 104: google.pubsub.v1.Subscriber.ListSubscriptions:input_type -> google.pubsub.v1.ListSubscriptionsRequest + 44, // 105: google.pubsub.v1.Subscriber.DeleteSubscription:input_type -> google.pubsub.v1.DeleteSubscriptionRequest + 48, // 106: google.pubsub.v1.Subscriber.ModifyAckDeadline:input_type -> google.pubsub.v1.ModifyAckDeadlineRequest + 49, // 107: google.pubsub.v1.Subscriber.Acknowledge:input_type -> google.pubsub.v1.AcknowledgeRequest + 46, // 108: google.pubsub.v1.Subscriber.Pull:input_type -> google.pubsub.v1.PullRequest + 50, // 109: google.pubsub.v1.Subscriber.StreamingPull:input_type -> google.pubsub.v1.StreamingPullRequest + 45, // 110: google.pubsub.v1.Subscriber.ModifyPushConfig:input_type -> google.pubsub.v1.ModifyPushConfigRequest + 55, // 111: google.pubsub.v1.Subscriber.GetSnapshot:input_type -> google.pubsub.v1.GetSnapshotRequest + 56, // 112: google.pubsub.v1.Subscriber.ListSnapshots:input_type -> google.pubsub.v1.ListSnapshotsRequest + 52, // 113: google.pubsub.v1.Subscriber.CreateSnapshot:input_type -> google.pubsub.v1.CreateSnapshotRequest + 53, // 114: google.pubsub.v1.Subscriber.UpdateSnapshot:input_type -> google.pubsub.v1.UpdateSnapshotRequest + 58, // 115: google.pubsub.v1.Subscriber.DeleteSnapshot:input_type -> google.pubsub.v1.DeleteSnapshotRequest + 59, // 116: google.pubsub.v1.Subscriber.Seek:input_type -> google.pubsub.v1.SeekRequest + 17, // 117: google.pubsub.v1.Publisher.CreateTopic:output_type -> google.pubsub.v1.Topic + 17, // 118: google.pubsub.v1.Publisher.UpdateTopic:output_type -> google.pubsub.v1.Topic + 22, // 119: google.pubsub.v1.Publisher.Publish:output_type -> google.pubsub.v1.PublishResponse + 17, // 120: google.pubsub.v1.Publisher.GetTopic:output_type -> google.pubsub.v1.Topic + 24, // 121: google.pubsub.v1.Publisher.ListTopics:output_type -> google.pubsub.v1.ListTopicsResponse + 26, // 122: google.pubsub.v1.Publisher.ListTopicSubscriptions:output_type -> google.pubsub.v1.ListTopicSubscriptionsResponse + 28, // 123: google.pubsub.v1.Publisher.ListTopicSnapshots:output_type -> google.pubsub.v1.ListTopicSnapshotsResponse + 97, // 124: google.pubsub.v1.Publisher.DeleteTopic:output_type -> google.protobuf.Empty + 31, // 125: google.pubsub.v1.Publisher.DetachSubscription:output_type -> google.pubsub.v1.DetachSubscriptionResponse + 32, // 126: google.pubsub.v1.Subscriber.CreateSubscription:output_type -> google.pubsub.v1.Subscription + 32, // 127: google.pubsub.v1.Subscriber.GetSubscription:output_type -> google.pubsub.v1.Subscription + 32, // 128: google.pubsub.v1.Subscriber.UpdateSubscription:output_type -> google.pubsub.v1.Subscription + 43, // 129: google.pubsub.v1.Subscriber.ListSubscriptions:output_type -> google.pubsub.v1.ListSubscriptionsResponse + 97, // 130: google.pubsub.v1.Subscriber.DeleteSubscription:output_type -> google.protobuf.Empty + 97, // 131: google.pubsub.v1.Subscriber.ModifyAckDeadline:output_type -> google.protobuf.Empty + 97, // 132: google.pubsub.v1.Subscriber.Acknowledge:output_type -> google.protobuf.Empty + 47, // 133: google.pubsub.v1.Subscriber.Pull:output_type -> google.pubsub.v1.PullResponse + 51, // 134: google.pubsub.v1.Subscriber.StreamingPull:output_type -> google.pubsub.v1.StreamingPullResponse + 97, // 135: 
google.pubsub.v1.Subscriber.ModifyPushConfig:output_type -> google.protobuf.Empty + 54, // 136: google.pubsub.v1.Subscriber.GetSnapshot:output_type -> google.pubsub.v1.Snapshot + 57, // 137: google.pubsub.v1.Subscriber.ListSnapshots:output_type -> google.pubsub.v1.ListSnapshotsResponse + 54, // 138: google.pubsub.v1.Subscriber.CreateSnapshot:output_type -> google.pubsub.v1.Snapshot + 54, // 139: google.pubsub.v1.Subscriber.UpdateSnapshot:output_type -> google.pubsub.v1.Snapshot + 97, // 140: google.pubsub.v1.Subscriber.DeleteSnapshot:output_type -> google.protobuf.Empty + 60, // 141: google.pubsub.v1.Subscriber.Seek:output_type -> google.pubsub.v1.SeekResponse + 117, // [117:142] is the sub-list for method output_type + 92, // [92:117] is the sub-list for method input_type + 92, // [92:92] is the sub-list for extension type_name + 92, // [92:92] is the sub-list for extension extendee + 0, // [0:92] is the sub-list for field type_name +} + +func init() { file_google_pubsub_v1_pubsub_proto_init() } +func file_google_pubsub_v1_pubsub_proto_init() { + if File_google_pubsub_v1_pubsub_proto != nil { + return + } + file_google_pubsub_v1_schema_proto_init() + file_google_pubsub_v1_pubsub_proto_msgTypes[2].OneofWrappers = []any{ + (*IngestionDataSourceSettings_AwsKinesis_)(nil), + (*IngestionDataSourceSettings_CloudStorage_)(nil), + (*IngestionDataSourceSettings_AzureEventHubs_)(nil), + (*IngestionDataSourceSettings_AwsMsk_)(nil), + (*IngestionDataSourceSettings_ConfluentCloud_)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[4].OneofWrappers = []any{ + (*IngestionFailureEvent_CloudStorageFailure_)(nil), + (*IngestionFailureEvent_AwsMskFailure)(nil), + (*IngestionFailureEvent_AzureEventHubsFailure)(nil), + (*IngestionFailureEvent_ConfluentCloudFailure)(nil), + (*IngestionFailureEvent_AwsKinesisFailure)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[6].OneofWrappers = []any{ + (*MessageTransform_JavascriptUdf)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[26].OneofWrappers = []any{ + (*PushConfig_OidcToken_)(nil), + (*PushConfig_PubsubWrapper_)(nil), + (*PushConfig_NoWrapper_)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[28].OneofWrappers = []any{ + (*CloudStorageConfig_TextConfig_)(nil), + (*CloudStorageConfig_AvroConfig_)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[49].OneofWrappers = []any{ + (*SeekRequest_Time)(nil), + (*SeekRequest_Snapshot)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[52].OneofWrappers = []any{ + (*IngestionDataSourceSettings_CloudStorage_TextFormat_)(nil), + (*IngestionDataSourceSettings_CloudStorage_AvroFormat_)(nil), + (*IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[56].OneofWrappers = []any{} + file_google_pubsub_v1_pubsub_proto_msgTypes[63].OneofWrappers = []any{ + (*IngestionFailureEvent_CloudStorageFailure_AvroFailureReason)(nil), + (*IngestionFailureEvent_CloudStorageFailure_ApiViolationReason)(nil), + (*IngestionFailureEvent_CloudStorageFailure_SchemaViolationReason)(nil), + (*IngestionFailureEvent_CloudStorageFailure_MessageTransformationFailureReason)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[64].OneofWrappers = []any{ + (*IngestionFailureEvent_AwsMskFailureReason_ApiViolationReason)(nil), + (*IngestionFailureEvent_AwsMskFailureReason_SchemaViolationReason)(nil), + (*IngestionFailureEvent_AwsMskFailureReason_MessageTransformationFailureReason)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[65].OneofWrappers = []any{ 
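+ // Each entry is the generated single-field wrapper struct for one oneof
+ // member; registering the full set lets the protoimpl runtime pick the
+ // correct concrete wrapper when it decodes the oneof off the wire.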
+ (*IngestionFailureEvent_AzureEventHubsFailureReason_ApiViolationReason)(nil), + (*IngestionFailureEvent_AzureEventHubsFailureReason_SchemaViolationReason)(nil), + (*IngestionFailureEvent_AzureEventHubsFailureReason_MessageTransformationFailureReason)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[66].OneofWrappers = []any{ + (*IngestionFailureEvent_ConfluentCloudFailureReason_ApiViolationReason)(nil), + (*IngestionFailureEvent_ConfluentCloudFailureReason_SchemaViolationReason)(nil), + (*IngestionFailureEvent_ConfluentCloudFailureReason_MessageTransformationFailureReason)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[67].OneofWrappers = []any{ + (*IngestionFailureEvent_AwsKinesisFailureReason_SchemaViolationReason)(nil), + (*IngestionFailureEvent_AwsKinesisFailureReason_MessageTransformationFailureReason)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_pubsub_v1_pubsub_proto_rawDesc, + NumEnums: 10, + NumMessages: 83, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_google_pubsub_v1_pubsub_proto_goTypes, + DependencyIndexes: file_google_pubsub_v1_pubsub_proto_depIdxs, + EnumInfos: file_google_pubsub_v1_pubsub_proto_enumTypes, + MessageInfos: file_google_pubsub_v1_pubsub_proto_msgTypes, + }.Build() + File_google_pubsub_v1_pubsub_proto = out.File + file_google_pubsub_v1_pubsub_proto_rawDesc = nil + file_google_pubsub_v1_pubsub_proto_goTypes = nil + file_google_pubsub_v1_pubsub_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// PublisherClient is the client API for Publisher service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type PublisherClient interface { + // Creates the given topic with the given name. See the [resource name rules] + // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). + CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error) + // Updates an existing topic by updating the fields specified in the update + // mask. Note that certain properties of a topic are not modifiable. + UpdateTopic(ctx context.Context, in *UpdateTopicRequest, opts ...grpc.CallOption) (*Topic, error) + // Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic + // does not exist. + Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishResponse, error) + // Gets the configuration of a topic. + GetTopic(ctx context.Context, in *GetTopicRequest, opts ...grpc.CallOption) (*Topic, error) + // Lists matching topics. + ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) + // Lists the names of the attached subscriptions on this topic. + ListTopicSubscriptions(ctx context.Context, in *ListTopicSubscriptionsRequest, opts ...grpc.CallOption) (*ListTopicSubscriptionsResponse, error) + // Lists the names of the snapshots on this topic. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. 
That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + ListTopicSnapshots(ctx context.Context, in *ListTopicSnapshotsRequest, opts ...grpc.CallOption) (*ListTopicSnapshotsResponse, error) + // Deletes the topic with the given name. Returns `NOT_FOUND` if the topic + // does not exist. After a topic is deleted, a new topic may be created with + // the same name; this is an entirely new topic with none of the old + // configuration or subscriptions. Existing subscriptions to this topic are + // not deleted, but their `topic` field is set to `_deleted-topic_`. + DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Detaches a subscription from this topic. All messages retained in the + // subscription are dropped. Subsequent `Pull` and `StreamingPull` requests + // will return FAILED_PRECONDITION. If the subscription is a push + // subscription, pushes to the endpoint will stop. + DetachSubscription(ctx context.Context, in *DetachSubscriptionRequest, opts ...grpc.CallOption) (*DetachSubscriptionResponse, error) +} + +type publisherClient struct { + cc grpc.ClientConnInterface +} + +func NewPublisherClient(cc grpc.ClientConnInterface) PublisherClient { + return &publisherClient{cc} +} + +func (c *publisherClient) CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error) { + out := new(Topic) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/CreateTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) UpdateTopic(ctx context.Context, in *UpdateTopicRequest, opts ...grpc.CallOption) (*Topic, error) { + out := new(Topic) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/UpdateTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishResponse, error) { + out := new(PublishResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/Publish", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) GetTopic(ctx context.Context, in *GetTopicRequest, opts ...grpc.CallOption) (*Topic, error) { + out := new(Topic) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/GetTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) { + out := new(ListTopicsResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/ListTopics", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) ListTopicSubscriptions(ctx context.Context, in *ListTopicSubscriptionsRequest, opts ...grpc.CallOption) (*ListTopicSubscriptionsResponse, error) { + out := new(ListTopicSubscriptionsResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/ListTopicSubscriptions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) ListTopicSnapshots(ctx context.Context, in *ListTopicSnapshotsRequest, opts ...grpc.CallOption) (*ListTopicSnapshotsResponse, error) { + out := new(ListTopicSnapshotsResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/ListTopicSnapshots", in, out, opts...) 
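+ // Invoke performs the unary RPC identified by the full method name on the
+ // shared ClientConnInterface: it marshals in with the proto codec, blocks
+ // until the reply arrives, and unmarshals it into out. A minimal usage
+ // sketch, assuming conn is a *grpc.ClientConn the caller has already
+ // dialed and the request literal is purely illustrative:
+ //
+ //	client := NewPublisherClient(conn)
+ //	resp, err := client.ListTopicSnapshots(ctx, &ListTopicSnapshotsRequest{Topic: "projects/p/topics/t"})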
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/DeleteTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) DetachSubscription(ctx context.Context, in *DetachSubscriptionRequest, opts ...grpc.CallOption) (*DetachSubscriptionResponse, error) { + out := new(DetachSubscriptionResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/DetachSubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PublisherServer is the server API for Publisher service. +type PublisherServer interface { + // Creates the given topic with the given name. See the [resource name rules] + // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). + CreateTopic(context.Context, *Topic) (*Topic, error) + // Updates an existing topic by updating the fields specified in the update + // mask. Note that certain properties of a topic are not modifiable. + UpdateTopic(context.Context, *UpdateTopicRequest) (*Topic, error) + // Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic + // does not exist. + Publish(context.Context, *PublishRequest) (*PublishResponse, error) + // Gets the configuration of a topic. + GetTopic(context.Context, *GetTopicRequest) (*Topic, error) + // Lists matching topics. + ListTopics(context.Context, *ListTopicsRequest) (*ListTopicsResponse, error) + // Lists the names of the attached subscriptions on this topic. + ListTopicSubscriptions(context.Context, *ListTopicSubscriptionsRequest) (*ListTopicSubscriptionsResponse, error) + // Lists the names of the snapshots on this topic. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + ListTopicSnapshots(context.Context, *ListTopicSnapshotsRequest) (*ListTopicSnapshotsResponse, error) + // Deletes the topic with the given name. Returns `NOT_FOUND` if the topic + // does not exist. After a topic is deleted, a new topic may be created with + // the same name; this is an entirely new topic with none of the old + // configuration or subscriptions. Existing subscriptions to this topic are + // not deleted, but their `topic` field is set to `_deleted-topic_`. + DeleteTopic(context.Context, *DeleteTopicRequest) (*emptypb.Empty, error) + // Detaches a subscription from this topic. All messages retained in the + // subscription are dropped. Subsequent `Pull` and `StreamingPull` requests + // will return FAILED_PRECONDITION. If the subscription is a push + // subscription, pushes to the endpoint will stop. + DetachSubscription(context.Context, *DetachSubscriptionRequest) (*DetachSubscriptionResponse, error) +} + +// UnimplementedPublisherServer can be embedded to have forward compatible implementations. 
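+// Embedding it means a server type only has to implement the RPCs it cares
+// about; every other method resolves to the stubs below and returns
+// codes.Unimplemented, rather than breaking the build when the service
+// gains methods. A minimal sketch (myPublisher is illustrative, not part of
+// this package):
+//
+//	type myPublisher struct {
+//		UnimplementedPublisherServer
+//	}
+//
+//	func (s *myPublisher) CreateTopic(ctx context.Context, t *Topic) (*Topic, error) {
+//		return t, nil // only CreateTopic is overridden
+//	}
+//
+//	// RegisterPublisherServer(grpcServer, &myPublisher{})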
+type UnimplementedPublisherServer struct { +} + +func (*UnimplementedPublisherServer) CreateTopic(context.Context, *Topic) (*Topic, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateTopic not implemented") +} +func (*UnimplementedPublisherServer) UpdateTopic(context.Context, *UpdateTopicRequest) (*Topic, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateTopic not implemented") +} +func (*UnimplementedPublisherServer) Publish(context.Context, *PublishRequest) (*PublishResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (*UnimplementedPublisherServer) GetTopic(context.Context, *GetTopicRequest) (*Topic, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTopic not implemented") +} +func (*UnimplementedPublisherServer) ListTopics(context.Context, *ListTopicsRequest) (*ListTopicsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListTopics not implemented") +} +func (*UnimplementedPublisherServer) ListTopicSubscriptions(context.Context, *ListTopicSubscriptionsRequest) (*ListTopicSubscriptionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListTopicSubscriptions not implemented") +} +func (*UnimplementedPublisherServer) ListTopicSnapshots(context.Context, *ListTopicSnapshotsRequest) (*ListTopicSnapshotsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListTopicSnapshots not implemented") +} +func (*UnimplementedPublisherServer) DeleteTopic(context.Context, *DeleteTopicRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteTopic not implemented") +} +func (*UnimplementedPublisherServer) DetachSubscription(context.Context, *DetachSubscriptionRequest) (*DetachSubscriptionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DetachSubscription not implemented") +} + +func RegisterPublisherServer(s *grpc.Server, srv PublisherServer) { + s.RegisterService(&_Publisher_serviceDesc, srv) +} + +func _Publisher_CreateTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Topic) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).CreateTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/CreateTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).CreateTopic(ctx, req.(*Topic)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_UpdateTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).UpdateTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/UpdateTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).UpdateTopic(ctx, req.(*UpdateTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PublishRequest) + 
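+ // dec unmarshals the incoming frame into in using the codec negotiated for
+ // the connection. When a UnaryServerInterceptor is installed, the call is
+ // wrapped so the interceptor sees the UnaryServerInfo (server value and
+ // full method name) and can short-circuit or decorate the handler;
+ // otherwise the service implementation is invoked directly.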
if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).Publish(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/Publish", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).Publish(ctx, req.(*PublishRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_GetTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).GetTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/GetTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).GetTopic(ctx, req.(*GetTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_ListTopics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTopicsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).ListTopics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/ListTopics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).ListTopics(ctx, req.(*ListTopicsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_ListTopicSubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTopicSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).ListTopicSubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/ListTopicSubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).ListTopicSubscriptions(ctx, req.(*ListTopicSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_ListTopicSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTopicSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).ListTopicSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/ListTopicSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).ListTopicSnapshots(ctx, req.(*ListTopicSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).DeleteTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.pubsub.v1.Publisher/DeleteTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_DetachSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DetachSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).DetachSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/DetachSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).DetachSubscription(ctx, req.(*DetachSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Publisher_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.pubsub.v1.Publisher", + HandlerType: (*PublisherServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateTopic", + Handler: _Publisher_CreateTopic_Handler, + }, + { + MethodName: "UpdateTopic", + Handler: _Publisher_UpdateTopic_Handler, + }, + { + MethodName: "Publish", + Handler: _Publisher_Publish_Handler, + }, + { + MethodName: "GetTopic", + Handler: _Publisher_GetTopic_Handler, + }, + { + MethodName: "ListTopics", + Handler: _Publisher_ListTopics_Handler, + }, + { + MethodName: "ListTopicSubscriptions", + Handler: _Publisher_ListTopicSubscriptions_Handler, + }, + { + MethodName: "ListTopicSnapshots", + Handler: _Publisher_ListTopicSnapshots_Handler, + }, + { + MethodName: "DeleteTopic", + Handler: _Publisher_DeleteTopic_Handler, + }, + { + MethodName: "DetachSubscription", + Handler: _Publisher_DetachSubscription_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/pubsub/v1/pubsub.proto", +} + +// SubscriberClient is the client API for Subscriber service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SubscriberClient interface { + // Creates a subscription to a given topic. See the [resource name rules] + // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). + // If the subscription already exists, returns `ALREADY_EXISTS`. + // If the corresponding topic doesn't exist, returns `NOT_FOUND`. + // + // If the name is not provided in the request, the server will assign a random + // name for this subscription on the same project as the topic, conforming + // to the [resource name format] + // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The + // generated name is populated in the returned Subscription object. Note that + // for REST API requests, you must specify a name in the request. + CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error) + // Gets the configuration details of a subscription. + GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) + // Updates an existing subscription by updating the fields specified in the + // update mask. Note that certain properties of a subscription, such as its + // topic, are not modifiable. + UpdateSubscription(ctx context.Context, in *UpdateSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) + // Lists matching subscriptions. 
+ ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error) + // Deletes an existing subscription. All messages retained in the subscription + // are immediately dropped. Calls to `Pull` after deletion will return + // `NOT_FOUND`. After a subscription is deleted, a new one may be created with + // the same name, but the new one has no association with the old + // subscription or its topic unless the same topic is specified. + DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Modifies the ack deadline for a specific message. This method is useful + // to indicate that more time is needed to process a message by the + // subscriber, or to make the message available for redelivery if the + // processing was interrupted. Note that this does not modify the + // subscription-level `ackDeadlineSeconds` used for subsequent messages. + ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Acknowledges the messages associated with the `ack_ids` in the + // `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages + // from the subscription. + // + // Acknowledging a message whose ack deadline has expired may succeed, + // but such a message may be redelivered later. Acknowledging a message more + // than once will not result in an error. + Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Pulls messages from the server. + Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) + // Establishes a stream with the server, which sends messages down to the + // client. The client streams acknowledgments and ack deadline modifications + // back to the server. The server will close the stream and return the status + // on any error. The server may close the stream with status `UNAVAILABLE` to + // reassign server-side resources, in which case, the client should + // re-establish the stream. Flow control can be achieved by configuring the + // underlying RPC channel. + StreamingPull(ctx context.Context, opts ...grpc.CallOption) (Subscriber_StreamingPullClient, error) + // Modifies the `PushConfig` for a specified subscription. + // + // This may be used to change a push subscription to a pull one (signified by + // an empty `PushConfig`) or vice versa, or change the endpoint URL and other + // attributes of a push subscription. Messages will accumulate for delivery + // continuously through the call regardless of changes to the `PushConfig`. + ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Gets the configuration details of a snapshot. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + GetSnapshot(ctx context.Context, in *GetSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) + // Lists the existing snapshots. Snapshots are used in [Seek]( + // https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. 
That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) + // Creates a snapshot from the requested subscription. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + // If the snapshot already exists, returns `ALREADY_EXISTS`. + // If the requested subscription doesn't exist, returns `NOT_FOUND`. + // If the backlog in the subscription is too old -- and the resulting snapshot + // would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. + // See also the `Snapshot.expire_time` field. If the name is not provided in + // the request, the server will assign a random + // name for this snapshot on the same project as the subscription, conforming + // to the [resource name format] + // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The + // generated name is populated in the returned Snapshot object. Note that for + // REST API requests, you must specify a name in the request. + CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) + // Updates an existing snapshot by updating the fields specified in the update + // mask. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + UpdateSnapshot(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) + // Removes an existing snapshot. Snapshots are used in [Seek] + // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + // When the snapshot is deleted, all messages retained in the snapshot + // are immediately dropped. After a snapshot is deleted, a new one may be + // created with the same name, but the new one has no association with the old + // snapshot or its subscription, unless the same subscription is specified. + DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Seeks an existing subscription to a point in time or to a given snapshot, + // whichever is provided in the request. Snapshots are used in [Seek] + // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. Note that both the subscription and the + // snapshot must be on the same topic. 
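The snapshot semantics documented above compose into a capture-then-rewind flow. A sketch under assumed resource names (sub is a SubscriberClient; none of the identifiers below are part of this diff):

// Capture the subscription's current acknowledgment state...
snap, err := sub.CreateSnapshot(ctx, &CreateSnapshotRequest{
    Name:         "projects/demo/snapshots/pre-deploy",
    Subscription: "projects/demo/subscriptions/workers",
})
if err != nil {
    log.Fatal(err)
}
// ...and later rewind the subscription to it. Subscription and snapshot
// must share a topic, per the Seek comment above.
if _, err := sub.Seek(ctx, &SeekRequest{
    Subscription: "projects/demo/subscriptions/workers",
    Target:       &SeekRequest_Snapshot{Snapshot: snap.GetName()},
}); err != nil {
    log.Fatal(err)
}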
+ Seek(ctx context.Context, in *SeekRequest, opts ...grpc.CallOption) (*SeekResponse, error) +} + +type subscriberClient struct { + cc grpc.ClientConnInterface +} + +func NewSubscriberClient(cc grpc.ClientConnInterface) SubscriberClient { + return &subscriberClient{cc} +} + +func (c *subscriberClient) CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error) { + out := new(Subscription) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/CreateSubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) { + out := new(Subscription) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/GetSubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) UpdateSubscription(ctx context.Context, in *UpdateSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) { + out := new(Subscription) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/UpdateSubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error) { + out := new(ListSubscriptionsResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ListSubscriptions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/DeleteSubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ModifyAckDeadline", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Acknowledge", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) { + out := new(PullResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Pull", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) StreamingPull(ctx context.Context, opts ...grpc.CallOption) (Subscriber_StreamingPullClient, error) { + stream, err := c.cc.NewStream(ctx, &_Subscriber_serviceDesc.Streams[0], "/google.pubsub.v1.Subscriber/StreamingPull", opts...) 
+ if err != nil { + return nil, err + } + x := &subscriberStreamingPullClient{stream} + return x, nil +} + +type Subscriber_StreamingPullClient interface { + Send(*StreamingPullRequest) error + Recv() (*StreamingPullResponse, error) + grpc.ClientStream +} + +type subscriberStreamingPullClient struct { + grpc.ClientStream +} + +func (x *subscriberStreamingPullClient) Send(m *StreamingPullRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *subscriberStreamingPullClient) Recv() (*StreamingPullResponse, error) { + m := new(StreamingPullResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *subscriberClient) ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ModifyPushConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) GetSnapshot(ctx context.Context, in *GetSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { + out := new(Snapshot) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/GetSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { + out := new(ListSnapshotsResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { + out := new(Snapshot) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/CreateSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) UpdateSnapshot(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { + out := new(Snapshot) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/UpdateSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/DeleteSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) Seek(ctx context.Context, in *SeekRequest, opts ...grpc.CallOption) (*SeekResponse, error) { + out := new(SeekResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Seek", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SubscriberServer is the server API for Subscriber service. +type SubscriberServer interface { + // Creates a subscription to a given topic. See the [resource name rules] + // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). + // If the subscription already exists, returns `ALREADY_EXISTS`. + // If the corresponding topic doesn't exist, returns `NOT_FOUND`. + // + // If the name is not provided in the request, the server will assign a random + // name for this subscription on the same project as the topic, conforming + // to the [resource name format] + // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). 
The + // generated name is populated in the returned Subscription object. Note that + // for REST API requests, you must specify a name in the request. + CreateSubscription(context.Context, *Subscription) (*Subscription, error) + // Gets the configuration details of a subscription. + GetSubscription(context.Context, *GetSubscriptionRequest) (*Subscription, error) + // Updates an existing subscription by updating the fields specified in the + // update mask. Note that certain properties of a subscription, such as its + // topic, are not modifiable. + UpdateSubscription(context.Context, *UpdateSubscriptionRequest) (*Subscription, error) + // Lists matching subscriptions. + ListSubscriptions(context.Context, *ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) + // Deletes an existing subscription. All messages retained in the subscription + // are immediately dropped. Calls to `Pull` after deletion will return + // `NOT_FOUND`. After a subscription is deleted, a new one may be created with + // the same name, but the new one has no association with the old + // subscription or its topic unless the same topic is specified. + DeleteSubscription(context.Context, *DeleteSubscriptionRequest) (*emptypb.Empty, error) + // Modifies the ack deadline for a specific message. This method is useful + // to indicate that more time is needed to process a message by the + // subscriber, or to make the message available for redelivery if the + // processing was interrupted. Note that this does not modify the + // subscription-level `ackDeadlineSeconds` used for subsequent messages. + ModifyAckDeadline(context.Context, *ModifyAckDeadlineRequest) (*emptypb.Empty, error) + // Acknowledges the messages associated with the `ack_ids` in the + // `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages + // from the subscription. + // + // Acknowledging a message whose ack deadline has expired may succeed, + // but such a message may be redelivered later. Acknowledging a message more + // than once will not result in an error. + Acknowledge(context.Context, *AcknowledgeRequest) (*emptypb.Empty, error) + // Pulls messages from the server. + Pull(context.Context, *PullRequest) (*PullResponse, error) + // Establishes a stream with the server, which sends messages down to the + // client. The client streams acknowledgments and ack deadline modifications + // back to the server. The server will close the stream and return the status + // on any error. The server may close the stream with status `UNAVAILABLE` to + // reassign server-side resources, in which case, the client should + // re-establish the stream. Flow control can be achieved by configuring the + // underlying RPC channel. + StreamingPull(Subscriber_StreamingPullServer) error + // Modifies the `PushConfig` for a specified subscription. + // + // This may be used to change a push subscription to a pull one (signified by + // an empty `PushConfig`) or vice versa, or change the endpoint URL and other + // attributes of a push subscription. Messages will accumulate for delivery + // continuously through the call regardless of changes to the `PushConfig`. + ModifyPushConfig(context.Context, *ModifyPushConfigRequest) (*emptypb.Empty, error) + // Gets the configuration details of a snapshot. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. 
That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + GetSnapshot(context.Context, *GetSnapshotRequest) (*Snapshot, error) + // Lists the existing snapshots. Snapshots are used in [Seek]( + // https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) + // Creates a snapshot from the requested subscription. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + // If the snapshot already exists, returns `ALREADY_EXISTS`. + // If the requested subscription doesn't exist, returns `NOT_FOUND`. + // If the backlog in the subscription is too old -- and the resulting snapshot + // would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. + // See also the `Snapshot.expire_time` field. If the name is not provided in + // the request, the server will assign a random + // name for this snapshot on the same project as the subscription, conforming + // to the [resource name format] + // (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The + // generated name is populated in the returned Snapshot object. Note that for + // REST API requests, you must specify a name in the request. + CreateSnapshot(context.Context, *CreateSnapshotRequest) (*Snapshot, error) + // Updates an existing snapshot by updating the fields specified in the update + // mask. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + UpdateSnapshot(context.Context, *UpdateSnapshotRequest) (*Snapshot, error) + // Removes an existing snapshot. Snapshots are used in [Seek] + // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + // When the snapshot is deleted, all messages retained in the snapshot + // are immediately dropped. After a snapshot is deleted, a new one may be + // created with the same name, but the new one has no association with the old + // snapshot or its subscription, unless the same subscription is specified. + DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*emptypb.Empty, error) + // Seeks an existing subscription to a point in time or to a given snapshot, + // whichever is provided in the request. Snapshots are used in [Seek] + // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. Note that both the subscription and the + // snapshot must be on the same topic. 
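The StreamingPull contract described earlier (open a bidirectional stream, send acks and deadline changes back, re-establish on UNAVAILABLE) looks roughly like this from the client side; the subscription name and the bail-out error policy are assumptions, not part of this diff:

stream, err := sub.StreamingPull(ctx)
if err != nil {
    log.Fatal(err)
}
// The first request on the stream names the subscription.
if err := stream.Send(&StreamingPullRequest{
    Subscription:             "projects/demo/subscriptions/workers",
    StreamAckDeadlineSeconds: 60,
}); err != nil {
    log.Fatal(err)
}
for {
    resp, err := stream.Recv()
    if err != nil {
        break // e.g. UNAVAILABLE: callers are expected to re-establish the stream
    }
    ackIDs := make([]string, 0, len(resp.GetReceivedMessages()))
    for _, m := range resp.GetReceivedMessages() {
        ackIDs = append(ackIDs, m.GetAckId())
    }
    // Subsequent requests stream acknowledgments back on the same stream.
    if err := stream.Send(&StreamingPullRequest{AckIds: ackIDs}); err != nil {
        break
    }
}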
+ Seek(context.Context, *SeekRequest) (*SeekResponse, error) +} + +// UnimplementedSubscriberServer can be embedded to have forward compatible implementations. +type UnimplementedSubscriberServer struct { +} + +func (*UnimplementedSubscriberServer) CreateSubscription(context.Context, *Subscription) (*Subscription, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSubscription not implemented") +} +func (*UnimplementedSubscriberServer) GetSubscription(context.Context, *GetSubscriptionRequest) (*Subscription, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSubscription not implemented") +} +func (*UnimplementedSubscriberServer) UpdateSubscription(context.Context, *UpdateSubscriptionRequest) (*Subscription, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateSubscription not implemented") +} +func (*UnimplementedSubscriberServer) ListSubscriptions(context.Context, *ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSubscriptions not implemented") +} +func (*UnimplementedSubscriberServer) DeleteSubscription(context.Context, *DeleteSubscriptionRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSubscription not implemented") +} +func (*UnimplementedSubscriberServer) ModifyAckDeadline(context.Context, *ModifyAckDeadlineRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ModifyAckDeadline not implemented") +} +func (*UnimplementedSubscriberServer) Acknowledge(context.Context, *AcknowledgeRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Acknowledge not implemented") +} +func (*UnimplementedSubscriberServer) Pull(context.Context, *PullRequest) (*PullResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Pull not implemented") +} +func (*UnimplementedSubscriberServer) StreamingPull(Subscriber_StreamingPullServer) error { + return status.Errorf(codes.Unimplemented, "method StreamingPull not implemented") +} +func (*UnimplementedSubscriberServer) ModifyPushConfig(context.Context, *ModifyPushConfigRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ModifyPushConfig not implemented") +} +func (*UnimplementedSubscriberServer) GetSnapshot(context.Context, *GetSnapshotRequest) (*Snapshot, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSnapshot not implemented") +} +func (*UnimplementedSubscriberServer) ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (*UnimplementedSubscriberServer) CreateSnapshot(context.Context, *CreateSnapshotRequest) (*Snapshot, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented") +} +func (*UnimplementedSubscriberServer) UpdateSnapshot(context.Context, *UpdateSnapshotRequest) (*Snapshot, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateSnapshot not implemented") +} +func (*UnimplementedSubscriberServer) DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented") +} +func (*UnimplementedSubscriberServer) Seek(context.Context, *SeekRequest) (*SeekResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Seek not 
implemented") +} + +func RegisterSubscriberServer(s *grpc.Server, srv SubscriberServer) { + s.RegisterService(&_Subscriber_serviceDesc, srv) +} + +func _Subscriber_CreateSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Subscription) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).CreateSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/CreateSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).CreateSubscription(ctx, req.(*Subscription)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_GetSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).GetSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/GetSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).GetSubscription(ctx, req.(*GetSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_UpdateSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).UpdateSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/UpdateSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).UpdateSubscription(ctx, req.(*UpdateSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_ListSubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).ListSubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/ListSubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).ListSubscriptions(ctx, req.(*ListSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_DeleteSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).DeleteSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/DeleteSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).DeleteSubscription(ctx, req.(*DeleteSubscriptionRequest)) + } + 
return interceptor(ctx, in, info, handler) +} + +func _Subscriber_ModifyAckDeadline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ModifyAckDeadlineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).ModifyAckDeadline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/ModifyAckDeadline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).ModifyAckDeadline(ctx, req.(*ModifyAckDeadlineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_Acknowledge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AcknowledgeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).Acknowledge(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/Acknowledge", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).Acknowledge(ctx, req.(*AcknowledgeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_Pull_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PullRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).Pull(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/Pull", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).Pull(ctx, req.(*PullRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_StreamingPull_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SubscriberServer).StreamingPull(&subscriberStreamingPullServer{stream}) +} + +type Subscriber_StreamingPullServer interface { + Send(*StreamingPullResponse) error + Recv() (*StreamingPullRequest, error) + grpc.ServerStream +} + +type subscriberStreamingPullServer struct { + grpc.ServerStream +} + +func (x *subscriberStreamingPullServer) Send(m *StreamingPullResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *subscriberStreamingPullServer) Recv() (*StreamingPullRequest, error) { + m := new(StreamingPullRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Subscriber_ModifyPushConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ModifyPushConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).ModifyPushConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/ModifyPushConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).ModifyPushConfig(ctx, req.(*ModifyPushConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_GetSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).GetSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/GetSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).GetSnapshot(ctx, req.(*GetSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).CreateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/CreateSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_UpdateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).UpdateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/UpdateSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).UpdateSnapshot(ctx, req.(*UpdateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).DeleteSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/DeleteSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_Seek_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeekRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(SubscriberServer).Seek(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/Seek", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).Seek(ctx, req.(*SeekRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Subscriber_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.pubsub.v1.Subscriber", + HandlerType: (*SubscriberServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSubscription", + Handler: _Subscriber_CreateSubscription_Handler, + }, + { + MethodName: "GetSubscription", + Handler: _Subscriber_GetSubscription_Handler, + }, + { + MethodName: "UpdateSubscription", + Handler: _Subscriber_UpdateSubscription_Handler, + }, + { + MethodName: "ListSubscriptions", + Handler: _Subscriber_ListSubscriptions_Handler, + }, + { + MethodName: "DeleteSubscription", + Handler: _Subscriber_DeleteSubscription_Handler, + }, + { + MethodName: "ModifyAckDeadline", + Handler: _Subscriber_ModifyAckDeadline_Handler, + }, + { + MethodName: "Acknowledge", + Handler: _Subscriber_Acknowledge_Handler, + }, + { + MethodName: "Pull", + Handler: _Subscriber_Pull_Handler, + }, + { + MethodName: "ModifyPushConfig", + Handler: _Subscriber_ModifyPushConfig_Handler, + }, + { + MethodName: "GetSnapshot", + Handler: _Subscriber_GetSnapshot_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _Subscriber_ListSnapshots_Handler, + }, + { + MethodName: "CreateSnapshot", + Handler: _Subscriber_CreateSnapshot_Handler, + }, + { + MethodName: "UpdateSnapshot", + Handler: _Subscriber_UpdateSnapshot_Handler, + }, + { + MethodName: "DeleteSnapshot", + Handler: _Subscriber_DeleteSnapshot_Handler, + }, + { + MethodName: "Seek", + Handler: _Subscriber_Seek_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingPull", + Handler: _Subscriber_StreamingPull_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "google/pubsub/v1/pubsub.proto", +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/schema.pb.go b/vendor/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb/schema.pb.go similarity index 91% rename from vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/schema.pb.go rename to vendor/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb/schema.pb.go index 8b46af88..6ed36b12 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/schema.pb.go +++ b/vendor/cloud.google.com/go/pubsub/v2/apiv1/pubsubpb/schema.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,17 +14,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
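Note that the rename recorded in the diff header above moves this generated package from cloud.google.com/go/pubsub/apiv1/pubsubpb to cloud.google.com/go/pubsub/v2/apiv1/pubsubpb, so any non-vendored import of the old path has to move with it; a sketch (the alias is illustrative):

import (
    // Old path, pre-rename: cloud.google.com/go/pubsub/apiv1/pubsubpb
    pubsubpb "cloud.google.com/go/pubsub/v2/apiv1/pubsubpb"
)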
// versions: -// protoc-gen-go v1.34.2 -// protoc v4.25.3 +// protoc-gen-go v1.35.2 +// protoc v4.25.7 // source: google/pubsub/v1/schema.proto package pubsubpb import ( context "context" - reflect "reflect" - sync "sync" - _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -33,6 +30,8 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" emptypb "google.golang.org/protobuf/types/known/emptypb" timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) const ( @@ -226,11 +225,9 @@ type Schema struct { func (x *Schema) Reset() { *x = Schema{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Schema) String() string { @@ -241,7 +238,7 @@ func (*Schema) ProtoMessage() {} func (x *Schema) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -316,11 +313,9 @@ type CreateSchemaRequest struct { func (x *CreateSchemaRequest) Reset() { *x = CreateSchemaRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CreateSchemaRequest) String() string { @@ -331,7 +326,7 @@ func (*CreateSchemaRequest) ProtoMessage() {} func (x *CreateSchemaRequest) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -383,11 +378,9 @@ type GetSchemaRequest struct { func (x *GetSchemaRequest) Reset() { *x = GetSchemaRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetSchemaRequest) String() string { @@ -398,7 +391,7 @@ func (*GetSchemaRequest) ProtoMessage() {} func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -450,11 +443,9 @@ type ListSchemasRequest struct { func (x *ListSchemasRequest) Reset() { *x = ListSchemasRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListSchemasRequest) String() string { @@ -465,7 +456,7 @@ func 
(*ListSchemasRequest) ProtoMessage() {} func (x *ListSchemasRequest) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -523,11 +514,9 @@ type ListSchemasResponse struct { func (x *ListSchemasResponse) Reset() { *x = ListSchemasResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListSchemasResponse) String() string { @@ -538,7 +527,7 @@ func (*ListSchemasResponse) ProtoMessage() {} func (x *ListSchemasResponse) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -588,11 +577,9 @@ type ListSchemaRevisionsRequest struct { func (x *ListSchemaRevisionsRequest) Reset() { *x = ListSchemaRevisionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListSchemaRevisionsRequest) String() string { @@ -603,7 +590,7 @@ func (*ListSchemaRevisionsRequest) ProtoMessage() {} func (x *ListSchemaRevisionsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -661,11 +648,9 @@ type ListSchemaRevisionsResponse struct { func (x *ListSchemaRevisionsResponse) Reset() { *x = ListSchemaRevisionsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListSchemaRevisionsResponse) String() string { @@ -676,7 +661,7 @@ func (*ListSchemaRevisionsResponse) ProtoMessage() {} func (x *ListSchemaRevisionsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -720,11 +705,9 @@ type CommitSchemaRequest struct { func (x *CommitSchemaRequest) Reset() { *x = CommitSchemaRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CommitSchemaRequest) String() string { @@ -735,7 +718,7 @@ func (*CommitSchemaRequest) ProtoMessage() {} func (x *CommitSchemaRequest) ProtoReflect() 
protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -781,11 +764,9 @@ type RollbackSchemaRequest struct { func (x *RollbackSchemaRequest) Reset() { *x = RollbackSchemaRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RollbackSchemaRequest) String() string { @@ -796,7 +777,7 @@ func (*RollbackSchemaRequest) ProtoMessage() {} func (x *RollbackSchemaRequest) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -846,11 +827,9 @@ type DeleteSchemaRevisionRequest struct { func (x *DeleteSchemaRevisionRequest) Reset() { *x = DeleteSchemaRevisionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DeleteSchemaRevisionRequest) String() string { @@ -861,7 +840,7 @@ func (*DeleteSchemaRevisionRequest) ProtoMessage() {} func (x *DeleteSchemaRevisionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -904,11 +883,9 @@ type DeleteSchemaRequest struct { func (x *DeleteSchemaRequest) Reset() { *x = DeleteSchemaRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DeleteSchemaRequest) String() string { @@ -919,7 +896,7 @@ func (*DeleteSchemaRequest) ProtoMessage() {} func (x *DeleteSchemaRequest) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -956,11 +933,9 @@ type ValidateSchemaRequest struct { func (x *ValidateSchemaRequest) Reset() { *x = ValidateSchemaRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ValidateSchemaRequest) String() string { @@ -971,7 +946,7 @@ func (*ValidateSchemaRequest) ProtoMessage() {} func (x *ValidateSchemaRequest) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[11] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1010,11 +985,9 @@ type ValidateSchemaResponse struct { func (x *ValidateSchemaResponse) Reset() { *x = ValidateSchemaResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ValidateSchemaResponse) String() string { @@ -1025,7 +998,7 @@ func (*ValidateSchemaResponse) ProtoMessage() {} func (x *ValidateSchemaResponse) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1062,11 +1035,9 @@ type ValidateMessageRequest struct { func (x *ValidateMessageRequest) Reset() { *x = ValidateMessageRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ValidateMessageRequest) String() string { @@ -1077,7 +1048,7 @@ func (*ValidateMessageRequest) ProtoMessage() {} func (x *ValidateMessageRequest) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1164,11 +1135,9 @@ type ValidateMessageResponse struct { func (x *ValidateMessageResponse) Reset() { *x = ValidateMessageResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_schema_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_pubsub_v1_schema_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ValidateMessageResponse) String() string { @@ -1179,7 +1148,7 @@ func (*ValidateMessageResponse) ProtoMessage() {} func (x *ValidateMessageResponse) ProtoReflect() protoreflect.Message { mi := &file_google_pubsub_v1_schema_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1470,10 +1439,10 @@ var file_google_pubsub_v1_schema_proto_rawDesc = []byte{ 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x42, 0xaa, 0x01, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x75, 0x62, - 0x73, 0x75, 0x62, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, - 0x70, 0x62, 
0x3b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, + 0x73, 0x75, 0x62, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x70, 0x62, 0x3b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x70, 0x62, 0xaa, 0x02, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x5c, 0x56, 0x31, @@ -1563,188 +1532,6 @@ func file_google_pubsub_v1_schema_proto_init() { if File_google_pubsub_v1_schema_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_pubsub_v1_schema_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Schema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*CreateSchemaRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*GetSchemaRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ListSchemasRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ListSchemasResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ListSchemaRevisionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ListSchemaRevisionsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*CommitSchemaRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*RollbackSchemaRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*DeleteSchemaRevisionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*DeleteSchemaRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return 
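Editor's note: the repeated hunks above are regenerated protobuf code. Newer protoc-gen-go drops the protoimpl.UnsafeEnabled branches from Reset and ProtoReflect and deletes the per-message Exporter functions in the surrounding init hunk, since the unsafe MessageState-based path is now the only implementation and the reflection-based fallback those exporters served is gone. The rawDesc change is the go_package file option being rewritten: 0x5a is the tag byte for FileOptions field 11 (go_package), and its length byte grows from 0x32 (50) to 0x35 (53) because the import path gains the three characters "/v2". A sketch of the shape the regenerated methods converge on, where MyMessage and myMsgTypes are hypothetical stand-ins, not symbols from this diff:

    // Pattern only: a real generated file builds myMsgTypes ([]protoimpl.MessageInfo)
    // from its descriptor data and imports protoimpl and protoreflect from
    // google.golang.org/protobuf.
    func (x *MyMessage) Reset() {
        *x = MyMessage{}
        mi := &myMsgTypes[0]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }

    func (x *MyMessage) ProtoReflect() protoreflect.Message {
        mi := &myMsgTypes[0]
        if x != nil { // no UnsafeEnabled guard: the fast path is unconditional
            ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
            if ms.LoadMessageInfo() == nil {
                ms.StoreMessageInfo(mi)
            }
            return ms
        }
        return mi.MessageOf(x)
    }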
nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*ValidateSchemaRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*ValidateSchemaResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*ValidateMessageRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_pubsub_v1_schema_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*ValidateMessageResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_google_pubsub_v1_schema_proto_msgTypes[13].OneofWrappers = []any{ (*ValidateMessageRequest_Name)(nil), (*ValidateMessageRequest_Schema)(nil), diff --git a/vendor/github.com/DataDog/appsec-internal-go/appsec/config.go b/vendor/github.com/DataDog/appsec-internal-go/appsec/config.go deleted file mode 100644 index 3cfed5bc..00000000 --- a/vendor/github.com/DataDog/appsec-internal-go/appsec/config.go +++ /dev/null @@ -1,203 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2023-present Datadog, Inc. - -package appsec - -import ( - "os" - "regexp" - "strconv" - "time" - "unicode" - "unicode/utf8" - - "github.com/DataDog/appsec-internal-go/log" -) - -// Configuration environment variables -const ( - // EnvAPISecEnabled is the env var used to enable API Security - EnvAPISecEnabled = "DD_API_SECURITY_ENABLED" - // EnvAPISecSampleRate is the env var used to set the sampling rate of API Security schema extraction - EnvAPISecSampleRate = "DD_API_SECURITY_REQUEST_SAMPLE_RATE" - // EnvObfuscatorKey is the env var used to provide the WAF key obfuscation regexp - EnvObfuscatorKey = "DD_APPSEC_OBFUSCATION_PARAMETER_KEY_REGEXP" - // EnvObfuscatorValue is the env var used to provide the WAF value obfuscation regexp - EnvObfuscatorValue = "DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP" - // EnvWAFTimeout is the env var used to specify the timeout value for a WAF run - EnvWAFTimeout = "DD_APPSEC_WAF_TIMEOUT" - // EnvTraceRateLimit is the env var used to set the ASM trace limiting rate - EnvTraceRateLimit = "DD_APPSEC_TRACE_RATE_LIMIT" - // EnvRules is the env var used to provide a path to a local security rule file - EnvRules = "DD_APPSEC_RULES" - // EnvRASPEnabled is the env var used to enable/disable RASP functionalities for ASM - EnvRASPEnabled = "DD_APPSEC_RASP_ENABLED" -) - -// Configuration constants and default values -const ( - // DefaultAPISecSampleRate is the default rate at which API Security schemas are extracted from requests - DefaultAPISecSampleRate = .1 - // DefaultObfuscatorKeyRegex is the default regexp used to obfuscate keys - DefaultObfuscatorKeyRegex = `(?i)pass|pw(?:or)?d|secret|(?:api|private|public|access)[_-]?key|token|consumer[_-]?(?:id|key|secret)|sign(?:ed|ature)|bearer|authorization|jsessionid|phpsessid|asp\.net[_-]sessionid|sid|jwt` - // 
DefaultObfuscatorValueRegex is the default regexp used to obfuscate values - DefaultObfuscatorValueRegex = `(?i)(?:p(?:ass)?w(?:or)?d|pass(?:[_-]?phrase)?|secret(?:[_-]?key)?|(?:(?:api|private|public|access)[_-]?)key(?:[_-]?id)?|(?:(?:auth|access|id|refresh)[_-]?)?token|consumer[_-]?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?|jsessionid|phpsessid|asp\.net(?:[_-]|-)sessionid|sid|jwt)(?:\s*=[^;]|"\s*:\s*"[^"]+")|bearer\s+[a-z0-9\._\-]+|token:[a-z0-9]{13}|gh[opsu]_[0-9a-zA-Z]{36}|ey[I-L][\w=-]+\.ey[I-L][\w=-]+(?:\.[\w.+\/=-]+)?|[\-]{5}BEGIN[a-z\s]+PRIVATE\sKEY[\-]{5}[^\-]+[\-]{5}END[a-z\s]+PRIVATE\sKEY|ssh-rsa\s*[a-z0-9\/\.+]{100,}` - // DefaultWAFTimeout is the default time limit past which a WAF run will timeout - DefaultWAFTimeout = time.Millisecond - // DefaultTraceRate is the default limit (trace/sec) past which ASM traces are sampled out - DefaultTraceRate uint = 100 // up to 100 appsec traces/s -) - -// APISecConfig holds the configuration for API Security schemas reporting -// It is used to enabled/disable the feature as well as to configure the rate -// at which schemas get reported, -type APISecConfig struct { - Enabled bool - SampleRate float64 -} - -// ObfuscatorConfig wraps the key and value regexp to be passed to the WAF to perform obfuscation. -type ObfuscatorConfig struct { - KeyRegex string - ValueRegex string -} - -// NewAPISecConfig creates and returns a new API Security configuration by reading the env -func NewAPISecConfig() APISecConfig { - return APISecConfig{ - Enabled: boolEnv(EnvAPISecEnabled, true), - SampleRate: readAPISecuritySampleRate(), - } -} -func readAPISecuritySampleRate() float64 { - value := os.Getenv(EnvAPISecSampleRate) - rate, err := strconv.ParseFloat(value, 64) - if err != nil { - logEnvVarParsingError(EnvAPISecSampleRate, value, err, DefaultAPISecSampleRate) - return DefaultAPISecSampleRate - } - // Clamp the value so that 0.0 <= rate <= 1.0 - if rate < 0. { - rate = 0. - } else if rate > 1. { - rate = 1. 
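Editor's note: the deleted config helpers in this file share one pattern: read an env var, fall back to a default when parsing fails, then coerce the result into a sane range. A minimal self-contained sketch of the two tricks visible here, assuming the hypothetical names envSampleRate (the clamp mirrors readAPISecuritySampleRate) and envTimeout (the unit-suffix trick mirrors WAFTimeoutFromEnv, which treats a bare number as microseconds):

    package main

    import (
        "fmt"
        "os"
        "strconv"
        "time"
        "unicode"
        "unicode/utf8"
    )

    // envSampleRate parses name as a float64 and clamps it into [0, 1],
    // returning def when the variable is unset or unparsable.
    func envSampleRate(name string, def float64) float64 {
        v, err := strconv.ParseFloat(os.Getenv(name), 64)
        if err != nil {
            return def
        }
        return min(max(v, 0), 1)
    }

    // envTimeout parses name as a time.Duration, appending a default "us"
    // unit to bare numbers, and returns def when unset, unparsable, or
    // non-positive.
    func envTimeout(name string, def time.Duration) time.Duration {
        v := os.Getenv(name)
        if v == "" {
            return def
        }
        if r, _ := utf8.DecodeLastRuneInString(v); !unicode.IsLetter(r) {
            v += "us" // no unit given: default to microseconds
        }
        d, err := time.ParseDuration(v)
        if err != nil || d <= 0 {
            return def
        }
        return d
    }

    func main() {
        os.Setenv("DD_API_SECURITY_REQUEST_SAMPLE_RATE", "2.5")
        os.Setenv("DD_APPSEC_WAF_TIMEOUT", "4000")
        fmt.Println(envSampleRate("DD_API_SECURITY_REQUEST_SAMPLE_RATE", 0.1)) // 1 (clamped)
        fmt.Println(envTimeout("DD_APPSEC_WAF_TIMEOUT", time.Millisecond))     // 4ms
    }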
- } - return rate -} - -// RASPEnabled returns true if RASP functionalities are enabled through the env, or if DD_APPSEC_RASP_ENABLED -// is not set -func RASPEnabled() bool { - return boolEnv(EnvRASPEnabled, true) -} - -// NewObfuscatorConfig creates and returns a new WAF obfuscator configuration by reading the env -func NewObfuscatorConfig() ObfuscatorConfig { - keyRE := readObfuscatorConfigRegexp(EnvObfuscatorKey, DefaultObfuscatorKeyRegex) - valueRE := readObfuscatorConfigRegexp(EnvObfuscatorValue, DefaultObfuscatorValueRegex) - return ObfuscatorConfig{KeyRegex: keyRE, ValueRegex: valueRE} -} - -func readObfuscatorConfigRegexp(name, defaultValue string) string { - val, present := os.LookupEnv(name) - if !present { - log.Debug("appsec: %s not defined, starting with the default obfuscator regular expression", name) - return defaultValue - } - if _, err := regexp.Compile(val); err != nil { - logUnexpectedEnvVarValue(name, val, "could not compile the configured obfuscator regular expression", defaultValue) - return defaultValue - } - log.Debug("appsec: starting with the configured obfuscator regular expression %s", name) - return val -} - -// WAFTimeoutFromEnv reads and parses the WAF timeout value set through the env -// If not set, it defaults to `DefaultWAFTimeout` -func WAFTimeoutFromEnv() (timeout time.Duration) { - timeout = DefaultWAFTimeout - value := os.Getenv(EnvWAFTimeout) - if value == "" { - return - } - - // Check if the value ends with a letter, which means the user has - // specified their own time duration unit(s) such as 1s200ms. - // Otherwise, default to microseconds. - if lastRune, _ := utf8.DecodeLastRuneInString(value); !unicode.IsLetter(lastRune) { - value += "us" // Add the default microsecond time-duration suffix - } - - parsed, err := time.ParseDuration(value) - if err != nil { - logEnvVarParsingError(EnvWAFTimeout, value, err, timeout) - return - } - if parsed <= 0 { - logUnexpectedEnvVarValue(EnvWAFTimeout, parsed, "expecting a strictly positive duration", timeout) - return - } - return parsed -} - -// RateLimitFromEnv reads and parses the trace rate limit set through the env -// If not set, it defaults to `DefaultTraceRate` -func RateLimitFromEnv() (rate uint) { - rate = DefaultTraceRate - value := os.Getenv(EnvTraceRateLimit) - if value == "" { - return rate - } - parsed, err := strconv.ParseUint(value, 10, 0) - if err != nil { - logEnvVarParsingError(EnvTraceRateLimit, value, err, rate) - return - } - if parsed == 0 { - logUnexpectedEnvVarValue(EnvTraceRateLimit, parsed, "expecting a value strictly greater than 0", rate) - return - } - return uint(parsed) -} - -// RulesFromEnv returns the security rules provided through the environment -// If the env var is not set, the default recommended rules are returned instead -func RulesFromEnv() ([]byte, error) { - filepath := os.Getenv(EnvRules) - if filepath == "" { - log.Debug("appsec: using the default built-in recommended security rules") - return DefaultRuleset() - } - buf, err := os.ReadFile(filepath) - if err != nil { - if os.IsNotExist(err) { - err = log.Errorf("appsec: could not find the rules file in path %s: %w.", filepath, err) - } - return nil, err - } - log.Debug("appsec: using the security rules from file %s", filepath) - return buf, nil -} - -func logEnvVarParsingError(name, value string, err error, defaultValue any) { - log.Debug("appsec: could not parse the env var %s=%s as a duration: %v. 
Using default value %v.", name, value, err, defaultValue) -} - -func logUnexpectedEnvVarValue(name string, value any, reason string, defaultValue any) { - log.Debug("appsec: unexpected configuration value of %s=%v: %s. Using default value %v.", name, value, reason, defaultValue) -} - -func boolEnv(key string, def bool) bool { - strVal, ok := os.LookupEnv(key) - if !ok { - return def - } - v, err := strconv.ParseBool(strVal) - if err != nil { - logEnvVarParsingError(key, strVal, err, def) - return def - } - return v -} diff --git a/vendor/github.com/DataDog/appsec-internal-go/appsec/embed.go b/vendor/github.com/DataDog/appsec-internal-go/appsec/embed.go deleted file mode 100644 index cfa0a5d8..00000000 --- a/vendor/github.com/DataDog/appsec-internal-go/appsec/embed.go +++ /dev/null @@ -1,14 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package appsec - -import _ "embed" // Blank import comment for golint compliance - -// StaticRecommendedRules holds the recommended AppSec security rules (v1.12.0) -// Source: https://github.com/DataDog/appsec-event-rules/blob/1.12.0/build/recommended.json -// -//go:embed rules.json -var StaticRecommendedRules string diff --git a/vendor/github.com/DataDog/appsec-internal-go/appsec/rules.go b/vendor/github.com/DataDog/appsec-internal-go/appsec/rules.go deleted file mode 100644 index 29a46bf2..00000000 --- a/vendor/github.com/DataDog/appsec-internal-go/appsec/rules.go +++ /dev/null @@ -1,26 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2023-present Datadog, Inc. 
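Editor's note: the two small files deleted here worked as a pair. embed.go shipped the recommended ruleset JSON into the binary via go:embed, and rules.go (whose body follows) exposed it either marshaled or as a generic map. A minimal sketch of the same pattern, assuming a rules.json file sitting next to the Go source:

    package rules

    import (
        _ "embed" // required for the go:embed directive below
        "encoding/json"
    )

    //go:embed rules.json
    var staticRules string

    // RulesetMap decodes the embedded JSON into a generic map, mirroring the
    // deleted DefaultRulesetMap helper.
    func RulesetMap() (map[string]any, error) {
        var m map[string]any
        if err := json.Unmarshal([]byte(staticRules), &m); err != nil {
            return nil, err
        }
        return m, nil
    }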
-package appsec - -import "encoding/json" - -// DefaultRuleset returns the marshaled default recommended security rules for AppSec -func DefaultRuleset() ([]byte, error) { - rules, err := DefaultRulesetMap() - if err != nil { - return nil, err - } - return json.Marshal(rules) -} - -// DefaultRulesetMap returns the unmarshaled default recommended security rules for AppSec -func DefaultRulesetMap() (map[string]any, error) { - var rules map[string]any - if err := json.Unmarshal([]byte(StaticRecommendedRules), &rules); err != nil { - return nil, err - } - - return rules, nil -} diff --git a/vendor/github.com/DataDog/appsec-internal-go/appsec/rules.json b/vendor/github.com/DataDog/appsec-internal-go/appsec/rules.json deleted file mode 100644 index 0b25be93..00000000 --- a/vendor/github.com/DataDog/appsec-internal-go/appsec/rules.json +++ /dev/null @@ -1,9565 +0,0 @@ -{ - "version": "2.2", - "metadata": { - "rules_version": "1.12.0" - }, - "rules": [ - { - "id": "blk-001-001", - "name": "Block IP Addresses", - "tags": { - "type": "block_ip", - "category": "security_response" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "http.client_ip" - } - ], - "data": "blocked_ips" - }, - "operator": "ip_match" - } - ], - "transformers": [], - "on_match": [ - "block" - ] - }, - { - "id": "blk-001-002", - "name": "Block User Addresses", - "tags": { - "type": "block_user", - "category": "security_response" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "usr.id" - } - ], - "data": "blocked_users" - }, - "operator": "exact_match" - } - ], - "transformers": [], - "on_match": [ - "block" - ] - }, - { - "id": "crs-913-110", - "name": "Acunetix", - "tags": { - "type": "commercial_scanner", - "crs_id": "913110", - "category": "attack_attempt", - "tool_name": "Acunetix", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies" - } - ], - "list": [ - "acunetix-product", - "(acunetix web vulnerability scanner", - "acunetix-scanning-agreement", - "acunetix-user-agreement", - "md5(acunetix_wvs_security_test)" - ] - }, - "operator": "phrase_match" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "crs-913-120", - "name": "Known security scanner filename/argument", - "tags": { - "type": "security_scanner", - "crs_id": "913120", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "list": [ - "/.adsensepostnottherenonobook", - "/hello.html", - "/actsensepostnottherenonotive", - "/acunetix-wvs-test-for-some-inexistent-file", - "/antidisestablishmentarianism", - "/appscan_fingerprint/mac_address", - "/arachni-", - "/cybercop", - "/nessus_is_probing_you_", - "/nessustest", - "/netsparker-", - "/rfiinc.txt", - "/thereisnowaythat-you-canbethere", - "/w3af/remotefileinclude.html", - "appscan_fingerprint", - "w00tw00t.at.isc.sans.dfind", - "w00tw00t.at.blackhats.romanian.anti-sec" - ], - "options": { - "enforce_word_boundary": true - } - }, - "operator": "phrase_match" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "crs-920-260", - 
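Editor's note: the deleted rules.json is one long list of entries shaped like the rules opening the file above: an id/name/tags header, conditions that apply an operator (ip_match, exact_match, phrase_match, match_regex) to request "addresses" with operator-specific parameters, and a list of input transformers. A minimal sketch of decoding that shape, with hypothetical Go types covering only the fields visible in this diff:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // rule models the fields visible in the deleted entries; real entries can
    // carry more (options, on_match, ...).
    type rule struct {
        ID         string            `json:"id"`
        Name       string            `json:"name"`
        Tags       map[string]string `json:"tags"`
        Conditions []struct {
            Operator   string          `json:"operator"`
            Parameters json.RawMessage `json:"parameters"`
        } `json:"conditions"`
        Transformers []string `json:"transformers"`
    }

    func main() {
        doc := `{"version":"2.2","metadata":{"rules_version":"1.12.0"},"rules":[
          {"id":"blk-001-001","name":"Block IP Addresses",
           "tags":{"type":"block_ip","category":"security_response"},
           "conditions":[{"operator":"ip_match",
             "parameters":{"inputs":[{"address":"http.client_ip"}],"data":"blocked_ips"}}],
           "transformers":[]}]}`
        var rs struct {
            Rules []rule `json:"rules"`
        }
        if err := json.Unmarshal([]byte(doc), &rs); err != nil {
            panic(err)
        }
        fmt.Println(rs.Rules[0].ID, rs.Rules[0].Conditions[0].Operator) // blk-001-001 ip_match
    }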
"name": "Unicode Full/Half Width Abuse Attack Attempt", - "tags": { - "type": "http_protocol_violation", - "crs_id": "920260", - "category": "attack_attempt", - "cwe": "176", - "capec": "1000/255/153/267/71", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - } - ], - "regex": "\\%u[fF]{2}[0-9a-fA-F]{2}", - "options": { - "case_sensitive": true, - "min_length": 6 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-921-110", - "name": "HTTP Request Smuggling Attack", - "tags": { - "type": "http_protocol_violation", - "crs_id": "921110", - "category": "attack_attempt", - "cwe": "444", - "capec": "1000/210/272/220/33" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - } - ], - "regex": "(?:get|post|head|options|connect|put|delete|trace|track|patch|propfind|propatch|mkcol|copy|move|lock|unlock)\\s+[^\\s]+\\s+http/\\d", - "options": { - "case_sensitive": true, - "min_length": 12 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "crs-921-160", - "name": "HTTP Header Injection Attack via payload (CR/LF and header-name detected)", - "tags": { - "type": "http_protocol_violation", - "crs_id": "921160", - "category": "attack_attempt", - "cwe": "113", - "capec": "1000/210/272/220/105" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.path_params" - } - ], - "regex": "[\\n\\r]+(?:refresh|(?:set-)?cookie|(?:x-)?(?:forwarded-(?:for|host|server)|via|remote-ip|remote-addr|originating-IP))\\s*:", - "options": { - "case_sensitive": true, - "min_length": 3 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "crs-930-100", - "name": "Obfuscated Path Traversal Attack (/../)", - "tags": { - "type": "lfi", - "crs_id": "930100", - "category": "attack_attempt", - "cwe": "22", - "capec": "1000/255/153/126", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - }, - { - "address": "server.request.headers.no_cookies" - } - ], - "regex": "(?:%(?:c(?:0%(?:[2aq]f|5c|9v)|1%(?:[19p]c|8s|af))|2(?:5(?:c(?:0%25af|1%259c)|2f|5c)|%46|f)|(?:(?:f(?:8%8)?0%8|e)0%80%a|bg%q)f|%3(?:2(?:%(?:%6|4)6|F)|5%%63)|u(?:221[56]|002f|EFC8|F025)|1u|5c)|0x(?:2f|5c)|\\/|\\x5c)(?:%(?:(?:f(?:(?:c%80|8)%8)?0%8|e)0%80%ae|2(?:(?:5(?:c0%25a|2))?e|%45)|u(?:(?:002|ff0)e|2024)|%32(?:%(?:%6|4)5|E)|c0(?:%[256aef]e|\\.))|\\.(?:%0[01])?|0x2e){2,3}(?:%(?:c(?:0%(?:[2aq]f|5c|9v)|1%(?:[19p]c|8s|af))|2(?:5(?:c(?:0%25af|1%259c)|2f|5c)|%46|f)|(?:(?:f(?:8%8)?0%8|e)0%80%a|bg%q)f|%3(?:2(?:%(?:%6|4)6|F)|5%%63)|u(?:221[56]|002f|EFC8|F025)|1u|5c)|0x(?:2f|5c)|\\/|\\x5c)", - "options": { - "min_length": 4 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "normalizePath" - ] - }, - { - "id": "crs-930-110", - "name": "Simple Path Traversal Attack (/../)", - "tags": { - "type": "lfi", - "crs_id": "930110", - "category": "attack_attempt", - "cwe": "22", - "capec": "1000/255/153/126", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - }, - { - "address": "server.request.headers.no_cookies" - } - ], - "regex": "(?:(?:^|[\\x5c/])\\.{2,3}[\\x5c/]|[\\x5c/]\\.{2,3}(?:[\\x5c/]|$))", - "options": { 
- "case_sensitive": true, - "min_length": 3 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls" - ] - }, - { - "id": "crs-930-120", - "name": "OS File Access Attempt", - "tags": { - "type": "lfi", - "crs_id": "930120", - "category": "attack_attempt", - "cwe": "22", - "capec": "1000/255/153/126", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "list": [ - "/.htaccess", - "/.htdigest", - "/.htpasswd", - "/.addressbook", - "/.aptitude/config", - ".aws/config", - ".aws/credentials", - "/.bash_config", - "/.bash_history", - "/.bash_logout", - "/.bash_profile", - "/.bashrc", - ".cache/notify-osd.log", - ".config/odesk/odesk team.conf", - "/.cshrc", - "/.dockerignore", - ".drush/", - "/.eslintignore", - "/.fbcindex", - "/.forward", - "/.git", - ".git/", - "/.gitattributes", - "/.gitconfig", - ".gnupg/", - ".hplip/hplip.conf", - "/.ksh_history", - "/.lesshst", - ".lftp/", - "/.lhistory", - "/.lldb-history", - ".local/share/mc/", - "/.lynx_cookies", - "/.my.cnf", - "/.mysql_history", - "/.nano_history", - "/.node_repl_history", - "/.pearrc", - "/.pgpass", - "/.php_history", - "/.pinerc", - ".pki/", - "/.proclog", - "/.procmailrc", - "/.psql_history", - "/.python_history", - "/.rediscli_history", - "/.rhistory", - "/.rhosts", - "/.sh_history", - "/.sqlite_history", - ".ssh/authorized_keys", - ".ssh/config", - ".ssh/id_dsa", - ".ssh/id_dsa.pub", - ".ssh/id_rsa", - ".ssh/id_rsa.pub", - ".ssh/identity", - ".ssh/identity.pub", - ".ssh/id_ecdsa", - ".ssh/id_ecdsa.pub", - ".ssh/known_hosts", - ".subversion/auth", - ".subversion/config", - ".subversion/servers", - ".tconn/tconn.conf", - "/.tcshrc", - ".vidalia/vidalia.conf", - "/.viminfo", - "/.vimrc", - "/.www_acl", - "/.wwwacl", - "/.xauthority", - "/.zhistory", - "/.zshrc", - "/.zsh_history", - "/.nsconfig", - "data/elasticsearch", - "data/kafka", - "etc/ansible", - "etc/bind", - "etc/centos-release", - "etc/centos-release-upstream", - "etc/clam.d", - "etc/elasticsearch", - "etc/freshclam.conf", - "etc/gshadow", - "etc/gshadow-", - "etc/httpd", - "etc/kafka", - "etc/kibana", - "etc/logstash", - "etc/lvm", - "etc/mongod.conf", - "etc/my.cnf", - "etc/nuxeo.conf", - "etc/pki", - "etc/postfix", - "etc/scw-release", - "etc/subgid", - "etc/subgid-", - "etc/sudoers.d", - "etc/sysconfig", - "etc/system-release-cpe", - "opt/nuxeo", - "opt/tomcat", - "tmp/kafka-logs", - "usr/lib/rpm/rpm.log", - "var/data/elasticsearch", - "var/lib/elasticsearch", - "etc/.java", - "etc/acpi", - "etc/alsa", - "etc/alternatives", - "etc/apache2", - "etc/apm", - "etc/apparmor", - "etc/apparmor.d", - "etc/apport", - "etc/apt", - "etc/asciidoc", - "etc/avahi", - "etc/bash_completion.d", - "etc/binfmt.d", - "etc/bluetooth", - "etc/bonobo-activation", - "etc/brltty", - "etc/ca-certificates", - "etc/calendar", - "etc/chatscripts", - "etc/chromium-browser", - "etc/clamav", - "etc/cni", - "etc/console-setup", - "etc/coraza-waf", - "etc/cracklib", - "etc/cron.d", - "etc/cron.daily", - "etc/cron.hourly", - "etc/cron.monthly", - "etc/cron.weekly", - "etc/cups", - "etc/cups.save", - "etc/cupshelpers", - "etc/dbus-1", - "etc/dconf", - "etc/default", - "etc/depmod.d", - "etc/dhcp", - "etc/dictionaries-common", - "etc/dkms", - "etc/dnsmasq.d", 
- "etc/docker", - "etc/dpkg", - "etc/emacs", - "etc/environment.d", - "etc/fail2ban", - "etc/firebird", - "etc/firefox", - "etc/fonts", - "etc/fwupd", - "etc/gconf", - "etc/gdb", - "etc/gdm3", - "etc/geoclue", - "etc/ghostscript", - "etc/gimp", - "etc/glvnd", - "etc/gnome", - "etc/gnome-vfs-2.0", - "etc/gnucash", - "etc/gnustep", - "etc/groff", - "etc/grub.d", - "etc/gss", - "etc/gtk-2.0", - "etc/gtk-3.0", - "etc/hp", - "etc/ifplugd", - "etc/imagemagick-6", - "etc/init", - "etc/init.d", - "etc/initramfs-tools", - "etc/insserv.conf.d", - "etc/iproute2", - "etc/iptables", - "etc/java", - "etc/java-11-openjdk", - "etc/java-17-oracle", - "etc/java-8-openjdk", - "etc/kernel", - "etc/ld.so.conf.d", - "etc/ldap", - "etc/libblockdev", - "etc/libibverbs.d", - "etc/libnl-3", - "etc/libpaper.d", - "etc/libreoffice", - "etc/lighttpd", - "etc/logcheck", - "etc/logrotate.d", - "etc/lynx", - "etc/mail", - "etc/mc", - "etc/menu", - "etc/menu-methods", - "etc/modprobe.d", - "etc/modsecurity", - "etc/modules-load.d", - "etc/monit", - "etc/mono", - "etc/mplayer", - "etc/mpv", - "etc/muttrc.d", - "etc/mysql", - "etc/netplan", - "etc/network", - "etc/networkd-dispatcher", - "etc/networkmanager", - "etc/newt", - "etc/nghttpx", - "etc/nikto", - "etc/odbcdatasources", - "etc/openal", - "etc/openmpi", - "etc/opt", - "etc/osync", - "etc/packagekit", - "etc/pam.d", - "etc/pcmcia", - "etc/perl", - "etc/php", - "etc/pki", - "etc/pm", - "etc/polkit-1", - "etc/postfix", - "etc/ppp", - "etc/profile.d", - "etc/proftpd", - "etc/pulse", - "etc/python", - "etc/rc0.d", - "etc/rc1.d", - "etc/rc2.d", - "etc/rc3.d", - "etc/rc4.d", - "etc/rc5.d", - "etc/rc6.d", - "etc/rcs.d", - "etc/resolvconf", - "etc/rsyslog.d", - "etc/samba", - "etc/sane.d", - "etc/security", - "etc/selinux", - "etc/sensors.d", - "etc/sgml", - "etc/signon-ui", - "etc/skel", - "etc/snmp", - "etc/sound", - "etc/spamassassin", - "etc/speech-dispatcher", - "etc/ssh", - "etc/ssl", - "etc/sudoers.d", - "etc/sysctl.d", - "etc/sysstat", - "etc/systemd", - "etc/terminfo", - "etc/texmf", - "etc/thermald", - "etc/thnuclnt", - "etc/thunderbird", - "etc/timidity", - "etc/tmpfiles.d", - "etc/ubuntu-advantage", - "etc/udev", - "etc/udisks2", - "etc/ufw", - "etc/update-manager", - "etc/update-motd.d", - "etc/update-notifier", - "etc/upower", - "etc/urlview", - "etc/usb_modeswitch.d", - "etc/vim", - "etc/vmware", - "etc/vmware-installer", - "etc/vmware-vix", - "etc/vulkan", - "etc/w3m", - "etc/wireshark", - "etc/wpa_supplicant", - "etc/x11", - "etc/xdg", - "etc/xml", - "etc/redis.conf", - "etc/redis-sentinel.conf", - "etc/php.ini", - "bin/php.ini", - "etc/httpd/php.ini", - "usr/lib/php.ini", - "usr/lib/php/php.ini", - "usr/local/etc/php.ini", - "usr/local/lib/php.ini", - "usr/local/php/lib/php.ini", - "usr/local/php4/lib/php.ini", - "usr/local/php5/lib/php.ini", - "usr/local/apache/conf/php.ini", - "etc/php4.4/fcgi/php.ini", - "etc/php4/apache/php.ini", - "etc/php4/apache2/php.ini", - "etc/php5/apache/php.ini", - "etc/php5/apache2/php.ini", - "etc/php/php.ini", - "etc/php/php4/php.ini", - "etc/php/apache/php.ini", - "etc/php/apache2/php.ini", - "web/conf/php.ini", - "usr/local/zend/etc/php.ini", - "opt/xampp/etc/php.ini", - "var/local/www/conf/php.ini", - "etc/php/cgi/php.ini", - "etc/php4/cgi/php.ini", - "etc/php5/cgi/php.ini", - "home2/bin/stable/apache/php.ini", - "home/bin/stable/apache/php.ini", - "etc/httpd/conf.d/php.conf", - "php5/php.ini", - "php4/php.ini", - "php/php.ini", - "windows/php.ini", - "winnt/php.ini", - "apache/php/php.ini", - "xampp/apache/bin/php.ini", -
"netserver/bin/stable/apache/php.ini", - "volumes/macintosh_hd1/usr/local/php/lib/php.ini", - "etc/mono/1.0/machine.config", - "etc/mono/2.0/machine.config", - "etc/mono/2.0/web.config", - "etc/mono/config", - "usr/local/cpanel/logs/stats_log", - "usr/local/cpanel/logs/access_log", - "usr/local/cpanel/logs/error_log", - "usr/local/cpanel/logs/license_log", - "usr/local/cpanel/logs/login_log", - "var/cpanel/cpanel.config", - "usr/local/psa/admin/logs/httpsd_access_log", - "usr/local/psa/admin/logs/panel.log", - "usr/local/psa/admin/conf/php.ini", - "etc/sw-cp-server/applications.d/plesk.conf", - "usr/local/psa/admin/conf/site_isolation_settings.ini", - "usr/local/sb/config", - "etc/sw-cp-server/applications.d/00-sso-cpserver.conf", - "etc/sso/sso_config.ini", - "etc/mysql/conf.d/old_passwords.cnf", - "var/mysql.log", - "var/mysql-bin.index", - "var/data/mysql-bin.index", - "program files/mysql/mysql server 5.0/data/{host}.err", - "program files/mysql/mysql server 5.0/data/mysql.log", - "program files/mysql/mysql server 5.0/data/mysql.err", - "program files/mysql/mysql server 5.0/data/mysql-bin.log", - "program files/mysql/mysql server 5.0/data/mysql-bin.index", - "program files/mysql/data/{host}.err", - "program files/mysql/data/mysql.log", - "program files/mysql/data/mysql.err", - "program files/mysql/data/mysql-bin.log", - "program files/mysql/data/mysql-bin.index", - "mysql/data/{host}.err", - "mysql/data/mysql.log", - "mysql/data/mysql.err", - "mysql/data/mysql-bin.log", - "mysql/data/mysql-bin.index", - "usr/local/mysql/data/mysql.log", - "usr/local/mysql/data/mysql.err", - "usr/local/mysql/data/mysql-bin.log", - "usr/local/mysql/data/mysql-slow.log", - "usr/local/mysql/data/mysqlderror.log", - "usr/local/mysql/data/{host}.err", - "usr/local/mysql/data/mysql-bin.index", - "var/lib/mysql/my.cnf", - "etc/mysql/my.cnf", - "etc/my.cnf", - "program files/mysql/mysql server 5.0/my.ini", - "program files/mysql/mysql server 5.0/my.cnf", - "program files/mysql/my.ini", - "program files/mysql/my.cnf", - "mysql/my.ini", - "mysql/my.cnf", - "mysql/bin/my.ini", - "var/postgresql/log/postgresql.log", - "usr/internet/pgsql/data/postmaster.log", - "usr/local/pgsql/data/postgresql.log", - "usr/local/pgsql/data/pg_log", - "postgresql/log/pgadmin.log", - "var/lib/pgsql/data/postgresql.conf", - "var/postgresql/db/postgresql.conf", - "var/nm2/postgresql.conf", - "usr/local/pgsql/data/postgresql.conf", - "usr/local/pgsql/data/pg_hba.conf", - "usr/internet/pgsql/data/pg_hba.conf", - "usr/local/pgsql/data/passwd", - "usr/local/pgsql/bin/pg_passwd", - "etc/postgresql/postgresql.conf", - "etc/postgresql/pg_hba.conf", - "home/postgres/data/postgresql.conf", - "home/postgres/data/pg_version", - "home/postgres/data/pg_ident.conf", - "home/postgres/data/pg_hba.conf", - "program files/postgresql/8.3/data/pg_hba.conf", - "program files/postgresql/8.3/data/pg_ident.conf", - "program files/postgresql/8.3/data/postgresql.conf", - "program files/postgresql/8.4/data/pg_hba.conf", - "program files/postgresql/8.4/data/pg_ident.conf", - "program files/postgresql/8.4/data/postgresql.conf", - "program files/postgresql/9.0/data/pg_hba.conf", - "program files/postgresql/9.0/data/pg_ident.conf", - "program files/postgresql/9.0/data/postgresql.conf", - "program files/postgresql/9.1/data/pg_hba.conf", - "program files/postgresql/9.1/data/pg_ident.conf", - "program files/postgresql/9.1/data/postgresql.conf", - "wamp/logs/access.log", - "wamp/logs/apache_error.log", - "wamp/logs/genquery.log", - "wamp/logs/mysql.log", - 
"wamp/logs/slowquery.log", - "wamp/bin/apache/apache2.2.22/logs/access.log", - "wamp/bin/apache/apache2.2.22/logs/error.log", - "wamp/bin/apache/apache2.2.21/logs/access.log", - "wamp/bin/apache/apache2.2.21/logs/error.log", - "wamp/bin/mysql/mysql5.5.24/data/mysql-bin.index", - "wamp/bin/mysql/mysql5.5.16/data/mysql-bin.index", - "wamp/bin/apache/apache2.2.21/conf/httpd.conf", - "wamp/bin/apache/apache2.2.22/conf/httpd.conf", - "wamp/bin/apache/apache2.2.21/wampserver.conf", - "wamp/bin/apache/apache2.2.22/wampserver.conf", - "wamp/bin/apache/apache2.2.22/conf/wampserver.conf", - "wamp/bin/mysql/mysql5.5.24/my.ini", - "wamp/bin/mysql/mysql5.5.24/wampserver.conf", - "wamp/bin/mysql/mysql5.5.16/my.ini", - "wamp/bin/mysql/mysql5.5.16/wampserver.conf", - "wamp/bin/php/php5.3.8/php.ini", - "wamp/bin/php/php5.4.3/php.ini", - "xampp/apache/logs/access.log", - "xampp/apache/logs/error.log", - "xampp/mysql/data/mysql-bin.index", - "xampp/mysql/data/mysql.err", - "xampp/mysql/data/{host}.err", - "xampp/sendmail/sendmail.log", - "xampp/apache/conf/httpd.conf", - "xampp/filezillaftp/filezilla server.xml", - "xampp/mercurymail/mercury.ini", - "xampp/php/php.ini", - "xampp/phpmyadmin/config.inc.php", - "xampp/sendmail/sendmail.ini", - "xampp/webalizer/webalizer.conf", - "opt/lampp/etc/httpd.conf", - "xampp/htdocs/aca.txt", - "xampp/htdocs/admin.php", - "xampp/htdocs/leer.txt", - "usr/local/apache/logs/audit_log", - "usr/local/apache2/logs/audit_log", - "logs/security_debug_log", - "logs/security_log", - "usr/local/apache/conf/modsec.conf", - "usr/local/apache2/conf/modsec.conf", - "winnt/system32/logfiles/msftpsvc", - "winnt/system32/logfiles/msftpsvc1", - "winnt/system32/logfiles/msftpsvc2", - "windows/system32/logfiles/msftpsvc", - "windows/system32/logfiles/msftpsvc1", - "windows/system32/logfiles/msftpsvc2", - "etc/logrotate.d/proftpd", - "www/logs/proftpd.system.log", - "etc/pam.d/proftpd", - "etc/proftp.conf", - "etc/protpd/proftpd.conf", - "etc/vhcs2/proftpd/proftpd.conf", - "etc/proftpd/modules.conf", - "etc/vsftpd.chroot_list", - "etc/logrotate.d/vsftpd.log", - "etc/vsftpd/vsftpd.conf", - "etc/vsftpd.conf", - "etc/chrootusers", - "var/adm/log/xferlog", - "etc/wu-ftpd/ftpaccess", - "etc/wu-ftpd/ftphosts", - "etc/wu-ftpd/ftpusers", - "logs/pure-ftpd.log", - "usr/sbin/pure-config.pl", - "usr/etc/pure-ftpd.conf", - "etc/pure-ftpd/pure-ftpd.conf", - "usr/local/etc/pure-ftpd.conf", - "usr/local/etc/pureftpd.pdb", - "usr/local/pureftpd/etc/pureftpd.pdb", - "usr/local/pureftpd/sbin/pure-config.pl", - "usr/local/pureftpd/etc/pure-ftpd.conf", - "etc/pure-ftpd.conf", - "etc/pure-ftpd/pure-ftpd.pdb", - "etc/pureftpd.pdb", - "etc/pureftpd.passwd", - "etc/pure-ftpd/pureftpd.pdb", - "usr/ports/ftp/pure-ftpd/pure-ftpd.conf", - "usr/ports/ftp/pure-ftpd/pureftpd.pdb", - "usr/ports/ftp/pure-ftpd/pureftpd.passwd", - "usr/ports/net/pure-ftpd/pure-ftpd.conf", - "usr/ports/net/pure-ftpd/pureftpd.pdb", - "usr/ports/net/pure-ftpd/pureftpd.passwd", - "usr/pkgsrc/net/pureftpd/pure-ftpd.conf", - "usr/pkgsrc/net/pureftpd/pureftpd.pdb", - "usr/pkgsrc/net/pureftpd/pureftpd.passwd", - "usr/ports/contrib/pure-ftpd/pure-ftpd.conf", - "usr/ports/contrib/pure-ftpd/pureftpd.pdb", - "usr/ports/contrib/pure-ftpd/pureftpd.passwd", - "usr/sbin/mudlogd", - "etc/muddleftpd/mudlog", - "etc/muddleftpd.com", - "etc/muddleftpd/mudlogd.conf", - "etc/muddleftpd/muddleftpd.conf", - "usr/sbin/mudpasswd", - "etc/muddleftpd/muddleftpd.passwd", - "etc/muddleftpd/passwd", - "etc/logrotate.d/ftp", - "etc/ftpchroot", - "etc/ftphosts", - 
"etc/ftpusers", - "winnt/system32/logfiles/smtpsvc", - "winnt/system32/logfiles/smtpsvc1", - "winnt/system32/logfiles/smtpsvc2", - "winnt/system32/logfiles/smtpsvc3", - "winnt/system32/logfiles/smtpsvc4", - "winnt/system32/logfiles/smtpsvc5", - "windows/system32/logfiles/smtpsvc", - "windows/system32/logfiles/smtpsvc1", - "windows/system32/logfiles/smtpsvc2", - "windows/system32/logfiles/smtpsvc3", - "windows/system32/logfiles/smtpsvc4", - "windows/system32/logfiles/smtpsvc5", - "etc/osxhttpd/osxhttpd.conf", - "system/library/webobjects/adaptors/apache2.2/apache.conf", - "etc/apache2/sites-available/default", - "etc/apache2/sites-available/default-ssl", - "etc/apache2/sites-enabled/000-default", - "etc/apache2/sites-enabled/default", - "etc/apache2/apache2.conf", - "etc/apache2/ports.conf", - "usr/local/etc/apache/httpd.conf", - "usr/pkg/etc/httpd/httpd.conf", - "usr/pkg/etc/httpd/httpd-default.conf", - "usr/pkg/etc/httpd/httpd-vhosts.conf", - "etc/httpd/mod_php.conf", - "etc/httpd/extra/httpd-ssl.conf", - "etc/rc.d/rc.httpd", - "usr/local/apache/conf/httpd.conf.default", - "usr/local/apache/conf/access.conf", - "usr/local/apache22/conf/httpd.conf", - "usr/local/apache22/httpd.conf", - "usr/local/etc/apache22/conf/httpd.conf", - "usr/local/apps/apache22/conf/httpd.conf", - "etc/apache22/conf/httpd.conf", - "etc/apache22/httpd.conf", - "opt/apache22/conf/httpd.conf", - "usr/local/etc/apache2/vhosts.conf", - "usr/local/apache/conf/vhosts.conf", - "usr/local/apache2/conf/vhosts.conf", - "usr/local/apache/conf/vhosts-custom.conf", - "usr/local/apache2/conf/vhosts-custom.conf", - "etc/apache/default-server.conf", - "etc/apache2/default-server.conf", - "usr/local/apache2/conf/extra/httpd-ssl.conf", - "usr/local/apache2/conf/ssl.conf", - "etc/httpd/conf.d", - "usr/local/etc/apache22/httpd.conf", - "usr/local/etc/apache2/httpd.conf", - "etc/apache2/httpd2.conf", - "etc/apache2/ssl-global.conf", - "etc/apache2/vhosts.d/00_default_vhost.conf", - "apache/conf/httpd.conf", - "etc/apache/httpd.conf", - "etc/httpd/conf", - "http/httpd.conf", - "usr/local/apache1.3/conf/httpd.conf", - "usr/local/etc/httpd/conf", - "var/apache/conf/httpd.conf", - "var/www/conf", - "www/apache/conf/httpd.conf", - "www/conf/httpd.conf", - "etc/init.d", - "etc/apache/access.conf", - "etc/rc.conf", - "www/logs/freebsddiary-error.log", - "www/logs/freebsddiary-access_log", - "library/webserver/documents/index.html", - "library/webserver/documents/index.htm", - "library/webserver/documents/default.html", - "library/webserver/documents/default.htm", - "library/webserver/documents/index.php", - "library/webserver/documents/default.php", - "usr/local/etc/webmin/miniserv.conf", - "etc/webmin/miniserv.conf", - "usr/local/etc/webmin/miniserv.users", - "etc/webmin/miniserv.users", - "winnt/system32/logfiles/w3svc/inetsvn1.log", - "winnt/system32/logfiles/w3svc1/inetsvn1.log", - "winnt/system32/logfiles/w3svc2/inetsvn1.log", - "winnt/system32/logfiles/w3svc3/inetsvn1.log", - "windows/system32/logfiles/w3svc/inetsvn1.log", - "windows/system32/logfiles/w3svc1/inetsvn1.log", - "windows/system32/logfiles/w3svc2/inetsvn1.log", - "windows/system32/logfiles/w3svc3/inetsvn1.log", - "apache/logs/error.log", - "apache/logs/access.log", - "apache2/logs/error.log", - "apache2/logs/access.log", - "logs/error.log", - "logs/access.log", - "etc/httpd/logs/access_log", - "etc/httpd/logs/access.log", - "etc/httpd/logs/error_log", - "etc/httpd/logs/error.log", - "usr/local/apache/logs/access_log", - "usr/local/apache/logs/access.log", - 
"usr/local/apache/logs/error_log", - "usr/local/apache/logs/error.log", - "usr/local/apache2/logs/access_log", - "usr/local/apache2/logs/access.log", - "usr/local/apache2/logs/error_log", - "usr/local/apache2/logs/error.log", - "var/www/logs/access_log", - "var/www/logs/access.log", - "var/www/logs/error_log", - "var/www/logs/error.log", - "opt/lampp/logs/access_log", - "opt/lampp/logs/error_log", - "opt/xampp/logs/access_log", - "opt/xampp/logs/error_log", - "opt/lampp/logs/access.log", - "opt/lampp/logs/error.log", - "opt/xampp/logs/access.log", - "opt/xampp/logs/error.log", - "program files/apache group/apache/logs/access.log", - "program files/apache group/apache/logs/error.log", - "program files/apache software foundation/apache2.2/logs/error.log", - "program files/apache software foundation/apache2.2/logs/access.log", - "opt/apache/apache.conf", - "opt/apache/conf/apache.conf", - "opt/apache2/apache.conf", - "opt/apache2/conf/apache.conf", - "opt/httpd/apache.conf", - "opt/httpd/conf/apache.conf", - "etc/httpd/apache.conf", - "etc/apache2/apache.conf", - "etc/httpd/conf/apache.conf", - "usr/local/apache/apache.conf", - "usr/local/apache/conf/apache.conf", - "usr/local/apache2/apache.conf", - "usr/local/apache2/conf/apache.conf", - "usr/local/php/apache.conf.php", - "usr/local/php4/apache.conf.php", - "usr/local/php5/apache.conf.php", - "usr/local/php/apache.conf", - "usr/local/php4/apache.conf", - "usr/local/php5/apache.conf", - "private/etc/httpd/apache.conf", - "opt/apache/apache2.conf", - "opt/apache/conf/apache2.conf", - "opt/apache2/apache2.conf", - "opt/apache2/conf/apache2.conf", - "opt/httpd/apache2.conf", - "opt/httpd/conf/apache2.conf", - "etc/httpd/apache2.conf", - "etc/httpd/conf/apache2.conf", - "usr/local/apache/apache2.conf", - "usr/local/apache/conf/apache2.conf", - "usr/local/apache2/apache2.conf", - "usr/local/apache2/conf/apache2.conf", - "usr/local/php/apache2.conf.php", - "usr/local/php4/apache2.conf.php", - "usr/local/php5/apache2.conf.php", - "usr/local/php/apache2.conf", - "usr/local/php4/apache2.conf", - "usr/local/php5/apache2.conf", - "private/etc/httpd/apache2.conf", - "usr/local/apache/conf/httpd.conf", - "usr/local/apache2/conf/httpd.conf", - "etc/httpd/conf/httpd.conf", - "etc/apache/apache.conf", - "etc/apache/conf/httpd.conf", - "etc/apache2/httpd.conf", - "usr/apache2/conf/httpd.conf", - "usr/apache/conf/httpd.conf", - "usr/local/etc/apache/conf/httpd.conf", - "usr/local/apache/httpd.conf", - "usr/local/apache2/httpd.conf", - "usr/local/httpd/conf/httpd.conf", - "usr/local/etc/apache2/conf/httpd.conf", - "usr/local/etc/httpd/conf/httpd.conf", - "usr/local/apps/apache2/conf/httpd.conf", - "usr/local/apps/apache/conf/httpd.conf", - "usr/local/php/httpd.conf.php", - "usr/local/php4/httpd.conf.php", - "usr/local/php5/httpd.conf.php", - "usr/local/php/httpd.conf", - "usr/local/php4/httpd.conf", - "usr/local/php5/httpd.conf", - "etc/apache2/conf/httpd.conf", - "etc/http/conf/httpd.conf", - "etc/httpd/httpd.conf", - "etc/http/httpd.conf", - "etc/httpd.conf", - "opt/apache/conf/httpd.conf", - "opt/apache2/conf/httpd.conf", - "var/www/conf/httpd.conf", - "private/etc/httpd/httpd.conf", - "private/etc/httpd/httpd.conf.default", - "etc/apache2/vhosts.d/default_vhost.include", - "etc/apache2/conf.d/charset", - "etc/apache2/conf.d/security", - "etc/apache2/envvars", - "etc/apache2/mods-available/autoindex.conf", - "etc/apache2/mods-available/deflate.conf", - "etc/apache2/mods-available/dir.conf", - "etc/apache2/mods-available/mem_cache.conf", - 
"etc/apache2/mods-available/mime.conf", - "etc/apache2/mods-available/proxy.conf", - "etc/apache2/mods-available/setenvif.conf", - "etc/apache2/mods-available/ssl.conf", - "etc/apache2/mods-enabled/alias.conf", - "etc/apache2/mods-enabled/deflate.conf", - "etc/apache2/mods-enabled/dir.conf", - "etc/apache2/mods-enabled/mime.conf", - "etc/apache2/mods-enabled/negotiation.conf", - "etc/apache2/mods-enabled/php5.conf", - "etc/apache2/mods-enabled/status.conf", - "program files/apache group/apache/conf/httpd.conf", - "program files/apache group/apache2/conf/httpd.conf", - "program files/xampp/apache/conf/apache.conf", - "program files/xampp/apache/conf/apache2.conf", - "program files/xampp/apache/conf/httpd.conf", - "program files/apache group/apache/apache.conf", - "program files/apache group/apache/conf/apache.conf", - "program files/apache group/apache2/conf/apache.conf", - "program files/apache group/apache/apache2.conf", - "program files/apache group/apache/conf/apache2.conf", - "program files/apache group/apache2/conf/apache2.conf", - "program files/apache software foundation/apache2.2/conf/httpd.conf", - "volumes/macintosh_hd1/opt/httpd/conf/httpd.conf", - "volumes/macintosh_hd1/opt/apache/conf/httpd.conf", - "volumes/macintosh_hd1/opt/apache2/conf/httpd.conf", - "volumes/macintosh_hd1/usr/local/php/httpd.conf.php", - "volumes/macintosh_hd1/usr/local/php4/httpd.conf.php", - "volumes/macintosh_hd1/usr/local/php5/httpd.conf.php", - "volumes/webbackup/opt/apache2/conf/httpd.conf", - "volumes/webbackup/private/etc/httpd/httpd.conf", - "volumes/webbackup/private/etc/httpd/httpd.conf.default", - "usr/local/etc/apache/vhosts.conf", - "usr/local/jakarta/tomcat/conf/jakarta.conf", - "usr/local/jakarta/tomcat/conf/server.xml", - "usr/local/jakarta/tomcat/conf/context.xml", - "usr/local/jakarta/tomcat/conf/workers.properties", - "usr/local/jakarta/tomcat/conf/logging.properties", - "usr/local/jakarta/dist/tomcat/conf/jakarta.conf", - "usr/local/jakarta/dist/tomcat/conf/server.xml", - "usr/local/jakarta/dist/tomcat/conf/context.xml", - "usr/local/jakarta/dist/tomcat/conf/workers.properties", - "usr/local/jakarta/dist/tomcat/conf/logging.properties", - "usr/share/tomcat6/conf/server.xml", - "usr/share/tomcat6/conf/context.xml", - "usr/share/tomcat6/conf/workers.properties", - "usr/share/tomcat6/conf/logging.properties", - "var/cpanel/tomcat.options", - "usr/local/jakarta/tomcat/logs/catalina.out", - "usr/local/jakarta/tomcat/logs/catalina.err", - "opt/tomcat/logs/catalina.out", - "opt/tomcat/logs/catalina.err", - "usr/share/logs/catalina.out", - "usr/share/logs/catalina.err", - "usr/share/tomcat/logs/catalina.out", - "usr/share/tomcat/logs/catalina.err", - "usr/share/tomcat6/logs/catalina.out", - "usr/share/tomcat6/logs/catalina.err", - "usr/local/apache/logs/mod_jk.log", - "usr/local/jakarta/tomcat/logs/mod_jk.log", - "usr/local/jakarta/dist/tomcat/logs/mod_jk.log", - "opt/[jboss]/server/default/conf/jboss-minimal.xml", - "opt/[jboss]/server/default/conf/jboss-service.xml", - "opt/[jboss]/server/default/conf/jndi.properties", - "opt/[jboss]/server/default/conf/log4j.xml", - "opt/[jboss]/server/default/conf/login-config.xml", - "opt/[jboss]/server/default/conf/standardjaws.xml", - "opt/[jboss]/server/default/conf/standardjboss.xml", - "opt/[jboss]/server/default/conf/server.log.properties", - "opt/[jboss]/server/default/deploy/jboss-logging.xml", - "usr/local/[jboss]/server/default/conf/jboss-minimal.xml", - "usr/local/[jboss]/server/default/conf/jboss-service.xml", - 
"usr/local/[jboss]/server/default/conf/jndi.properties", - "usr/local/[jboss]/server/default/conf/log4j.xml", - "usr/local/[jboss]/server/default/conf/login-config.xml", - "usr/local/[jboss]/server/default/conf/standardjaws.xml", - "usr/local/[jboss]/server/default/conf/standardjboss.xml", - "usr/local/[jboss]/server/default/conf/server.log.properties", - "usr/local/[jboss]/server/default/deploy/jboss-logging.xml", - "private/tmp/[jboss]/server/default/conf/jboss-minimal.xml", - "private/tmp/[jboss]/server/default/conf/jboss-service.xml", - "private/tmp/[jboss]/server/default/conf/jndi.properties", - "private/tmp/[jboss]/server/default/conf/log4j.xml", - "private/tmp/[jboss]/server/default/conf/login-config.xml", - "private/tmp/[jboss]/server/default/conf/standardjaws.xml", - "private/tmp/[jboss]/server/default/conf/standardjboss.xml", - "private/tmp/[jboss]/server/default/conf/server.log.properties", - "private/tmp/[jboss]/server/default/deploy/jboss-logging.xml", - "tmp/[jboss]/server/default/conf/jboss-minimal.xml", - "tmp/[jboss]/server/default/conf/jboss-service.xml", - "tmp/[jboss]/server/default/conf/jndi.properties", - "tmp/[jboss]/server/default/conf/log4j.xml", - "tmp/[jboss]/server/default/conf/login-config.xml", - "tmp/[jboss]/server/default/conf/standardjaws.xml", - "tmp/[jboss]/server/default/conf/standardjboss.xml", - "tmp/[jboss]/server/default/conf/server.log.properties", - "tmp/[jboss]/server/default/deploy/jboss-logging.xml", - "program files/[jboss]/server/default/conf/jboss-minimal.xml", - "program files/[jboss]/server/default/conf/jboss-service.xml", - "program files/[jboss]/server/default/conf/jndi.properties", - "program files/[jboss]/server/default/conf/log4j.xml", - "program files/[jboss]/server/default/conf/login-config.xml", - "program files/[jboss]/server/default/conf/standardjaws.xml", - "program files/[jboss]/server/default/conf/standardjboss.xml", - "program files/[jboss]/server/default/conf/server.log.properties", - "program files/[jboss]/server/default/deploy/jboss-logging.xml", - "[jboss]/server/default/conf/jboss-minimal.xml", - "[jboss]/server/default/conf/jboss-service.xml", - "[jboss]/server/default/conf/jndi.properties", - "[jboss]/server/default/conf/log4j.xml", - "[jboss]/server/default/conf/login-config.xml", - "[jboss]/server/default/conf/standardjaws.xml", - "[jboss]/server/default/conf/standardjboss.xml", - "[jboss]/server/default/conf/server.log.properties", - "[jboss]/server/default/deploy/jboss-logging.xml", - "opt/[jboss]/server/default/log/server.log", - "opt/[jboss]/server/default/log/boot.log", - "usr/local/[jboss]/server/default/log/server.log", - "usr/local/[jboss]/server/default/log/boot.log", - "private/tmp/[jboss]/server/default/log/server.log", - "private/tmp/[jboss]/server/default/log/boot.log", - "tmp/[jboss]/server/default/log/server.log", - "tmp/[jboss]/server/default/log/boot.log", - "program files/[jboss]/server/default/log/server.log", - "program files/[jboss]/server/default/log/boot.log", - "[jboss]/server/default/log/server.log", - "[jboss]/server/default/log/boot.log", - "var/lighttpd.log", - "var/logs/access.log", - "usr/local/apache2/logs/lighttpd.error.log", - "usr/local/apache2/logs/lighttpd.log", - "usr/local/apache/logs/lighttpd.error.log", - "usr/local/apache/logs/lighttpd.log", - "usr/local/lighttpd/log/lighttpd.error.log", - "usr/local/lighttpd/log/access.log", - "usr/home/user/var/log/lighttpd.error.log", - "usr/home/user/var/log/apache.log", - "home/user/lighttpd/lighttpd.conf", - 
"usr/home/user/lighttpd/lighttpd.conf", - "etc/lighttpd/lighthttpd.conf", - "usr/local/etc/lighttpd.conf", - "usr/local/lighttpd/conf/lighttpd.conf", - "usr/local/etc/lighttpd.conf.new", - "var/www/.lighttpdpassword", - "logs/access_log", - "logs/error_log", - "etc/nginx/nginx.conf", - "usr/local/etc/nginx/nginx.conf", - "usr/local/nginx/conf/nginx.conf", - "usr/local/zeus/web/global.cfg", - "usr/local/zeus/web/log/errors", - "opt/lsws/conf/httpd_conf.xml", - "usr/local/lsws/conf/httpd_conf.xml", - "opt/lsws/logs/error.log", - "opt/lsws/logs/access.log", - "usr/local/lsws/logs/error.log", - "usr/local/logs/access.log", - "usr/local/samba/lib/log.user", - "usr/local/logs/samba.log", - "etc/samba/netlogon", - "etc/smbpasswd", - "etc/smb.conf", - "etc/samba/dhcp.conf", - "etc/samba/smb.conf", - "etc/samba/samba.conf", - "etc/samba/smb.conf.user", - "etc/samba/smbpasswd", - "etc/samba/smbusers", - "etc/samba/private/smbpasswd", - "usr/local/etc/smb.conf", - "usr/local/samba/lib/smb.conf.user", - "etc/dhcp3/dhclient.conf", - "etc/dhcp3/dhcpd.conf", - "etc/dhcp/dhclient.conf", - "program files/vidalia bundle/polipo/polipo.conf", - "etc/tor/tor-tsocks.conf", - "etc/stunnel/stunnel.conf", - "etc/tsocks.conf", - "etc/tinyproxy/tinyproxy.conf", - "etc/miredo-server.conf", - "etc/miredo.conf", - "etc/miredo/miredo-server.conf", - "etc/miredo/miredo.conf", - "etc/wicd/dhclient.conf.template.default", - "etc/wicd/manager-settings.conf", - "etc/wicd/wired-settings.conf", - "etc/wicd/wireless-settings.conf", - "etc/ipfw.rules", - "etc/ipfw.conf", - "etc/firewall.rules", - "winnt/system32/logfiles/firewall/pfirewall.log", - "winnt/system32/logfiles/firewall/pfirewall.log.old", - "windows/system32/logfiles/firewall/pfirewall.log", - "windows/system32/logfiles/firewall/pfirewall.log.old", - "etc/clamav/clamd.conf", - "etc/clamav/freshclam.conf", - "etc/x11/xorg.conf", - "etc/x11/xorg.conf-vesa", - "etc/x11/xorg.conf-vmware", - "etc/x11/xorg.conf.beforevmwaretoolsinstall", - "etc/x11/xorg.conf.orig", - "etc/bluetooth/input.conf", - "etc/bluetooth/main.conf", - "etc/bluetooth/network.conf", - "etc/bluetooth/rfcomm.conf", - "etc/bash_completion.d/debconf", - "root/.bash_logout", - "root/.bash_history", - "root/.bash_config", - "root/.bashrc", - "etc/bash.bashrc", - "var/adm/syslog", - "var/adm/sulog", - "var/adm/utmp", - "var/adm/utmpx", - "var/adm/wtmp", - "var/adm/wtmpx", - "var/adm/lastlog/username", - "usr/spool/lp/log", - "var/adm/lp/lpd-errs", - "usr/lib/cron/log", - "var/adm/loginlog", - "var/adm/pacct", - "var/adm/dtmp", - "var/adm/acct/sum/loginlog", - "var/adm/x0msgs", - "var/adm/crash/vmcore", - "var/adm/crash/unix", - "etc/newsyslog.conf", - "var/adm/qacct", - "var/adm/ras/errlog", - "var/adm/ras/bootlog", - "var/adm/cron/log", - "etc/utmp", - "etc/security/lastlog", - "etc/security/failedlogin", - "usr/spool/mqueue/syslog", - "var/adm/messages", - "var/adm/aculogs", - "var/adm/aculog", - "var/adm/vold.log", - "var/adm/log/asppp.log", - "var/lp/logs/lpsched", - "var/lp/logs/lpnet", - "var/lp/logs/requests", - "var/cron/log", - "var/saf/_log", - "var/saf/port/log", - "tmp/access.log", - "etc/sensors.conf", - "etc/sensors3.conf", - "etc/host.conf", - "etc/pam.conf", - "etc/resolv.conf", - "etc/apt/apt.conf", - "etc/inetd.conf", - "etc/syslog.conf", - "etc/sysctl.conf", - "etc/sysctl.d/10-console-messages.conf", - "etc/sysctl.d/10-network-security.conf", - "etc/sysctl.d/10-process-security.conf", - "etc/sysctl.d/wine.sysctl.conf", - "etc/security/access.conf", - "etc/security/group.conf", - 
"etc/security/limits.conf", - "etc/security/namespace.conf", - "etc/security/pam_env.conf", - "etc/security/sepermit.conf", - "etc/security/time.conf", - "etc/ssh/sshd_config", - "etc/adduser.conf", - "etc/deluser.conf", - "etc/avahi/avahi-daemon.conf", - "etc/ca-certificates.conf", - "etc/ca-certificates.conf.dpkg-old", - "etc/casper.conf", - "etc/chkrootkit.conf", - "etc/debconf.conf", - "etc/dns2tcpd.conf", - "etc/e2fsck.conf", - "etc/esound/esd.conf", - "etc/etter.conf", - "etc/fuse.conf", - "etc/foremost.conf", - "etc/hdparm.conf", - "etc/kernel-img.conf", - "etc/kernel-pkg.conf", - "etc/ld.so.conf", - "etc/ltrace.conf", - "etc/mail/sendmail.conf", - "etc/manpath.config", - "etc/kbd/config", - "etc/ldap/ldap.conf", - "etc/logrotate.conf", - "etc/mtools.conf", - "etc/smi.conf", - "etc/updatedb.conf", - "etc/pulse/client.conf", - "usr/share/adduser/adduser.conf", - "etc/hostname", - "etc/networks", - "etc/timezone", - "etc/modules", - "etc/passwd", - "etc/shadow", - "etc/fstab", - "etc/motd", - "etc/hosts", - "etc/group", - "etc/alias", - "etc/crontab", - "etc/crypttab", - "etc/exports", - "etc/mtab", - "etc/hosts.allow", - "etc/hosts.deny", - "etc/os-release", - "etc/password.master", - "etc/profile", - "etc/default/grub", - "etc/resolvconf/update-libc.d/sendmail", - "etc/inittab", - "etc/issue", - "etc/issue.net", - "etc/login.defs", - "etc/sudoers", - "etc/sysconfig/network-scripts/ifcfg-eth0", - "etc/redhat-release", - "etc/scw-release", - "etc/system-release-cpe", - "etc/debian_version", - "etc/fedora-release", - "etc/mandrake-release", - "etc/slackware-release", - "etc/suse-release", - "etc/security/group", - "etc/security/passwd", - "etc/security/user", - "etc/security/environ", - "etc/security/limits", - "etc/security/opasswd", - "boot/grub/grub.cfg", - "boot/grub/menu.lst", - "root/.ksh_history", - "root/.xauthority", - "usr/lib/security/mkuser.default", - "var/lib/squirrelmail/prefs/squirrelmail.log", - "etc/squirrelmail/apache.conf", - "etc/squirrelmail/config_local.php", - "etc/squirrelmail/default_pref", - "etc/squirrelmail/index.php", - "etc/squirrelmail/config_default.php", - "etc/squirrelmail/config.php", - "etc/squirrelmail/filters_setup.php", - "etc/squirrelmail/sqspell_config.php", - "etc/squirrelmail/config/config.php", - "etc/httpd/conf.d/squirrelmail.conf", - "usr/share/squirrelmail/config/config.php", - "private/etc/squirrelmail/config/config.php", - "srv/www/htdos/squirrelmail/config/config.php", - "var/www/squirrelmail/config/config.php", - "var/www/html/squirrelmail/config/config.php", - "var/www/html/squirrelmail-1.2.9/config/config.php", - "usr/share/squirrelmail/plugins/squirrel_logger/setup.php", - "usr/local/squirrelmail/www/readme", - "windows/system32/drivers/etc/hosts", - "windows/system32/drivers/etc/lmhosts.sam", - "windows/system32/drivers/etc/networks", - "windows/system32/drivers/etc/protocol", - "windows/system32/drivers/etc/services", - "/boot.ini", - "windows/debug/netsetup.log", - "windows/comsetup.log", - "windows/repair/setup.log", - "windows/setupact.log", - "windows/setupapi.log", - "windows/setuperr.log", - "windows/updspapi.log", - "windows/wmsetup.log", - "windows/windowsupdate.log", - "windows/odbc.ini", - "usr/local/psa/admin/htdocs/domains/databases/phpmyadmin/libraries/config.default.php", - "etc/apache2/conf.d/phpmyadmin.conf", - "etc/phpmyadmin/config.inc.php", - "etc/openldap/ldap.conf", - "etc/cups/acroread.conf", - "etc/cups/cupsd.conf", - "etc/cups/cupsd.conf.default", - "etc/cups/pdftops.conf", - "etc/cups/printers.conf", - 
"windows/system32/macromed/flash/flashinstall.log", - "windows/system32/macromed/flash/install.log", - "etc/cvs-cron.conf", - "etc/cvs-pserver.conf", - "etc/subversion/config", - "etc/modprobe.d/vmware-tools.conf", - "etc/updatedb.conf.beforevmwaretoolsinstall", - "etc/vmware-tools/config", - "etc/vmware-tools/tpvmlp.conf", - "etc/vmware-tools/vmware-tools-libraries.conf", - "var/log", - "var/log/sw-cp-server/error_log", - "var/log/sso/sso.log", - "var/log/dpkg.log", - "var/log/btmp", - "var/log/utmp", - "var/log/wtmp", - "var/log/mysql/mysql-bin.log", - "var/log/mysql/mysql-bin.index", - "var/log/mysql/data/mysql-bin.index", - "var/log/mysql.log", - "var/log/mysql.err", - "var/log/mysqlderror.log", - "var/log/mysql/mysql.log", - "var/log/mysql/mysql-slow.log", - "var/log/mysql-bin.index", - "var/log/data/mysql-bin.index", - "var/log/postgresql/postgresql.log", - "var/log/postgres/pg_backup.log", - "var/log/postgres/postgres.log", - "var/log/postgresql.log", - "var/log/pgsql/pgsql.log", - "var/log/postgresql/postgresql-8.1-main.log", - "var/log/postgresql/postgresql-8.3-main.log", - "var/log/postgresql/postgresql-8.4-main.log", - "var/log/postgresql/postgresql-9.0-main.log", - "var/log/postgresql/postgresql-9.1-main.log", - "var/log/pgsql8.log", - "var/log/postgresql/postgres.log", - "var/log/pgsql_log", - "var/log/postgresql/main.log", - "var/log/cron", - "var/log/postgres.log", - "var/log/proftpd", - "var/log/proftpd/xferlog.legacy", - "var/log/proftpd.access_log", - "var/log/proftpd.xferlog", - "var/log/vsftpd.log", - "var/log/xferlog", - "var/log/pure-ftpd/pure-ftpd.log", - "var/log/pureftpd.log", - "var/log/muddleftpd", - "var/log/muddleftpd.conf", - "var/log/ftp-proxy/ftp-proxy.log", - "var/log/ftp-proxy", - "var/log/ftplog", - "var/log/exim_mainlog", - "var/log/exim/mainlog", - "var/log/maillog", - "var/log/exim_paniclog", - "var/log/exim/paniclog", - "var/log/exim/rejectlog", - "var/log/exim_rejectlog", - "var/log/webmin/miniserv.log", - "var/log/httpd/access_log", - "var/log/httpd/error_log", - "var/log/httpd/access.log", - "var/log/httpd/error.log", - "var/log/apache/access_log", - "var/log/apache/access.log", - "var/log/apache/error_log", - "var/log/apache/error.log", - "var/log/apache2/access_log", - "var/log/apache2/access.log", - "var/log/apache2/error_log", - "var/log/apache2/error.log", - "var/log/access_log", - "var/log/access.log", - "var/log/error_log", - "var/log/error.log", - "var/log/tomcat6/catalina.out", - "var/log/lighttpd.error.log", - "var/log/lighttpd.access.log", - "var/logs/access.log", - "var/log/lighttpd/", - "var/log/lighttpd/error.log", - "var/log/lighttpd/access.www.log", - "var/log/lighttpd/error.www.log", - "var/log/lighttpd/access.log", - "var/log/lighttpd/{domain}/access.log", - "var/log/lighttpd/{domain}/error.log", - "var/log/nginx/access_log", - "var/log/nginx/error_log", - "var/log/nginx/access.log", - "var/log/nginx/error.log", - "var/log/nginx.access_log", - "var/log/nginx.error_log", - "var/log/samba/log.smbd", - "var/log/samba/log.nmbd", - "var/log/samba.log", - "var/log/samba.log1", - "var/log/samba.log2", - "var/log/log.smb", - "var/log/ipfw.log", - "var/log/ipfw", - "var/log/ipfw/ipfw.log", - "var/log/ipfw.today", - "var/log/poplog", - "var/log/authlog", - "var/log/news.all", - "var/log/news/news.all", - "var/log/news/news.crit", - "var/log/news/news.err", - "var/log/news/news.notice", - "var/log/news/suck.err", - "var/log/news/suck.notice", - "var/log/messages", - "var/log/messages.1", - "var/log/user.log", - "var/log/user.log.1", - 
"var/log/auth.log", - "var/log/pm-powersave.log", - "var/log/xorg.0.log", - "var/log/daemon.log", - "var/log/daemon.log.1", - "var/log/kern.log", - "var/log/kern.log.1", - "var/log/mail.err", - "var/log/mail.info", - "var/log/mail.warn", - "var/log/ufw.log", - "var/log/boot.log", - "var/log/syslog", - "var/log/syslog.1", - "var/log/squirrelmail.log", - "var/log/apache2/squirrelmail.log", - "var/log/apache2/squirrelmail.err.log", - "var/log/mail.log", - "var/log/vmware/hostd.log", - "var/log/vmware/hostd-1.log", - "/wp-config.php", - "/wp-config.bak", - "/wp-config.old", - "/wp-config.temp", - "/wp-config.tmp", - "/wp-config.txt", - "/config.yml", - "/config_dev.yml", - "/config_prod.yml", - "/config_test.yml", - "/parameters.yml", - "/routing.yml", - "/security.yml", - "/services.yml", - "sites/default/default.settings.php", - "sites/default/settings.php", - "sites/default/settings.local.php", - "app/etc/local.xml", - "/sftp-config.json", - "/web.config", - "includes/config.php", - "includes/configure.php", - "/config.inc.php", - "/localsettings.php", - "inc/config.php", - "typo3conf/localconf.php", - "config/app.php", - "config/custom.php", - "config/database.php", - "/configuration.php", - "/config.php", - "var/mail/www-data", - "etc/network/", - "etc/init/", - "inetpub/wwwroot/global.asa", - "system32/inetsrv/config/applicationhost.config", - "system32/inetsrv/config/administration.config", - "system32/inetsrv/config/redirection.config", - "system32/config/default", - "system32/config/sam", - "system32/config/system", - "system32/config/software", - "winnt/repair/sam._", - "/package.json", - "/package-lock.json", - "/gruntfile.js", - "/npm-debug.log", - "/ormconfig.json", - "/tsconfig.json", - "/webpack.config.js", - "/yarn.lock", - "proc/0", - "proc/1", - "proc/2", - "proc/3", - "proc/4", - "proc/5", - "proc/6", - "proc/7", - "proc/8", - "proc/9", - "proc/acpi", - "proc/asound", - "proc/bootconfig", - "proc/buddyinfo", - "proc/bus", - "proc/cgroups", - "proc/cmdline", - "proc/config.gz", - "proc/consoles", - "proc/cpuinfo", - "proc/crypto", - "proc/devices", - "proc/diskstats", - "proc/dma", - "proc/docker", - "proc/driver", - "proc/dynamic_debug", - "proc/execdomains", - "proc/fb", - "proc/filesystems", - "proc/fs", - "proc/interrupts", - "proc/iomem", - "proc/ioports", - "proc/ipmi", - "proc/irq", - "proc/kallsyms", - "proc/kcore", - "proc/keys", - "proc/keys", - "proc/key-users", - "proc/kmsg", - "proc/kpagecgroup", - "proc/kpagecount", - "proc/kpageflags", - "proc/latency_stats", - "proc/loadavg", - "proc/locks", - "proc/mdstat", - "proc/meminfo", - "proc/misc", - "proc/modules", - "proc/mounts", - "proc/mpt", - "proc/mtd", - "proc/mtrr", - "proc/net", - "proc/net/tcp", - "proc/net/udp", - "proc/pagetypeinfo", - "proc/partitions", - "proc/pressure", - "proc/sched_debug", - "proc/schedstat", - "proc/scsi", - "proc/self", - "proc/self/cmdline", - "proc/self/environ", - "proc/self/fd/0", - "proc/self/fd/1", - "proc/self/fd/10", - "proc/self/fd/11", - "proc/self/fd/12", - "proc/self/fd/13", - "proc/self/fd/14", - "proc/self/fd/15", - "proc/self/fd/2", - "proc/self/fd/3", - "proc/self/fd/4", - "proc/self/fd/5", - "proc/self/fd/6", - "proc/self/fd/7", - "proc/self/fd/8", - "proc/self/fd/9", - "proc/self/mounts", - "proc/self/stat", - "proc/self/status", - "proc/slabinfo", - "proc/softirqs", - "proc/stat", - "proc/swaps", - "proc/sys", - "proc/sysrq-trigger", - "proc/sysvipc", - "proc/thread-self", - "proc/timer_list", - "proc/timer_stats", - "proc/tty", - "proc/uptime", - "proc/version", 
- "proc/version_signature", - "proc/vmallocinfo", - "proc/vmstat", - "proc/zoneinfo", - "sys/block", - "sys/bus", - "sys/class", - "sys/dev", - "sys/devices", - "sys/firmware", - "sys/fs", - "sys/hypervisor", - "sys/kernel", - "sys/module", - "sys/power", - "windows\\win.ini", - "default\\ntuser.dat", - "/var/run/secrets/kubernetes.io/serviceaccount" - ], - "options": { - "enforce_word_boundary": true - } - }, - "operator": "phrase_match" - } - ], - "transformers": [ - "lowercase", - "normalizePath" - ] - }, - { - "id": "crs-931-110", - "name": "RFI: Common RFI Vulnerable Parameter Name used w/ URL Payload", - "tags": { - "type": "rfi", - "crs_id": "931110", - "category": "attack_attempt", - "cwe": "98", - "capec": "1000/152/175/253/193", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - } - ], - "regex": "(?:\\binclude\\s*\\([^)]*|mosConfig_absolute_path|_CONF\\[path\\]|_SERVER\\[DOCUMENT_ROOT\\]|GALLERY_BASEDIR|path\\[docroot\\]|appserv_root|config\\[root_dir\\])=(?:file|ftps?|https?)://", - "options": { - "min_length": 15 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-931-120", - "name": "RFI: URL Payload Used w/Trailing Question Mark Character (?)", - "tags": { - "type": "rfi", - "crs_id": "931120", - "category": "attack_attempt", - "cwe": "98", - "capec": "1000/152/175/253/193" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "^(?i:file|ftps?)://.*?\\?+$", - "options": { - "case_sensitive": true, - "min_length": 4 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-932-160", - "name": "Remote Command Execution: Unix Shell Code Found", - "tags": { - "type": "command_injection", - "crs_id": "932160", - "category": "attack_attempt", - "cwe": "77", - "capec": "1000/152/248/88", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "options": { - "enforce_word_boundary": true - }, - "list": [ - "${cdpath}", - "${dirstack}", - "${home}", - "${hostname}", - "${ifs}", - "${oldpwd}", - "${ostype}", - "${path}", - "${pwd}", - "$cdpath", - "$dirstack", - "$home", - "$hostname", - "$ifs", - "$oldpwd", - "$ostype", - "$pwd", - "dev/fd/", - "dev/null", - "dev/stderr", - "dev/stdin", - "dev/stdout", - "dev/tcp/", - "dev/udp/", - "dev/zero", - "etc/master.passwd", - "etc/pwd.db", - "etc/shells", - "etc/spwd.db", - "proc/self/", - "bin/7z", - "bin/7za", - "bin/7zr", - "bin/ab", - "bin/agetty", - "bin/ansible-playbook", - "bin/apt", - "bin/apt-get", - "bin/ar", - "bin/aria2c", - "bin/arj", - "bin/arp", - "bin/as", - "bin/ascii-xfr", - "bin/ascii85", - "bin/ash", - "bin/aspell", - "bin/at", - "bin/atobm", - "bin/awk", - "bin/base32", - "bin/base64", - "bin/basenc", - "bin/bash", - "bin/bpftrace", - "bin/bridge", - "bin/bundler", - "bin/bunzip2", - "bin/busctl", - "bin/busybox", - "bin/byebug", - "bin/bzcat", - "bin/bzcmp", - 
"bin/bzdiff", - "bin/bzegrep", - "bin/bzexe", - "bin/bzfgrep", - "bin/bzgrep", - "bin/bzip2", - "bin/bzip2recover", - "bin/bzless", - "bin/bzmore", - "bin/bzz", - "bin/c89", - "bin/c99", - "bin/cancel", - "bin/capsh", - "bin/cat", - "bin/cc", - "bin/certbot", - "bin/check_by_ssh", - "bin/check_cups", - "bin/check_log", - "bin/check_memory", - "bin/check_raid", - "bin/check_ssl_cert", - "bin/check_statusfile", - "bin/chmod", - "bin/choom", - "bin/chown", - "bin/chroot", - "bin/clang", - "bin/clang++", - "bin/cmp", - "bin/cobc", - "bin/column", - "bin/comm", - "bin/composer", - "bin/core_perl/zipdetails", - "bin/cowsay", - "bin/cowthink", - "bin/cp", - "bin/cpan", - "bin/cpio", - "bin/cpulimit", - "bin/crash", - "bin/crontab", - "bin/csh", - "bin/csplit", - "bin/csvtool", - "bin/cupsfilter", - "bin/curl", - "bin/cut", - "bin/dash", - "bin/date", - "bin/dd", - "bin/dev/fd/", - "bin/dev/null", - "bin/dev/stderr", - "bin/dev/stdin", - "bin/dev/stdout", - "bin/dev/tcp/", - "bin/dev/udp/", - "bin/dev/zero", - "bin/dialog", - "bin/diff", - "bin/dig", - "bin/dmesg", - "bin/dmidecode", - "bin/dmsetup", - "bin/dnf", - "bin/docker", - "bin/dosbox", - "bin/dpkg", - "bin/du", - "bin/dvips", - "bin/easy_install", - "bin/eb", - "bin/echo", - "bin/ed", - "bin/efax", - "bin/emacs", - "bin/env", - "bin/eqn", - "bin/es", - "bin/esh", - "bin/etc/group", - "bin/etc/master.passwd", - "bin/etc/passwd", - "bin/etc/pwd.db", - "bin/etc/shadow", - "bin/etc/shells", - "bin/etc/spwd.db", - "bin/ex", - "bin/exiftool", - "bin/expand", - "bin/expect", - "bin/expr", - "bin/facter", - "bin/fetch", - "bin/file", - "bin/find", - "bin/finger", - "bin/fish", - "bin/flock", - "bin/fmt", - "bin/fold", - "bin/fping", - "bin/ftp", - "bin/gawk", - "bin/gcc", - "bin/gcore", - "bin/gdb", - "bin/gem", - "bin/genie", - "bin/genisoimage", - "bin/ghc", - "bin/ghci", - "bin/gimp", - "bin/ginsh", - "bin/git", - "bin/grc", - "bin/grep", - "bin/gtester", - "bin/gunzip", - "bin/gzexe", - "bin/gzip", - "bin/hd", - "bin/head", - "bin/hexdump", - "bin/highlight", - "bin/hping3", - "bin/iconv", - "bin/id", - "bin/iftop", - "bin/install", - "bin/ionice", - "bin/ip", - "bin/irb", - "bin/ispell", - "bin/jjs", - "bin/join", - "bin/journalctl", - "bin/jq", - "bin/jrunscript", - "bin/knife", - "bin/ksh", - "bin/ksshell", - "bin/latex", - "bin/ld", - "bin/ldconfig", - "bin/less", - "bin/lftp", - "bin/ln", - "bin/loginctl", - "bin/logsave", - "bin/look", - "bin/lp", - "bin/ls", - "bin/ltrace", - "bin/lua", - "bin/lualatex", - "bin/luatex", - "bin/lwp-download", - "bin/lwp-request", - "bin/lz", - "bin/lz4", - "bin/lz4c", - "bin/lz4cat", - "bin/lzcat", - "bin/lzcmp", - "bin/lzdiff", - "bin/lzegrep", - "bin/lzfgrep", - "bin/lzgrep", - "bin/lzless", - "bin/lzma", - "bin/lzmadec", - "bin/lzmainfo", - "bin/lzmore", - "bin/mail", - "bin/make", - "bin/man", - "bin/mawk", - "bin/mkfifo", - "bin/mknod", - "bin/more", - "bin/mosquitto", - "bin/mount", - "bin/msgattrib", - "bin/msgcat", - "bin/msgconv", - "bin/msgfilter", - "bin/msgmerge", - "bin/msguniq", - "bin/mtr", - "bin/mv", - "bin/mysql", - "bin/nano", - "bin/nasm", - "bin/nawk", - "bin/nc", - "bin/ncat", - "bin/neofetch", - "bin/nice", - "bin/nl", - "bin/nm", - "bin/nmap", - "bin/node", - "bin/nohup", - "bin/npm", - "bin/nroff", - "bin/nsenter", - "bin/octave", - "bin/od", - "bin/openssl", - "bin/openvpn", - "bin/openvt", - "bin/opkg", - "bin/paste", - "bin/pax", - "bin/pdb", - "bin/pdflatex", - "bin/pdftex", - "bin/pdksh", - "bin/perf", - "bin/perl", - "bin/pg", - "bin/php", - "bin/php-cgi", - "bin/php5", - 
"bin/php7", - "bin/pic", - "bin/pico", - "bin/pidstat", - "bin/pigz", - "bin/pip", - "bin/pkexec", - "bin/pkg", - "bin/pr", - "bin/printf", - "bin/proc/self/", - "bin/pry", - "bin/ps", - "bin/psed", - "bin/psftp", - "bin/psql", - "bin/ptx", - "bin/puppet", - "bin/pxz", - "bin/python", - "bin/python2", - "bin/python3", - "bin/rake", - "bin/rbash", - "bin/rc", - "bin/readelf", - "bin/red", - "bin/redcarpet", - "bin/restic", - "bin/rev", - "bin/rlogin", - "bin/rlwrap", - "bin/rpm", - "bin/rpmquery", - "bin/rsync", - "bin/ruby", - "bin/run-mailcap", - "bin/run-parts", - "bin/rview", - "bin/rvim", - "bin/sash", - "bin/sbin/capsh", - "bin/sbin/logsave", - "bin/sbin/service", - "bin/sbin/start-stop-daemon", - "bin/scp", - "bin/screen", - "bin/script", - "bin/sed", - "bin/service", - "bin/setarch", - "bin/sftp", - "bin/sg", - "bin/sh", - "bin/shuf", - "bin/sleep", - "bin/slsh", - "bin/smbclient", - "bin/snap", - "bin/socat", - "bin/soelim", - "bin/sort", - "bin/split", - "bin/sqlite3", - "bin/ss", - "bin/ssh", - "bin/ssh-keygen", - "bin/ssh-keyscan", - "bin/sshpass", - "bin/start-stop-daemon", - "bin/stdbuf", - "bin/strace", - "bin/strings", - "bin/su", - "bin/sysctl", - "bin/systemctl", - "bin/systemd-resolve", - "bin/tac", - "bin/tail", - "bin/tar", - "bin/task", - "bin/taskset", - "bin/tbl", - "bin/tclsh", - "bin/tcpdump", - "bin/tcsh", - "bin/tee", - "bin/telnet", - "bin/tex", - "bin/tftp", - "bin/tic", - "bin/time", - "bin/timedatectl", - "bin/timeout", - "bin/tmux", - "bin/top", - "bin/troff", - "bin/tshark", - "bin/ul", - "bin/uname", - "bin/uncompress", - "bin/unexpand", - "bin/uniq", - "bin/unlz4", - "bin/unlzma", - "bin/unpigz", - "bin/unrar", - "bin/unshare", - "bin/unxz", - "bin/unzip", - "bin/unzstd", - "bin/update-alternatives", - "bin/uudecode", - "bin/uuencode", - "bin/valgrind", - "bin/vi", - "bin/view", - "bin/vigr", - "bin/vim", - "bin/vimdiff", - "bin/vipw", - "bin/virsh", - "bin/volatility", - "bin/wall", - "bin/watch", - "bin/wc", - "bin/wget", - "bin/whiptail", - "bin/who", - "bin/whoami", - "bin/whois", - "bin/wireshark", - "bin/wish", - "bin/xargs", - "bin/xelatex", - "bin/xetex", - "bin/xmodmap", - "bin/xmore", - "bin/xpad", - "bin/xxd", - "bin/xz", - "bin/xzcat", - "bin/xzcmp", - "bin/xzdec", - "bin/xzdiff", - "bin/xzegrep", - "bin/xzfgrep", - "bin/xzgrep", - "bin/xzless", - "bin/xzmore", - "bin/yarn", - "bin/yelp", - "bin/yes", - "bin/yum", - "bin/zathura", - "bin/zip", - "bin/zipcloak", - "bin/zipcmp", - "bin/zipdetails", - "bin/zipgrep", - "bin/zipinfo", - "bin/zipmerge", - "bin/zipnote", - "bin/zipsplit", - "bin/ziptool", - "bin/zsh", - "bin/zsoelim", - "bin/zstd", - "bin/zstdcat", - "bin/zstdgrep", - "bin/zstdless", - "bin/zstdmt", - "bin/zypper" - ] - }, - "operator": "phrase_match" - } - ], - "transformers": [ - "lowercase", - "cmdLine" - ] - }, - { - "id": "crs-932-171", - "name": "Remote Command Execution: Shellshock (CVE-2014-6271)", - "tags": { - "type": "command_injection", - "crs_id": "932171", - "category": "attack_attempt", - "cwe": "77", - "capec": "1000/152/248/88", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "^\\(\\s*\\)\\s+{", - "options": { - 
"case_sensitive": true, - "min_length": 4 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-932-180", - "name": "Restricted File Upload Attempt", - "tags": { - "type": "command_injection", - "crs_id": "932180", - "category": "attack_attempt", - "cwe": "706", - "capec": "1000/225/122/17/177", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "x-filename" - ] - }, - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "x_filename" - ] - }, - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "x-file-name" - ] - } - ], - "list": [ - ".htaccess", - ".htdigest", - ".htpasswd", - "wp-config.php", - "config.yml", - "config_dev.yml", - "config_prod.yml", - "config_test.yml", - "parameters.yml", - "routing.yml", - "security.yml", - "services.yml", - "default.settings.php", - "settings.php", - "settings.local.php", - "local.xml", - ".env" - ], - "options": { - "enforce_word_boundary": true - } - }, - "operator": "phrase_match" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "crs-933-111", - "name": "PHP Injection Attack: PHP Script File Upload Found", - "tags": { - "type": "unrestricted_file_upload", - "crs_id": "933111", - "category": "attack_attempt", - "cwe": "434", - "capec": "1000/225/122/17/650", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "x-filename" - ] - }, - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "x_filename" - ] - }, - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "x.filename" - ] - }, - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "x-file-name" - ] - } - ], - "regex": ".*\\.(?:php\\d*|phtml)\\..*$", - "options": { - "case_sensitive": true, - "min_length": 5 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "crs-933-130", - "name": "PHP Injection Attack: Global Variables Found", - "tags": { - "type": "php_code_injection", - "crs_id": "933130", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/225/122/17/650", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "options": { - "enforce_word_boundary": true - }, - "list": [ - "$globals", - "$_cookie", - "$_env", - "$_files", - "$_get", - "$_post", - "$_request", - "$_server", - "$_session", - "$argc", - "$argv", - "$http_\\u200bresponse_\\u200bheader", - "$php_\\u200berrormsg", - "$http_cookie_vars", - "$http_env_vars", - "$http_get_vars", - "$http_post_files", - "$http_post_vars", - "$http_raw_post_data", - "$http_request_vars", - "$http_server_vars" - ] - }, - "operator": "phrase_match" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "crs-933-131", - "name": "PHP Injection Attack: HTTP Headers Values Found", - "tags": { - "type": "php_code_injection", - "crs_id": "933131", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/225/122/17/650" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - 
"address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?:HTTP_(?:ACCEPT(?:_(?:ENCODING|LANGUAGE|CHARSET))?|(?:X_FORWARDED_FO|REFERE)R|(?:USER_AGEN|HOS)T|CONNECTION|KEEP_ALIVE)|PATH_(?:TRANSLATED|INFO)|ORIG_PATH_INFO|QUERY_STRING|REQUEST_URI|AUTH_TYPE)", - "options": { - "case_sensitive": true, - "min_length": 9 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-933-140", - "name": "PHP Injection Attack: I/O Stream Found", - "tags": { - "type": "php_code_injection", - "crs_id": "933140", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/225/122/17/650", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "php://(?:std(?:in|out|err)|(?:in|out)put|fd|memory|temp|filter)", - "options": { - "min_length": 8 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-933-150", - "name": "PHP Injection Attack: High-Risk PHP Function Name Found", - "tags": { - "type": "php_code_injection", - "crs_id": "933150", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/225/122/17/650", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "list": [ - "__halt_compiler", - "apache_child_terminate", - "base64_decode", - "bzdecompress", - "call_user_func", - "call_user_func_array", - "call_user_method", - "call_user_method_array", - "convert_uudecode", - "file_get_contents", - "file_put_contents", - "fsockopen", - "get_class_methods", - "get_class_vars", - "get_defined_constants", - "get_defined_functions", - "get_defined_vars", - "gzdecode", - "gzinflate", - "gzuncompress", - "include_once", - "invokeargs", - "pcntl_exec", - "pcntl_fork", - "pfsockopen", - "posix_getcwd", - "posix_getpwuid", - "posix_getuid", - "posix_uname", - "reflectionfunction", - "require_once", - "shell_exec", - "str_rot13", - "sys_get_temp_dir", - "wp_remote_fopen", - "wp_remote_get", - "wp_remote_head", - "wp_remote_post", - "wp_remote_request", - "wp_safe_remote_get", - "wp_safe_remote_head", - "wp_safe_remote_post", - "wp_safe_remote_request", - "zlib_decode" - ], - "options": { - "enforce_word_boundary": true - } - }, - "operator": "phrase_match" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "crs-933-160", - "name": "PHP Injection Attack: High-Risk PHP Function Call Found", - "tags": { - "type": "php_code_injection", - "crs_id": "933160", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/225/122/17/650" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - 
"address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\b(?:s(?:e(?:t(?:_(?:e(?:xception|rror)_handler|magic_quotes_runtime|include_path)|defaultstub)|ssion_s(?:et_save_handler|tart))|qlite_(?:(?:(?:unbuffered|single|array)_)?query|create_(?:aggregate|function)|p?open|exec)|tr(?:eam_(?:context_create|socket_client)|ipc?slashes|rev)|implexml_load_(?:string|file)|ocket_c(?:onnect|reate)|h(?:ow_sourc|a1_fil)e|pl_autoload_register|ystem)|p(?:r(?:eg_(?:replace(?:_callback(?:_array)?)?|match(?:_all)?|split)|oc_(?:(?:terminat|clos|nic)e|get_status|open)|int_r)|o(?:six_(?:get(?:(?:e[gu]|g)id|login|pwnam)|mk(?:fifo|nod)|ttyname|kill)|pen)|hp(?:_(?:strip_whitespac|unam)e|version|info)|g_(?:(?:execut|prepar)e|connect|query)|a(?:rse_(?:ini_file|str)|ssthru)|utenv)|r(?:unkit_(?:function_(?:re(?:defin|nam)e|copy|add)|method_(?:re(?:defin|nam)e|copy|add)|constant_(?:redefine|add))|e(?:(?:gister_(?:shutdown|tick)|name)_function|ad(?:(?:gz)?file|_exif_data|dir))|awurl(?:de|en)code)|i(?:mage(?:createfrom(?:(?:jpe|pn)g|x[bp]m|wbmp|gif)|(?:jpe|pn)g|g(?:d2?|if)|2?wbmp|xbm)|s_(?:(?:(?:execut|write?|read)ab|fi)le|dir)|ni_(?:get(?:_all)?|set)|terator_apply|ptcembed)|g(?:et(?:_(?:c(?:urrent_use|fg_va)r|meta_tags)|my(?:[gpu]id|inode)|(?:lastmo|cw)d|imagesize|env)|z(?:(?:(?:defla|wri)t|encod|fil)e|compress|open|read)|lob)|a(?:rray_(?:u(?:intersect(?:_u?assoc)?|diff(?:_u?assoc)?)|intersect_u(?:assoc|key)|diff_u(?:assoc|key)|filter|reduce|map)|ssert(?:_options)?|tob)|h(?:tml(?:specialchars(?:_decode)?|_entity_decode|entities)|(?:ash(?:_(?:update|hmac))?|ighlight)_file|e(?:ader_register_callback|x2bin))|f(?:i(?:le(?:(?:[acm]tim|inod)e|(?:_exist|perm)s|group)?|nfo_open)|tp_(?:nb_(?:ge|pu)|connec|ge|pu)t|(?:unction_exis|pu)ts|write|open)|o(?:b_(?:get_(?:c(?:ontents|lean)|flush)|end_(?:clean|flush)|clean|flush|start)|dbc_(?:result(?:_all)?|exec(?:ute)?|connect)|pendir)|m(?:b_(?:ereg(?:_(?:replace(?:_callback)?|match)|i(?:_replace)?)?|parse_str)|(?:ove_uploaded|d5)_file|ethod_exists|ysql_query|kdir)|e(?:x(?:if_(?:t(?:humbnail|agname)|imagetype|read_data)|ec)|scapeshell(?:arg|cmd)|rror_reporting|val)|c(?:url_(?:file_create|exec|init)|onvert_uuencode|reate_function|hr)|u(?:n(?:serialize|pack)|rl(?:de|en)code|[ak]?sort)|b(?:(?:son_(?:de|en)|ase64_en)code|zopen|toa)|(?:json_(?:de|en)cod|debug_backtrac|tmpfil)e|var_dump)(?:\\s|/\\*.*\\*/|//.*|#.*|\\\"|')*\\((?:(?:\\s|/\\*.*\\*/|//.*|#.*)*(?:\\$\\w+|[A-Z\\d]\\w*|\\w+\\(.*\\)|\\\\?\"(?:[^\"]|\\\\\"|\"\"|\"\\+\")*\\\\?\"|\\\\?'(?:[^']|''|'\\+')*\\\\?')(?:\\s|/\\*.*\\*/|//.*|#.*)*(?:(?:::|\\.|->)(?:\\s|/\\*.*\\*/|//.*|#.*)*\\w+(?:\\(.*\\))?)?,)*(?:(?:\\s|/\\*.*\\*/|//.*|#.*)*(?:\\$\\w+|[A-Z\\d]\\w*|\\w+\\(.*\\)|\\\\?\"(?:[^\"]|\\\\\"|\"\"|\"\\+\")*\\\\?\"|\\\\?'(?:[^']|''|'\\+')*\\\\?')(?:\\s|/\\*.*\\*/|//.*|#.*)*(?:(?:::|\\.|->)(?:\\s|/\\*.*\\*/|//.*|#.*)*\\w+(?:\\(.*\\))?)?)?\\)", - "options": { - "case_sensitive": true, - "min_length": 5 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-933-170", - "name": "PHP Injection Attack: Serialized Object Injection", - "tags": { - "type": "php_code_injection", - "crs_id": "933170", - "category": "attack_attempt", - "cwe": "502", - "capec": "1000/152/586", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": 
"grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "[oOcC]:\\d+:\\\".+?\\\":\\d+:{[\\W\\w]*}", - "options": { - "case_sensitive": true, - "min_length": 12 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-933-200", - "name": "PHP Injection Attack: Wrapper scheme detected", - "tags": { - "type": "php_code_injection", - "crs_id": "933200", - "category": "attack_attempt", - "cwe": "502", - "capec": "1000/152/586" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?:(?:bzip|ssh)2|z(?:lib|ip)|(?:ph|r)ar|expect|glob|ogg)://", - "options": { - "case_sensitive": true, - "min_length": 6 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls" - ] - }, - { - "id": "crs-934-100", - "name": "Node.js Injection Attack 1/2", - "tags": { - "type": "js_code_injection", - "crs_id": "934100", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/152/242" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\b(?:(?:l(?:(?:utimes|chmod)(?:Sync)?|(?:stat|ink)Sync)|w(?:rite(?:(?:File|v)(?:Sync)?|Sync)|atchFile)|u(?:n(?:watchFile|linkSync)|times(?:Sync)?)|s(?:(?:ymlink|tat)Sync|pawn(?:File|Sync))|ex(?:ec(?:File(?:Sync)?|Sync)|istsSync)|a(?:ppendFile|ccess)(?:Sync)?|(?:Caveat|Inode)s|open(?:dir)?Sync|new\\s+Function|Availability|\\beval)\\s*\\(|m(?:ain(?:Module\\s*(?:\\W*\\s*(?:constructor|require)|\\[)|\\s*(?:\\W*\\s*(?:constructor|require)|\\[))|kd(?:temp(?:Sync)?|irSync)\\s*\\(|odule\\.exports\\s*=)|c(?:(?:(?:h(?:mod|own)|lose)Sync|reate(?:Write|Read)Stream|p(?:Sync)?)\\s*\\(|o(?:nstructor\\s*(?:\\W*\\s*_load|\\[)|pyFile(?:Sync)?\\s*\\())|f(?:(?:(?:s(?:(?:yncS)?|tatS)|datas(?:yncS)?)ync|ch(?:mod|own)(?:Sync)?)\\s*\\(|u(?:nction\\s*\\(\\s*\\)\\s*{|times(?:Sync)?\\s*\\())|r(?:e(?:(?:ad(?:(?:File|link|dir)?Sync|v(?:Sync)?)|nameSync)\\s*\\(|quire\\s*(?:\\W*\\s*main|\\[))|m(?:Sync)?\\s*\\()|process\\s*(?:\\W*\\s*(?:mainModule|binding)|\\[)|t(?:his\\.constructor|runcateSync\\s*\\()|_(?:\\$\\$ND_FUNC\\$\\$_|_js_function)|global\\s*(?:\\W*\\s*process|\\[)|String\\s*\\.\\s*fromCharCode|binding\\s*\\[)", - "options": { - "case_sensitive": true, - "min_length": 3 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-934-101", - "name": "Node.js Injection Attack 2/2", - "tags": { - "type": "js_code_injection", - "crs_id": "934101", - "category": "attack_attempt", - "confidence": "1", - "cwe": "94", - "capec": "1000/152/242" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": 
"\\b(?:w(?:atch|rite)|(?:spaw|ope)n|exists|close|fork|read)\\s*\\(", - "options": { - "case_sensitive": true, - "min_length": 5 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-941-110", - "name": "XSS Filter - Category 1: Script Tag Vector", - "tags": { - "type": "xss", - "crs_id": "941110", - "category": "attack_attempt", - "cwe": "80", - "capec": "1000/152/242/63/591", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - }, - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "referer" - ] - }, - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "]*>[\\s\\S]*?", - "options": { - "case_sensitive": false, - "min_length": 8 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls", - "urlDecodeUni" - ] - }, - { - "id": "crs-941-120", - "name": "XSS Filter - Category 2: Event Handler Vector", - "tags": { - "type": "xss", - "crs_id": "941120", - "category": "attack_attempt", - "cwe": "83", - "capec": "1000/152/242/63/591/243", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - }, - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "referer" - ] - }, - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\bon(?:d(?:r(?:ag(?:en(?:ter|d)|leave|start|over)?|op)|urationchange|blclick)|s(?:e(?:ek(?:ing|ed)|arch|lect)|u(?:spend|bmit)|talled|croll|how)|m(?:ouse(?:(?:lea|mo)ve|o(?:ver|ut)|enter|down|up)|essage)|p(?:a(?:ge(?:hide|show)|(?:st|us)e)|lay(?:ing)?|rogress|aste|ointer(?:cancel|down|enter|leave|move|out|over|rawupdate|up))|c(?:anplay(?:through)?|o(?:ntextmenu|py)|hange|lick|ut)|a(?:nimation(?:iteration|start|end)|(?:fterprin|bor)t|uxclick|fterscriptexecute)|t(?:o(?:uch(?:cancel|start|move|end)|ggle)|imeupdate)|f(?:ullscreen(?:change|error)|ocus(?:out|in)?|inish)|(?:(?:volume|hash)chang|o(?:ff|n)lin)e|b(?:efore(?:unload|print)|lur)|load(?:ed(?:meta)?data|start|end)?|r(?:es(?:ize|et)|atechange)|key(?:press|down|up)|w(?:aiting|heel)|in(?:valid|put)|e(?:nded|rror)|unload)[\\s\\x0B\\x09\\x0C\\x3B\\x2C\\x28\\x3B]*?=[^=]", - "options": { - "min_length": 8 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls", - "urlDecodeUni" - ] - }, - { - "id": "crs-941-140", - "name": "XSS Filter - Category 4: Javascript URI Vector", - "tags": { - "type": "xss", - "crs_id": "941140", - "category": "attack_attempt", - "cwe": "84", - "capec": "1000/152/242/63/591/244", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - }, - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "referer" - ] - }, - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": 
"grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "[a-z]+=(?:[^:=]+:.+;)*?[^:=]+:url\\(javascript", - "options": { - "min_length": 18 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls", - "urlDecodeUni" - ] - }, - { - "id": "crs-941-170", - "name": "NoScript XSS InjectionChecker: Attribute Injection", - "tags": { - "type": "xss", - "crs_id": "941170", - "category": "attack_attempt", - "cwe": "83", - "capec": "1000/152/242/63/591/243", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - }, - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "referer" - ] - }, - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?:\\W|^)(?:javascript:(?:[\\s\\S]+[=\\x5c\\(\\[\\.<]|[\\s\\S]*?(?:\\bname\\b|\\x5c[ux]\\d)))|@\\W*?i\\W*?m\\W*?p\\W*?o\\W*?r\\W*?t\\W*?(?:/\\*[\\s\\S]*?)?(?:[\\\"']|\\W*?u\\W*?r\\W*?l[\\s\\S]*?\\()|[^-]*?-\\W*?m\\W*?o\\W*?z\\W*?-\\W*?b\\W*?i\\W*?n\\W*?d\\W*?i\\W*?n\\W*?g[^:]*?:\\W*?u\\W*?r\\W*?l[\\s\\S]*?\\(", - "options": { - "min_length": 6 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls", - "urlDecodeUni" - ] - }, - { - "id": "crs-941-180", - "name": "Node-Validator Deny List Keywords", - "tags": { - "type": "xss", - "crs_id": "941180", - "category": "attack_attempt", - "cwe": "79", - "capec": "1000/152/242/63/591" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "options": { - "enforce_word_boundary": true - }, - "list": [ - "document.cookie", - "document.write", - ".parentnode", - ".innerhtml", - "window.location", - "-moz-binding" - ] - }, - "operator": "phrase_match" - } - ], - "transformers": [ - "removeNulls", - "lowercase" - ] - }, - { - "id": "crs-941-200", - "name": "IE XSS Filters - Attack Detected via vmlframe tag", - "tags": { - "type": "xss", - "crs_id": "941200", - "category": "attack_attempt", - "cwe": "80", - "capec": "1000/152/242/63/591", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?i:<.*[:]?vmlframe.*?[\\s/+]*?src[\\s/+]*=)", - "options": { - "case_sensitive": true, - "min_length": 13 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls" - ] - }, - { - "id": "crs-941-210", - "name": "IE XSS Filters - Obfuscated Attack Detected via javascript injection", - "tags": { - "type": "xss", - "crs_id": "941210", - "category": "attack_attempt", - "cwe": "80", - "capec": "1000/152/242/63/591", - "confidence": "1" - }, - "conditions": [ - { - "parameters": 
{ - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?i:(?:j|&#x?0*(?:74|4A|106|6A);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:a|&#x?0*(?:65|41|97|61);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:v|&#x?0*(?:86|56|118|76);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:a|&#x?0*(?:65|41|97|61);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:s|&#x?0*(?:83|53|115|73);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:c|&#x?0*(?:67|43|99|63);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:r|&#x?0*(?:82|52|114|72);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:i|&#x?0*(?:73|49|105|69);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:p|&#x?0*(?:80|50|112|70);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:t|&#x?0*(?:84|54|116|74);?)(?:\\t|\\n|\\r|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?::|&(?:#x?0*(?:58|3A);?|colon;)).)", - "options": { - "case_sensitive": true, - "min_length": 12 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls" - ] - }, - { - "id": "crs-941-220", - "name": "IE XSS Filters - Obfuscated Attack Detected via vbscript injection", - "tags": { - "type": "xss", - "crs_id": "941220", - "category": "attack_attempt", - "cwe": "80", - "capec": "1000/152/242/63/591", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?i:(?:v|&#x?0*(?:86|56|118|76);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:b|&#x?0*(?:66|42|98|62);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:s|&#x?0*(?:83|53|115|73);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:c|&#x?0*(?:67|43|99|63);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:r|&#x?0*(?:82|52|114|72);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:i|&#x?0*(?:73|49|105|69);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:p|&#x?0*(?:80|50|112|70);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?:t|&#x?0*(?:84|54|116|74);?)(?:\\t|&(?:#x?0*(?:9|13|10|A|D);?|tab;|newline;))*(?::|&(?:#x?0*(?:58|3A);?|colon;)).)", - "options": { - "case_sensitive": true, - "min_length": 10 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls" - ] - }, - { - "id": "crs-941-230", - "name": "IE XSS Filters - Attack Detected via embed tag", - "tags": { - "type": "xss", - "crs_id": "941230", - "category": "attack_attempt", - "cwe": "83", - "capec": "1000/152/242/63/591/243", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "]", - "options": { - "min_length": 8 - } - }, - "operator": "match_regex" - } - ], - 
"transformers": [ - "removeNulls" - ] - }, - { - "id": "crs-941-300", - "name": "IE XSS Filters - Attack Detected via object tag", - "tags": { - "type": "xss", - "crs_id": "941300", - "category": "attack_attempt", - "cwe": "83", - "capec": "1000/152/242/63/591/243", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": ")|<.*\\+AD4-", - "options": { - "case_sensitive": true, - "min_length": 6 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-941-360", - "name": "JSFuck / Hieroglyphy obfuscation detected", - "tags": { - "type": "xss", - "crs_id": "941360", - "category": "attack_attempt", - "cwe": "87", - "capec": "1000/152/242/63/591/199" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "![!+ ]\\[\\]", - "options": { - "case_sensitive": true, - "min_length": 4 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-941-390", - "name": "Javascript method detected", - "tags": { - "type": "xss", - "crs_id": "941390", - "category": "attack_attempt", - "confidence": "1", - "cwe": "79", - "capec": "1000/152/242/63/591" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\b(?i:eval|settimeout|setinterval|new\\s+Function|alert|prompt)[\\s+]*\\([^\\)]", - "options": { - "case_sensitive": true, - "min_length": 5 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-942-100", - "name": "SQL Injection Attack Detected via libinjection", - "tags": { - "type": "sql_injection", - "crs_id": "942100", - "category": "attack_attempt", - "cwe": "89", - "capec": "1000/152/248/66" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ] - }, - "operator": "is_sqli" - } - ], - "transformers": [ - "removeNulls" - ] - }, - { - "id": "crs-942-160", - "name": "Detects blind sqli tests using sleep() or benchmark()", - "tags": { - "type": "sql_injection", - "crs_id": "942160", - "category": "attack_attempt", - "cwe": "89", - "capec": "1000/152/248/66/7", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - 
"regex": "(?i:sleep\\(\\s*?\\d*?\\s*?\\)|benchmark\\(.*?\\,.*?\\))", - "options": { - "case_sensitive": true, - "min_length": 7 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-942-240", - "name": "Detects MySQL charset switch and MSSQL DoS attempts", - "tags": { - "type": "sql_injection", - "crs_id": "942240", - "category": "attack_attempt", - "cwe": "89", - "capec": "1000/152/248/66/7", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?:[\\\"'`](?:;*?\\s*?waitfor\\s+(?:delay|time)\\s+[\\\"'`]|;.*?:\\s*?goto)|alter\\s*?\\w+.*?cha(?:racte)?r\\s+set\\s+\\w+)", - "options": { - "min_length": 7 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-942-250", - "name": "Detects MATCH AGAINST, MERGE and EXECUTE IMMEDIATE injections", - "tags": { - "type": "sql_injection", - "crs_id": "942250", - "category": "attack_attempt", - "cwe": "89", - "capec": "1000/152/248/66" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?i:merge.*?using\\s*?\\(|execute\\s*?immediate\\s*?[\\\"'`]|match\\s*?[\\w(?:),+-]+\\s*?against\\s*?\\()", - "options": { - "case_sensitive": true, - "min_length": 11 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-942-270", - "name": "Basic SQL injection", - "tags": { - "type": "sql_injection", - "crs_id": "942270", - "category": "attack_attempt", - "cwe": "89", - "capec": "1000/152/248/66" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "union.*?select.*?from", - "options": { - "min_length": 15 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-942-280", - "name": "SQL Injection with delay functions", - "tags": { - "type": "sql_injection", - "crs_id": "942280", - "category": "attack_attempt", - "cwe": "89", - "capec": "1000/152/248/66/7", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?:;\\s*?shutdown\\s*?(?:[#;{]|\\/\\*|--)|waitfor\\s*?delay\\s?[\\\"'`]+\\s?\\d|select\\s*?pg_sleep)", - "options": { - "min_length": 10 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-942-290", - "name": "Finds basic MongoDB SQL injection attempts", - "tags": { - "type": "nosql_injection", - "crs_id": "942290", - 
"category": "attack_attempt", - "cwe": "943", - "capec": "1000/152/248/676" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?i:(?:\\[?\\$(?:(?:s(?:lic|iz)|wher)e|e(?:lemMatch|xists|q)|n(?:o[rt]|in?|e)|l(?:ike|te?)|t(?:ext|ype)|a(?:ll|nd)|jsonSchema|between|regex|x?or|div|mod)\\]?)\\b)", - "options": { - "case_sensitive": true, - "min_length": 3 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "keys_only" - ] - }, - { - "id": "crs-942-360", - "name": "Detects concatenated basic SQL injection and SQLLFI attempts", - "tags": { - "type": "sql_injection", - "crs_id": "942360", - "category": "attack_attempt", - "cwe": "89", - "capec": "1000/152/248/66/470" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?:^[\\W\\d]+\\s*?(?:alter\\s*(?:a(?:(?:pplication\\s*rol|ggregat)e|s(?:ymmetric\\s*ke|sembl)y|u(?:thorization|dit)|vailability\\s*group)|c(?:r(?:yptographic\\s*provider|edential)|o(?:l(?:latio|um)|nversio)n|ertificate|luster)|s(?:e(?:rv(?:ice|er)|curity|quence|ssion|arch)|y(?:mmetric\\s*key|nonym)|togroup|chema)|m(?:a(?:s(?:ter\\s*key|k)|terialized)|e(?:ssage\\s*type|thod)|odule)|l(?:o(?:g(?:file\\s*group|in)|ckdown)|a(?:ngua|r)ge|ibrary)|t(?:(?:abl(?:espac)?|yp)e|r(?:igger|usted)|hreshold|ext)|p(?:a(?:rtition|ckage)|ro(?:cedur|fil)e|ermission)|d(?:i(?:mension|skgroup)|atabase|efault|omain)|r(?:o(?:l(?:lback|e)|ute)|e(?:sourc|mot)e)|f(?:u(?:lltext|nction)|lashback|oreign)|e(?:xte(?:nsion|rnal)|(?:ndpoi|ve)nt)|in(?:dex(?:type)?|memory|stance)|b(?:roker\\s*priority|ufferpool)|x(?:ml\\s*schema|srobject)|w(?:ork(?:load)?|rapper)|hi(?:erarchy|stogram)|o(?:perator|utline)|(?:nicknam|queu)e|us(?:age|er)|group|java|view)|union\\s*(?:(?:distin|sele)ct|all))\\b|\\b(?:(?:(?:trunc|cre|upd)at|renam)e|(?:inser|selec)t|de(?:lete|sc)|alter|load)\\s+(?:group_concat|load_file|char)\\b\\s*\\(?|[\\s(]load_file\\s*?\\(|[\\\"'`]\\s+regexp\\W)", - "options": { - "min_length": 5 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-942-500", - "name": "MySQL in-line comment detected", - "tags": { - "type": "sql_injection", - "crs_id": "942500", - "category": "attack_attempt", - "cwe": "89", - "capec": "1000/152/248/66" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?i:/\\*[!+](?:[\\w\\s=_\\-(?:)]+)?\\*/)", - "options": { - "case_sensitive": true, - "min_length": 5 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-943-100", - "name": "Possible Session Fixation Attack: Setting Cookie Values in HTML", - "tags": { - "type": "http_protocol_violation", - "crs_id": "943100", - "category": 
"attack_attempt", - "cwe": "384", - "capec": "1000/225/21/593/61", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?i:\\.cookie\\b.*?;\\W*?(?:expires|domain)\\W*?=|\\bhttp-equiv\\W+set-cookie\\b)", - "options": { - "case_sensitive": true, - "min_length": 15 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-944-100", - "name": "Remote Command Execution: Suspicious Java class detected", - "tags": { - "type": "java_code_injection", - "crs_id": "944100", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/152/242", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "java\\.lang\\.(?:runtime|processbuilder)", - "options": { - "case_sensitive": true, - "min_length": 17 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "crs-944-110", - "name": "Remote Command Execution: Java process spawn (CVE-2017-9805)", - "tags": { - "type": "java_code_injection", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/152/242" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?:unmarshaller|base64data|java\\.).*(?:runtime|processbuilder)", - "options": { - "case_sensitive": false, - "min_length": 13 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "crs-944-130", - "name": "Suspicious Java class detected", - "tags": { - "type": "java_code_injection", - "crs_id": "944130", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/152/242" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "list": [ - "com.opensymphony.xwork2", - "com.sun.org.apache", - "java.io.bufferedinputstream", - "java.io.bufferedreader", - "java.io.bytearrayinputstream", - "java.io.bytearrayoutputstream", - "java.io.chararrayreader", - "java.io.datainputstream", - "java.io.file", - "java.io.fileoutputstream", - "java.io.filepermission", - "java.io.filewriter", - "java.io.filterinputstream", - "java.io.filteroutputstream", - "java.io.filterreader", - "java.io.inputstream", - "java.io.inputstreamreader", - "java.io.linenumberreader", - 
"java.io.objectoutputstream", - "java.io.outputstream", - "java.io.pipedoutputstream", - "java.io.pipedreader", - "java.io.printstream", - "java.io.pushbackinputstream", - "java.io.reader", - "java.io.stringreader", - "java.lang.class", - "java.lang.integer", - "java.lang.number", - "java.lang.object", - "java.lang.process", - "java.lang.reflect", - "java.lang.runtime", - "java.lang.string", - "java.lang.stringbuilder", - "java.lang.system", - "javax.script.scriptenginemanager", - "org.apache.commons", - "org.apache.struts", - "org.apache.struts2", - "org.omg.corba", - "java.beans.xmldecode" - ], - "options": { - "enforce_word_boundary": true - } - }, - "operator": "phrase_match" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "crs-944-260", - "name": "Remote Command Execution: Malicious class-loading payload", - "tags": { - "type": "java_code_injection", - "crs_id": "944260", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/152/242", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?:class\\.module\\.classLoader\\.resources\\.context\\.parent\\.pipeline|springframework\\.context\\.support\\.FileSystemXmlApplicationContext)", - "options": { - "case_sensitive": true, - "min_length": 58 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-000-001", - "name": "Look for Cassandra injections", - "tags": { - "type": "nosql_injection", - "category": "attack_attempt", - "cwe": "943", - "capec": "1000/152/248/676" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - }, - { - "address": "server.request.headers.no_cookies" - } - ], - "regex": "\\ballow\\s+filtering\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeComments" - ] - }, - { - "id": "dog-000-002", - "name": "OGNL - Look for formatting injection patterns", - "tags": { - "type": "java_code_injection", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/152/242" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - }, - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - }, - { - "address": "server.request.headers.no_cookies" - } - ], - "regex": "[#%$]{(?:[^}]+[^\\w\\s}\\-_][^}]+|\\d+-\\d+)}", - "options": { - "case_sensitive": true - } - } - } - ], - "transformers": [] - }, - { - "id": "dog-000-003", - "name": "OGNL - Detect OGNL exploitation primitives", - "tags": { - "type": "java_code_injection", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/152/242", - "confidence": "1" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": 
"server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "[@#]ognl", - "options": { - "case_sensitive": true - } - } - } - ], - "transformers": [] - }, - { - "id": "dog-000-004", - "name": "Spring4Shell - Attempts to exploit the Spring4shell vulnerability", - "tags": { - "type": "exploit_detection", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/152/242", - "confidence": "1" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.request.body" - } - ], - "regex": "^class\\.module\\.classLoader\\.", - "options": { - "case_sensitive": false - } - } - } - ], - "transformers": [ - "keys_only" - ] - }, - { - "id": "dog-000-005", - "name": "Node.js: Prototype pollution through __proto__", - "tags": { - "type": "js_code_injection", - "category": "attack_attempt", - "cwe": "1321", - "capec": "1000/152/242", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - } - ], - "regex": "^__proto__$" - }, - "operator": "match_regex" - } - ], - "transformers": [ - "keys_only" - ] - }, - { - "id": "dog-000-006", - "name": "Node.js: Prototype pollution through constructor.prototype", - "tags": { - "type": "js_code_injection", - "category": "attack_attempt", - "cwe": "1321", - "capec": "1000/152/242", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - } - ], - "regex": "^constructor$" - }, - "operator": "match_regex" - }, - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - } - ], - "regex": "^prototype$" - }, - "operator": "match_regex" - } - ], - "transformers": [ - "keys_only" - ] - }, - { - "id": "dog-000-007", - "name": "Server side template injection: Velocity & Freemarker", - "tags": { - "type": "java_code_injection", - "category": "attack_attempt", - "cwe": "1336", - "capec": "1000/152/242/19", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "#(?:set|foreach|macro|parse|if)\\(.*\\)|<#assign.*>" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-001", - "name": "BurpCollaborator OOB domain", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "tool_name": "BurpCollaborator", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": 
"graphql.server.resolver" - } - ], - "regex": "\\b(?:burpcollaborator\\.net|oastify\\.com)\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-002", - "name": "Qualys OOB domain", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "tool_name": "Qualys", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\bqualysperiscope\\.com\\b|\\.oscomm\\." - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-003", - "name": "Probely OOB domain", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "tool_name": "Probely", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\bprbly\\.win\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-004", - "name": "Known malicious out-of-band interaction domain", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\b(?:webhook\\.site|\\.canarytokens\\.com|vii\\.one|act1on3\\.ru|gdsburp\\.com|arcticwolf\\.net|oob\\.li|htbiw\\.com|h4\\.vc|mochan\\.cloud|imshopping\\.com|bootstrapnodejs\\.com|mooo-ng\\.com|securitytrails\\.com|canyouhackit\\.io|7bae\\.xyz)\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-005", - "name": "Known suspicious out-of-band interaction domain", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\b(?:\\.ngrok\\.io|requestbin\\.com|requestbin\\.net)\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-006", - "name": "Rapid7 OOB domain", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "tool_name": 
"Rapid7", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\bappspidered\\.rapid7\\." - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-007", - "name": "Interact.sh OOB domain", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "tool_name": "interact.sh", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\b(?:interact\\.sh|oast\\.(?:pro|live|site|online|fun|me)|indusfacefinder\\.in|where\\.land|syhunt\\.net|tssrt\\.de|boardofcyber\\.io|assetnote-callback\\.com|praetorianlabs\\.dev|netspi\\.sh)\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-008", - "name": "Netsparker OOB domain", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "tool_name": "Netsparker", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\b(?:\\.|(?:\\\\|&#)(?:0*46|x0*2e);)?r87(?:\\.|(?:\\\\|&#)(?:0*46|x0*2e);)(?:me|com)\\b", - "options": { - "case_sensitive": false, - "min_length": 7 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-009", - "name": "WhiteHat Security OOB domain", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "tool_name": "WhiteHatSecurity", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\bwhsec(?:\\.|(?:\\\\|&#)(?:0*46|x0*2e);)us\\b", - "options": { - "case_sensitive": false, - "min_length": 8 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-010", - "name": "Nessus OOB domain", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "tool_name": "Nessus", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": 
"server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\b\\.nessus\\.org\\b", - "options": { - "case_sensitive": false, - "min_length": 8 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-011", - "name": "Watchtowr OOB domain", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "tool_name": "Watchtowr", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\bwatchtowr\\.com\\b", - "options": { - "case_sensitive": false, - "min_length": 8 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-913-012", - "name": "AppCheck NG OOB domain", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "tool_name": "AppCheckNG", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\bptst\\.io\\b", - "options": { - "case_sensitive": false, - "min_length": 7 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-920-001", - "name": "JWT authentication bypass", - "tags": { - "type": "http_protocol_violation", - "category": "attack_attempt", - "cwe": "287", - "capec": "1000/225/115", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.cookies" - }, - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "authorization" - ] - } - ], - "regex": "^(?:Bearer 
)?ey[A-Za-z0-9+_\\-/]*([QY][UW]x[Hn]Ij([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gOiAi[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]Ij([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]IDogI[km]5[Pv][Tb][km][U-X]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]IiA6ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]IDoi[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]Ij([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gOiAi[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciOiAi[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*IDogI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]IDogI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]yIgO([048ACEIMQSUYcgikoswy]|[\\x2b\\
x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciIDoi[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*IDogI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]yIgOiJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]IDoi[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]IDogI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]yI6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]yI6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]IiA6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*IDogI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]yIgO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]Ij([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A
i[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciOiJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*IDoi[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gOiJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]yIgOiAi[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]IDoi[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]IjogI[km]5[Pv][Tb][km][U-X]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]IiA6I[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6I[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]yI6I[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]yI6ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciIDogI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID(
[048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[QY][UW]x[Hn]IiA6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]IiA6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*IDoi[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ciO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6I[km]5[Pv][Tb][km][U-X]|[QY][UW]x[Hn]IiA6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]yI6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]yIgO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gOiJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ID([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gI[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]
F[Ms][RZ]yIgO([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[\\x2b\\x2f-9A-Za-z]ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*ICJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]I([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*IDoi[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]A6I[km]5[Pv][Tb][km][U-X]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]y([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gOiJ[Ou][Tb][02]5[Fl]|[QY][UW]x[Hn]Ijoi[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z]{2}[159BFJNRVZdhlptx][Bh][Tb][EG]ci([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[048AEIMQUYcgkosw]gOiAi[Tb][km]9[Ou][RZ][Q-Za-f]|[\\x2b\\x2f-9A-Za-z][02EGUWkm]F[Ms][RZ]yI6([048ACEIMQSUYcgikoswy]|[\\x2b\\x2f-9A-Za-z]I)*[CSiy]Ai[Tb][km]9[Ou][RZ][Q-Za-f])[A-Za-z0-9+-/]*\\.[A-Za-z0-9+_\\-/]+\\.(?:[A-Za-z0-9+_\\-/]+)?$", - "options": { - "case_sensitive": true - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-931-001", - "name": "RFI: URL Payload to well known RFI target", - "tags": { - "type": "rfi", - "category": "attack_attempt", - "cwe": "98", - "capec": "1000/152/175/253/193", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "^(?i:file|ftps?|https?).*/rfiinc\\.txt\\?+$", - "options": { - "case_sensitive": true, - "min_length": 17 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-932-100", - "name": "Shell spawn executing network command", - "tags": { - "type": "command_injection", - "category": "attack_attempt", - "cwe": "77", - "capec": "1000/152/248/88", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?:(?:['\"\\x60({|;&]|(?:^|['\"\\x60({|;&])(?:cmd(?:\\.exe)?\\s+(?:/\\w(?::\\w+)?\\s+)*))(?:ping|curl|wget|telnet)|\\bnslookup)[\\s,]", - "options": { - "case_sensitive": true, - "min_length": 5 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-934-001", - "name": "XXE - XML file loads external entity", - "tags": { - "type": "xxe", - "category": "attack_attempt", - "cwe": "91", - "capec": "1000/152/248/250", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.body" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?:<\\?xml[^>]*>.*)<!(?:DOCTYPE|ENTITY)[^>]+SYSTEM\\s+[^>]+>", - "options": { - "case_sensitive": false, - "min_length": 24 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "dog-941-001", - "name": "XSS in source property", - "tags": { - "type": "xss", - "category": "attack_attempt", - "cwe": "83", - "capec":
"1000/152/242/63/591/243", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - }, - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "referer" - ] - }, - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "<(?:iframe|esi:include)(?:(?:\\s|/)*\\w+=[\"'\\w]+)*(?:\\s|/)*src(?:doc)?=[\"']?(?:data:|javascript:|http:|dns:|//)[^\\s'\"]+['\"]?", - "options": { - "min_length": 14 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls", - "urlDecodeUni" - ] - }, - { - "id": "dog-942-001", - "name": "Blind XSS callback domains", - "tags": { - "type": "xss", - "category": "attack_attempt", - "cwe": "83", - "capec": "1000/152/242/63/591/243", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "https?:\\/\\/(?:.*\\.)?(?:bxss\\.(?:in|me)|xss\\.ht|js\\.rip)", - "options": { - "case_sensitive": false - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "nfd-000-001", - "name": "Detect common directory discovery scans", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.response.status" - } - ], - "regex": "^404$", - "options": { - "case_sensitive": true - } - } - }, - { - "operator": "phrase_match", - "parameters": { - "options": { - "enforce_word_boundary": true - }, - "inputs": [ - { - "address": "server.request.uri.raw" - } - ], - "list": [ - "/wordpress/", - "/etc/", - "/login.php", - "/install.php", - "/administrator", - "/admin.php", - "/wp-config", - "/phpmyadmin", - "/fckeditor", - "/mysql", - "/manager/html", - ".htaccess", - "/config.php", - "/configuration", - "/cgi-bin/php", - "/search.php", - "/tinymce", - "/tiny_mce", - "/settings.php", - "../../..", - "/install/", - "/download.php", - "/webdav", - "/forum.php", - "/user.php", - "/style.php", - "/jmx-console", - "/modules.php", - "/include.php", - "/default.asp", - "/help.php", - "/database.yml", - "/database.yml.pgsql", - "/database.yml.sqlite3", - "/database.yml.sqlite", - "/database.yml.mysql", - ".%2e/", - "/view.php", - "/header.php", - "/search.asp", - "%5c%5c", - "/server/php/", - "/invoker/jmxinvokerservlet", - "/phpmyadmin/index.php", - "/data/admin/allowurl.txt", - "/verify.php", - "/misc/ajax.js", - "/.idea", - "/module.php", - "/backup.rar", - "/backup.tar", - "/backup.zip", - "/backup.7z", - "/backup.gz", - "/backup.tgz", - "/backup.tar.gz", - "waitfor%20delay", - "/calendar.php", - "/news.php", - "/dompdf.php", - "))))))))))))))))", - "/web.config", - "tree.php", - "/cgi-bin-sdb/printenv", - "/comments.php", - "/detail.asp", - "/license.txt", - "/admin.asp", - "/auth.php", - "/list.php", - "/content.php", 
- "/mod.php", - "/mini.php", - "/install.pgsql", - "/install.mysql", - "/install.sqlite", - "/install.sqlite3", - "/install.txt", - "/install.md", - "/doku.php", - "/main.asp", - "/myadmin", - "/force-download.php", - "/iisprotect/admin", - "/.gitignore", - "/print.php", - "/common.php", - "/mainfile.php", - "/functions.php", - "/scripts/setup.php", - "/faq.php", - "/op/op.login.php", - "/home.php", - "/includes/hnmain.inc.php3", - "/preview.php", - "/dump.rar", - "/dump.tar", - "/dump.zip", - "/dump.7z", - "/dump.gz", - "/dump.tgz", - "/dump.tar.gz", - "/thumbnail.php", - "/sendcard.php", - "/global.asax", - "/directory.php", - "/footer.php", - "/error.asp", - "/forum.asp", - "/save.php", - "/htmlsax3.php", - "/adm/krgourl.php", - "/includes/converter.inc.php", - "/nucleus/libs/pluginadmin.php", - "/base_qry_common.php", - "/fileadmin", - "/bitrix/admin/", - "/adm.php", - "/util/barcode.php", - "/action.php", - "/rss.asp", - "/downloads.php", - "/page.php", - "/snarf_ajax.php", - "/fck/editor", - "/sendmail.php", - "/detail.php", - "/iframe.php", - "/swfupload.swf", - "/jenkins/login", - "/phpmyadmin/main.php", - "/phpmyadmin/scripts/setup.php", - "/user/index.php", - "/checkout.php", - "/process.php", - "/ks_inc/ajax.js", - "/export.php", - "/register.php", - "/cart.php", - "/console.php", - "/friend.php", - "/readmsg.php", - "/install.asp", - "/dagent/downloadreport.asp", - "/system/index.php", - "/core/changelog.txt", - "/js/util.js", - "/interna.php", - "/gallery.php", - "/links.php", - "/data/admin/ver.txt", - "/language/zh-cn.xml", - "/productdetails.asp", - "/admin/template/article_more/config.htm", - "/components/com_moofaq/includes/file_includer.php", - "/licence.txt", - "/rss.xsl", - "/vtigerservice.php", - "/mysql/main.php", - "/passwiki.php", - "/scr/soustab.php", - "/global.php", - "/email.php", - "/user.asp", - "/msd", - "/products.php", - "/cultbooking.php", - "/cron.php", - "/static/js/admincp.js", - "/comment.php", - "/maintainers", - "/modules/plain/adminpart/addplain.php", - "/wp-content/plugins/ungallery/source_vuln.php", - "/upgrade.txt", - "/category.php", - "/index_logged.php", - "/members.asp", - "/script/html.js", - "/images/ad.js", - "/awstats/awstats.pl", - "/includes/esqueletos/skel_null.php", - "/modules/profile/user.php", - "/window_top.php", - "/openbrowser.php", - "/thread.php", - "tinfoil_xss", - "/includes/include.php", - "/urheber.php", - "/header.inc.php", - "/mysqldumper", - "/display.php", - "/website.php", - "/stats.php", - "/assets/plugins/mp3_id/mp3_id.php", - "/siteminderagent/forms/smpwservices.fcc", - "/eval-stdin.php" - ] - } - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "nfd-000-002", - "name": "Detect failed attempt to fetch readme files", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.response.status" - } - ], - "regex": "^404$", - "options": { - "case_sensitive": true - } - } - }, - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - } - ], - "regex": "readme\\.[\\.a-z0-9]+$", - "options": { - "case_sensitive": false - } - } - } - ], - "transformers": [] - }, - { - "id": "nfd-000-003", - "name": "Detect failed attempt to fetch Java EE resource files", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - 
"confidence": "1" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.response.status" - } - ], - "regex": "^404$", - "options": { - "case_sensitive": true - } - } - }, - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - } - ], - "regex": "^(?:.*web\\-inf)(?:.*web\\.xml).*$", - "options": { - "case_sensitive": false - } - } - } - ], - "transformers": [] - }, - { - "id": "nfd-000-004", - "name": "Detect failed attempt to fetch code files", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.response.status" - } - ], - "regex": "^404$", - "options": { - "case_sensitive": true - } - } - }, - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - } - ], - "regex": "\\.(java|pyc?|rb|class)\\b", - "options": { - "case_sensitive": false - } - } - } - ], - "transformers": [] - }, - { - "id": "nfd-000-005", - "name": "Detect failed attempt to fetch source code archives", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.response.status" - } - ], - "regex": "^404$", - "options": { - "case_sensitive": true - } - } - }, - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - } - ], - "regex": "\\.(sql|log|ndb|gz|zip|tar\\.gz|tar|regVV|reg|conf|bz2|ini|db|war|bat|inc|btr|server|ds|conf|config|admin|master|sln|bak)\\b(?:[^.]|$)", - "options": { - "case_sensitive": false - } - } - } - ], - "transformers": [] - }, - { - "id": "nfd-000-006", - "name": "Detect failed attempt to fetch sensitive files", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.response.status" - } - ], - "regex": "^404$", - "options": { - "case_sensitive": true - } - } - }, - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - } - ], - "regex": "\\.(cgi|bat|dll|exe|key|cert|crt|pem|der|pkcs|pkcs|pkcs[0-9]*|nsf|jsa|war|java|class|vb|vba|so|git|svn|hg|cvs)([^a-zA-Z0-9_]|$)", - "options": { - "case_sensitive": false - } - } - } - ], - "transformers": [] - }, - { - "id": "nfd-000-007", - "name": "Detect failed attempt to fetch archives", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.response.status" - } - ], - "regex": "^404$", - "options": { - "case_sensitive": true - } - } - }, - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - } - ], - "regex": "/[\\d\\-_]*\\.(rar|tar|zip|7z|gz|tgz|tar.gz)", - "options": { - "case_sensitive": false - } - } - } - ], - "transformers": [] - }, - { - "id": "nfd-000-008", - "name": "Detect failed attempt to trigger incorrect application behavior", - "tags": { - "type": "security_scanner", - 
"category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.response.status" - } - ], - "regex": "^404$", - "options": { - "case_sensitive": true - } - } - }, - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - } - ], - "regex": "(/(administrator/components/com.*\\.php|response\\.write\\(.+\\))|select\\(.+\\)from|\\(.*sleep\\(.+\\)|(%[a-zA-Z0-9]{2}[a-zA-Z]{0,1})+\\))", - "options": { - "case_sensitive": false - } - } - } - ], - "transformers": [] - }, - { - "id": "nfd-000-009", - "name": "Detect failed attempt to leak the structure of the application", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.response.status" - } - ], - "regex": "^404$", - "options": { - "case_sensitive": true - } - } - }, - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - } - ], - "regex": "/(login\\.rol|LICENSE|[\\w-]+\\.(plx|pwd))$", - "options": { - "case_sensitive": false - } - } - } - ], - "transformers": [] - }, - { - "id": "nfd-000-010", - "name": "Detect failed attempts to find API documentation", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "0" - }, - "conditions": [ - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.response.status" - } - ], - "regex": "^404$", - "options": { - "case_sensitive": true - } - } - }, - { - "operator": "match_regex", - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - } - ], - "regex": "(?:/swagger\\b|/api[-/]docs?\\b)", - "options": { - "case_sensitive": false - } - } - } - ], - "transformers": [] - }, - { - "id": "rasp-930-100", - "name": "Local file inclusion exploit", - "enabled": false, - "tags": { - "type": "lfi", - "category": "vulnerability_trigger", - "cwe": "22", - "capec": "1000/255/153/126", - "confidence": "0", - "module": "rasp" - }, - "conditions": [ - { - "parameters": { - "resource": [ - { - "address": "server.io.fs.file" - } - ], - "params": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ] - }, - "operator": "lfi_detector" - } - ], - "transformers": [], - "on_match": [ - "stack_trace" - ] - }, - { - "id": "rasp-934-100", - "name": "Server-side request forgery exploit", - "enabled": false, - "tags": { - "type": "ssrf", - "category": "vulnerability_trigger", - "cwe": "918", - "capec": "1000/225/115/664", - "confidence": "0", - "module": "rasp" - }, - "conditions": [ - { - "parameters": { - "resource": [ - { - "address": "server.io.net.url" - } - ], - "params": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ] - }, - "operator": "ssrf_detector" - } - ], - "transformers": 
[], - "on_match": [ - "stack_trace" - ] - }, - { - "id": "rasp-942-100", - "name": "SQL injection exploit", - "enabled": false, - "tags": { - "type": "sql_injection", - "category": "vulnerability_trigger", - "cwe": "89", - "capec": "1000/152/248/66", - "confidence": "0", - "module": "rasp" - }, - "conditions": [ - { - "parameters": { - "resource": [ - { - "address": "server.db.statement" - } - ], - "params": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "db_type": [ - { - "address": "server.db.system" - } - ] - }, - "operator": "sqli_detector" - } - ], - "transformers": [], - "on_match": [ - "stack_trace" - ] - }, - { - "id": "sqr-000-001", - "name": "SSRF: Try to access the credential manager of the main cloud services", - "tags": { - "type": "ssrf", - "category": "attack_attempt", - "cwe": "918", - "capec": "1000/225/115/664", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?i)^\\W*((http|ftp)s?://)?\\W*((::f{4}:)?(169|(0x)?0*a9|0+251)\\.?(254|(0x)?0*fe|0+376)[0-9a-fx\\.:]+|metadata\\.google\\.internal|metadata\\.goog)\\W*/", - "options": { - "min_length": 4 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls" - ] - }, - { - "id": "sqr-000-002", - "name": "Server-side Javascript injection: Try to detect obvious JS injection", - "tags": { - "type": "js_code_injection", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/152/242" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "require\\(['\"][\\w\\.]+['\"]\\)|process\\.\\w+\\([\\w\\.]*\\)|\\.toString\\(\\)", - "options": { - "min_length": 4 - } - }, - "operator": "match_regex" - } - ], - "transformers": [ - "removeNulls" - ] - }, - { - "id": "sqr-000-008", - "name": "Windows: Detect attempts to exfiltrate .ini files", - "tags": { - "type": "command_injection", - "category": "attack_attempt", - "cwe": "78", - "capec": "1000/152/248/88", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?i)[&|]\\s*type\\s+%\\w+%\\\\+\\w+\\.ini\\s*[&|]" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "sqr-000-009", - "name": "Linux: Detect attempts to exfiltrate passwd files", - "tags": { - "type": "command_injection", - "category": "attack_attempt", - "cwe": "78", - "capec": "1000/152/248/88", - "confidence": "1" - }, - "conditions": [ - { - 
"parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?i)[&|]\\s*cat\\s*\\/etc\\/[\\w\\.\\/]*passwd\\s*[&|]" - }, - "operator": "match_regex" - } - ], - "transformers": [ - "cmdLine" - ] - }, - { - "id": "sqr-000-010", - "name": "Windows: Detect attempts to timeout a shell", - "tags": { - "type": "command_injection", - "category": "attack_attempt", - "cwe": "78", - "capec": "1000/152/248/88", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(?i)[&|]\\s*timeout\\s+/t\\s+\\d+\\s*[&|]" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "sqr-000-011", - "name": "SSRF: Try to access internal OMI service (CVE-2021-38647)", - "tags": { - "type": "ssrf", - "category": "attack_attempt", - "cwe": "918", - "capec": "1000/225/115/664", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "http(s?):\\/\\/([A-Za-z0-9\\.\\-\\_]+|\\[[A-Fa-f0-9\\:]+\\]|):5986\\/wsman", - "options": { - "min_length": 4 - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "sqr-000-012", - "name": "SSRF: Detect SSRF attempt on internal service", - "tags": { - "type": "ssrf", - "category": "attack_attempt", - "cwe": "918", - "capec": "1000/225/115/664", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "^(jar:)?(http|https):\\/\\/([0-9oq]{1,5}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}|[0-9]{1,10})(:[0-9]{1,5})?(\\/[^:@]*)?$" - }, - "operator": "match_regex" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "sqr-000-013", - "name": "SSRF: Detect SSRF attempts using IPv6 or octal/hexdecimal obfuscation", - "tags": { - "type": "ssrf", - "category": "attack_attempt", - "cwe": "918", - "capec": "1000/225/115/664", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": 
"^(jar:)?(http|https):\\/\\/((\\[)?[:0-9a-f\\.x]{2,}(\\])?)(:[0-9]{1,5})?(\\/[^:@]*)?$" - }, - "operator": "match_regex" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "sqr-000-014", - "name": "SSRF: Detect SSRF domain redirection bypass", - "tags": { - "type": "ssrf", - "category": "attack_attempt", - "cwe": "918", - "capec": "1000/225/115/664", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "grpc.server.request.message" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "(http|https):\\/\\/(?:.*\\.)?(?:burpcollaborator\\.net|localtest\\.me|mail\\.ebc\\.apple\\.com|bugbounty\\.dod\\.network|.*\\.[nx]ip\\.io|oastify\\.com|oast\\.(?:pro|live|site|online|fun|me)|sslip\\.io|requestbin\\.com|requestbin\\.net|hookbin\\.com|webhook\\.site|canarytokens\\.com|interact\\.sh|ngrok\\.io|bugbounty\\.click|prbly\\.win|qualysperiscope\\.com|vii\\.one|act1on3\\.ru)" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "sqr-000-015", - "name": "SSRF: Detect SSRF attempt using non HTTP protocol", - "tags": { - "type": "ssrf", - "category": "attack_attempt", - "cwe": "918", - "capec": "1000/225/115/664", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "^(jar:)?((file|netdoc):\\/\\/[\\\\\\/]+|(dict|gopher|ldap|sftp|tftp):\\/\\/.*:[0-9]{1,5})" - }, - "operator": "match_regex" - } - ], - "transformers": [ - "lowercase" - ] - }, - { - "id": "sqr-000-017", - "name": "Log4shell: Attempt to exploit log4j CVE-2021-44228", - "tags": { - "type": "exploit_detection", - "category": "attack_attempt", - "cwe": "94", - "capec": "1000/152/242", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.uri.raw" - }, - { - "address": "server.request.query" - }, - { - "address": "server.request.body" - }, - { - "address": "server.request.path_params" - }, - { - "address": "server.request.headers.no_cookies" - }, - { - "address": "graphql.server.all_resolvers" - }, - { - "address": "graphql.server.resolver" - } - ], - "regex": "\\${[^j]*j[^n]*n[^d]*d[^i]*i[^:]*:[^}]*}" - }, - "operator": "match_regex" - } - ], - "transformers": [ - "unicode_normalize" - ] - }, - { - "id": "ua0-600-0xx", - "name": "Joomla exploitation tool", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Joomla exploitation tool", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "JDatabaseDriverMysqli" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-10x", - "name": "Nessus", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Nessus", - "confidence": "1" - }, - "conditions": [ - { - 
"parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)^Nessus(/|([ :]+SOAP))" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-12x", - "name": "Arachni", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Arachni", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "^Arachni\\/v" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-13x", - "name": "Jorgee", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Jorgee", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bJorgee\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-14x", - "name": "Probely", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Probely", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bProbely\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-15x", - "name": "Metis", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Metis", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bmetis\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-16x", - "name": "SQL power injector", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "SQLPowerInjector", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "sql power injector" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-18x", - "name": "N-Stealth", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "N-Stealth", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bn-stealth\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-19x", - "name": "Brutus", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Brutus", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bbrutus\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-1xx", - "name": "Shellshock exploitation 
tool", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "\\(\\) \\{ :; *\\}" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-20x", - "name": "Netsparker", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Netsparker", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "\\bnetsparker\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-22x", - "name": "JAASCois", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "JAASCois", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bjaascois\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-26x", - "name": "Nsauditor", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Nsauditor", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bnsauditor\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-27x", - "name": "Paros", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Paros", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)Mozilla/.* Paros/" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-28x", - "name": "DirBuster", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "DirBuster", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bdirbuster\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-29x", - "name": "Pangolin", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Pangolin", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bpangolin\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-2xx", - "name": "Qualys", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Qualys", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - 
"user-agent" - ] - } - ], - "regex": "(?i)\\bqualys\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-30x", - "name": "SQLNinja", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "SQLNinja", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bsqlninja\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-31x", - "name": "Nikto", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Nikto", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "\\(Nikto/[\\d\\.]+\\)" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-33x", - "name": "BlackWidow", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "BlackWidow", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bblack\\s?widow\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-34x", - "name": "Grendel-Scan", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Grendel-Scan", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bgrendel-scan\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-35x", - "name": "Havij", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Havij", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bhavij\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-36x", - "name": "w3af", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "w3af", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bw3af\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-37x", - "name": "Nmap", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Nmap", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "nmap (nse|scripting engine)" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-39x", - "name": "Nessus Scripted", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - 
"tool_name": "Nessus", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)^'?[a-z0-9_]+\\.nasl'?$" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-3xx", - "name": "Evil Scanner", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "EvilScanner", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bevilScanner\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-40x", - "name": "WebFuck", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "WebFuck", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bWebFuck\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-42x", - "name": "OpenVAS", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "OpenVAS", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)OpenVAS\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-43x", - "name": "Spider-Pig", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Spider-Pig", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "Powered by Spider-Pig by tinfoilsecurity\\.com" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-44x", - "name": "Zgrab", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Zgrab", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "Mozilla/\\d+.\\d+ zgrab" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-45x", - "name": "Zmeu", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Zmeu", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bZmEu\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-47x", - "name": "GoogleSecurityScanner", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "GoogleSecurityScanner", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": 
"(?i)\\bGoogleSecurityScanner\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-48x", - "name": "Commix", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Commix", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "^commix\\/" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-49x", - "name": "Gobuster", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Gobuster", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "^gobuster\\/" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-4xx", - "name": "CGIchk", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "CGIchk", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bcgichk\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-51x", - "name": "FFUF", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "FFUF", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)^Fuzz Faster U Fool\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-52x", - "name": "Nuclei", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Nuclei", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)^Nuclei\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-53x", - "name": "Tsunami", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Tsunami", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bTsunamiSecurityScanner\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-54x", - "name": "Nimbostratus", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Nimbostratus", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bnimbostratus-bot\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-55x", - "name": "Datadog test scanner: user-agent", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - 
"tool_name": "Datadog Canary Test", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - }, - { - "address": "grpc.server.request.metadata", - "key_path": [ - "dd-canary" - ] - } - ], - "regex": "^dd-test-scanner-log(?:$|/|\\s)" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-56x", - "name": "Datadog test scanner - blocking version: user-agent", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Datadog Canary Test", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - }, - { - "address": "grpc.server.request.metadata", - "key_path": [ - "dd-canary" - ] - } - ], - "regex": "^dd-test-scanner-log-block(?:$|/|\\s)" - }, - "operator": "match_regex" - } - ], - "transformers": [], - "on_match": [ - "block" - ] - }, - { - "id": "ua0-600-57x", - "name": "AlertLogic", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "AlertLogic", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "\\bAlertLogic-MDR-" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-58x", - "name": "wfuzz", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "wfuzz", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "\\bwfuzz\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-59x", - "name": "Detectify", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Detectify", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "\\bdetectify\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-5xx", - "name": "Blind SQL Injection Brute Forcer", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "BSQLBF", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)\\bbsqlbf\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-60x", - "name": "masscan", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "masscan", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "^masscan/" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-61x", - "name": "WPScan", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": 
"1000/118/169", - "tool_name": "WPScan", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "^wpscan\\b" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-62x", - "name": "Aon pentesting services", - "tags": { - "type": "commercial_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Aon", - "confidence": "0" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "^Aon/" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-63x", - "name": "FeroxBuster", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "feroxbuster", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "^feroxbuster/" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-6xx", - "name": "Stealthy scanner", - "tags": { - "type": "security_scanner", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "mozilla/4\\.0 \\(compatible(; msie (?:6\\.0; (?:win32|Windows NT 5\\.0)|4\\.0; Windows NT))?\\)", - "options": { - "case_sensitive": false - } - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-7xx", - "name": "SQLmap", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "SQLmap", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "^sqlmap/" - }, - "operator": "match_regex" - } - ], - "transformers": [] - }, - { - "id": "ua0-600-9xx", - "name": "Skipfish", - "tags": { - "type": "attack_tool", - "category": "attack_attempt", - "cwe": "200", - "capec": "1000/118/169", - "tool_name": "Skipfish", - "confidence": "1" - }, - "conditions": [ - { - "parameters": { - "inputs": [ - { - "address": "server.request.headers.no_cookies", - "key_path": [ - "user-agent" - ] - } - ], - "regex": "(?i)mozilla/5\\.0 sf/" - }, - "operator": "match_regex" - } - ], - "transformers": [] - } - ], - "processors": [ - { - "id": "extract-content", - "generator": "extract_schema", - "conditions": [ - { - "operator": "equals", - "parameters": { - "inputs": [ - { - "address": "waf.context.processor", - "key_path": [ - "extract-schema" - ] - } - ], - "type": "boolean", - "value": true - } - } - ], - "parameters": { - "mappings": [ - { - "inputs": [ - { - "address": "server.request.body" - } - ], - "output": "_dd.appsec.s.req.body" - }, - { - "inputs": [ - { - "address": "server.request.cookies" - } - ], - "output": "_dd.appsec.s.req.cookies" - }, - { - "inputs": [ - { - "address": "server.request.query" - } - ], - "output": "_dd.appsec.s.req.query" - }, - { - "inputs": [ - { - "address": "server.request.path_params" - } - ], - "output": "_dd.appsec.s.req.params" - }, - { - "inputs": [ - { - 
"address": "server.response.body" - } - ], - "output": "_dd.appsec.s.res.body" - }, - { - "inputs": [ - { - "address": "graphql.server.all_resolvers" - } - ], - "output": "_dd.appsec.s.graphql.all_resolvers" - }, - { - "inputs": [ - { - "address": "graphql.server.resolver" - } - ], - "output": "_dd.appsec.s.graphql.resolver" - } - ], - "scanners": [ - { - "tags": { - "category": "payment" - } - }, - { - "tags": { - "category": "pii" - } - } - ] - }, - "evaluate": false, - "output": true - }, - { - "id": "extract-headers", - "generator": "extract_schema", - "conditions": [ - { - "operator": "equals", - "parameters": { - "inputs": [ - { - "address": "waf.context.processor", - "key_path": [ - "extract-schema" - ] - } - ], - "type": "boolean", - "value": true - } - } - ], - "parameters": { - "mappings": [ - { - "inputs": [ - { - "address": "server.request.headers.no_cookies" - } - ], - "output": "_dd.appsec.s.req.headers" - }, - { - "inputs": [ - { - "address": "server.response.headers.no_cookies" - } - ], - "output": "_dd.appsec.s.res.headers" - } - ], - "scanners": [ - { - "tags": { - "category": "credentials" - } - }, - { - "tags": { - "category": "pii" - } - } - ] - }, - "evaluate": false, - "output": true - } - ], - "scanners": [ - { - "id": "406f8606-52c4-4663-8db9-df70f9e8766c", - "name": "ZIP Code", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:zip|postal)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "^[0-9]{5}(?:-[0-9]{4})?$", - "options": { - "case_sensitive": true, - "min_length": 5 - } - } - }, - "tags": { - "type": "zipcode", - "category": "address" - } - }, - { - "id": "JU1sRk3mSzqSUJn6GrVn7g", - "name": "American Express Card Scanner (4+4+4+3 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b3[47]\\d{2}(?:(?:\\s\\d{4}\\s\\d{4}\\s\\d{3})|(?:\\,\\d{4}\\,\\d{4}\\,\\d{3})|(?:-\\d{4}-\\d{4}-\\d{3})|(?:\\.\\d{4}\\.\\d{4}\\.\\d{3}))\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "amex", - "category": "payment" - } - }, - { - "id": "edmH513UTQWcRiQ9UnzHlw-mod", - "name": "American Express Card Scanner (4+6|5+5|6 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b3[47]\\d{2}(?:(?:\\s\\d{5,6}\\s\\d{5,6})|(?:\\.\\d{5,6}\\.\\d{5,6})|(?:-\\d{5,6}-\\d{5,6})|(?:,\\d{5,6},\\d{5,6}))\\b", - "options": { - "case_sensitive": false, - "min_length": 17 - } - } - }, - "tags": { - "type": "card", - "card_type": "amex", - "category": "payment" - } - }, - { - "id": "e6K4h_7qTLaMiAbaNXoSZA", - "name": "American Express Card Scanner (8+7 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - 
"parameters": { - "regex": "\\b3[47]\\d{6}(?:(?:\\s\\d{7})|(?:\\,\\d{7})|(?:-\\d{7})|(?:\\.\\d{7}))\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "amex", - "category": "payment" - } - }, - { - "id": "K2rZflWzRhGM9HiTc6whyQ", - "name": "American Express Card Scanner (1x15 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b3[47]\\d{13}\\b", - "options": { - "case_sensitive": false, - "min_length": 15 - } - } - }, - "tags": { - "type": "card", - "card_type": "amex", - "category": "payment" - } - }, - { - "id": "9d7756e343cefa22a5c098e1092590f806eb5446", - "name": "Basic Authentication Scanner", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\bauthorization\\b", - "options": { - "case_sensitive": false, - "min_length": 13 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "^basic\\s+[A-Za-z0-9+/=]+", - "options": { - "case_sensitive": false, - "min_length": 7 - } - } - }, - "tags": { - "type": "basic_auth", - "category": "credentials" - } - }, - { - "id": "mZy8XjZLReC9smpERXWnnw", - "name": "Bearer Authentication Scanner", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\bauthorization\\b", - "options": { - "case_sensitive": false, - "min_length": 13 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "^bearer\\s+[-a-z0-9._~+/]{4,}", - "options": { - "case_sensitive": false, - "min_length": 11 - } - } - }, - "tags": { - "type": "bearer_token", - "category": "credentials" - } - }, - { - "id": "450239afc250a19799b6c03dc0e16fd6a4b2a1af", - "name": "Canadian Social Insurance Number Scanner", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:social[\\s_]?(?:insurance(?:\\s+number)?)?|SIN|Canadian[\\s_]?(?:social[\\s_]?(?:insurance)?|insurance[\\s_]?number)?)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b\\d{3}-\\d{3}-\\d{3}\\b", - "options": { - "case_sensitive": false, - "min_length": 11 - } - } - }, - "tags": { - "type": "canadian_sin", - "category": "pii" - } - }, - { - "id": "87a879ff33693b46c8a614d8211f5a2c289beca0", - "name": "Digest Authentication Scanner", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\bauthorization\\b", - "options": { - "case_sensitive": false, - "min_length": 13 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "^digest\\s+", - "options": { - "case_sensitive": false, - "min_length": 7 - } - } - }, - "tags": { - "type": "digest_auth", - "category": "credentials" - } - }, - { - "id": "qWumeP1GQUa_E4ffAnT-Yg", - "name": "American Express Card Scanner (1x14 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "(?:30[0-59]\\d|3[689]\\d{2})(?:\\d{10})", - "options": { - "case_sensitive": false, - "min_length": 14 - } - } - }, - "tags": { - "type": "card", - "card_type": "diners", - 
"category": "payment" - } - }, - { - "id": "NlTWWM5LS6W0GSqBLuvtRw", - "name": "Diners Card Scanner (4+4+4+2 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:30[0-59]\\d|3[689]\\d{2})(?:(?:\\s\\d{4}\\s\\d{4}\\s\\d{2})|(?:\\,\\d{4}\\,\\d{4}\\,\\d{2})|(?:-\\d{4}-\\d{4}-\\d{2})|(?:\\.\\d{4}\\.\\d{4}\\.\\d{2}))\\b", - "options": { - "case_sensitive": false, - "min_length": 17 - } - } - }, - "tags": { - "type": "card", - "card_type": "diners", - "category": "payment" - } - }, - { - "id": "Xr5VdbQSTXitYGGiTfxBpw", - "name": "Diners Card Scanner (4+6+4 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:30[0-59]\\d|3[689]\\d{2})(?:(?:\\s\\d{6}\\s\\d{4})|(?:\\.\\d{6}\\.\\d{4})|(?:-\\d{6}-\\d{4})|(?:,\\d{6},\\d{4}))\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "diners", - "category": "payment" - } - }, - { - "id": "gAbunN_WQNytxu54DjcbAA-mod", - "name": "Diners Card Scanner (8+6 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:30[0-59]\\d{5}|3[689]\\d{6})\\s?(?:(?:\\s\\d{6})|(?:\\,\\d{6})|(?:-\\d{6})|(?:\\.\\d{6}))\\b", - "options": { - "case_sensitive": false, - "min_length": 14 - } - } - }, - "tags": { - "type": "card", - "card_type": "diners", - "category": "payment" - } - }, - { - "id": "9cs4qCfEQBeX17U7AepOvQ", - "name": "MasterCard Scanner (2x8 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:6221(?:2[6-9]|[3-9][0-9])\\d{2}(?:,\\d{8}|\\s\\d{8}|-\\d{8}|\\.\\d{8})|6229(?:[01][0-9]|2[0-5])\\d{2}(?:,\\d{8}|\\s\\d{8}|-\\d{8}|\\.\\d{8})|(?:6011|65\\d{2}|64[4-9]\\d|622[2-8])\\d{4}(?:,\\d{8}|\\s\\d{8}|-\\d{8}|\\.\\d{8}))\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "discover", - "category": "payment" - } - }, - { - "id": "YBIDWJIvQWW_TFOyU0CGJg", - "name": "Discover Card Scanner (4x4 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": 
"\\b(?:(?:(?:6221(?:2[6-9]|[3-9][0-9])\\d{2}(?:,\\d{4}){2})|(?:6221\\s(?:2[6-9]|[3-9][0-9])\\d{2}(?:\\s\\d{4}){2})|(?:6221\\.(?:2[6-9]|[3-9][0-9])\\d{2}(?:\\.\\d{4}){2})|(?:6221-(?:2[6-9]|[3-9][0-9])\\d{2}(?:-\\d{4}){2}))|(?:(?:6229(?:[01][0-9]|2[0-5])\\d{2}(?:,\\d{4}){2})|(?:6229\\s(?:[01][0-9]|2[0-5])\\d{2}(?:\\s\\d{4}){2})|(?:6229\\.(?:[01][0-9]|2[0-5])\\d{2}(?:\\.\\d{4}){2})|(?:6229-(?:[01][0-9]|2[0-5])\\d{2}(?:-\\d{4}){2}))|(?:(?:6011|65\\d{2}|64[4-9]\\d|622[2-8])(?:(?:\\s\\d{4}){3}|(?:\\.\\d{4}){3}|(?:-\\d{4}){3}|(?:,\\d{4}){3})))\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "discover", - "category": "payment" - } - }, - { - "id": "12cpbjtVTMaMutFhh9sojQ", - "name": "Discover Card Scanner (1x16 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:6221(?:2[6-9]|[3-9][0-9])\\d{10}|6229(?:[01][0-9]|2[0-5])\\d{10}|(?:6011|65\\d{2}|64[4-9]\\d|622[2-8])\\d{12})\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "discover", - "category": "payment" - } - }, - { - "id": "PuXiVTCkTHOtj0Yad1ppsw", - "name": "Standard E-mail Address", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:(?:e[-\\s]?)?mail|address|sender|\\bto\\b|from|recipient)\\b", - "options": { - "case_sensitive": false, - "min_length": 2 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b[\\w!#$%&'*+/=?`{|}~^-]+(?:\\.[\\w!#$%&'*+/=?`{|}~^-]+)*(%40|@)(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,6}\\b", - "options": { - "case_sensitive": false, - "min_length": 5 - } - } - }, - "tags": { - "type": "email", - "category": "pii" - } - }, - { - "id": "8VS2RKxzR8a_95L5fuwaXQ", - "name": "IBAN", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:iban|account|sender|receiver)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:NO\\d{2}(?:[ \\-]?\\d{4}){2}[ \\-]?\\d{3}|BE\\d{2}(?:[ \\-]?\\d{4}){3}|(?:DK|FO|FI|GL|SD)\\d{2}(?:[ \\-]?\\d{4}){3}[ \\-]?\\d{2}|NL\\d{2}[ \\-]?[A-Z]{4}(?:[ \\-]?\\d{4}){2}[ \\-]?\\d{2}|MK\\d{2}[ \\-]?\\d{3}[A-Z0-9](?:[ \\-]?[A-Z0-9]{4}){2}[ \\-]?[A-Z0-9]\\d{2}|SI\\d{17}|(?:AT|BA|EE|LT|XK)\\d{18}|(?:LU|KZ|EE|LT)\\d{5}[A-Z0-9]{13}|LV\\d{2}[A-Z]{4}[A-Z0-9]{13}|(?:LI|CH)\\d{2}[ \\-]?\\d{4}[ \\-]?\\d[A-Z0-9]{3}(?:[ \\-]?[A-Z0-9]{4}){2}[ \\-]?[A-Z0-9]|HR\\d{2}(?:[ \\-]?\\d{4}){4}[ \\-]?\\d|GE\\d{2}[ \\-]?[A-Z0-9]{2}\\d{2}\\d{14}|VA\\d{20}|BG\\d{2}[A-Z]{4}\\d{6}[A-Z0-9]{8}|BH\\d{2}[A-Z]{4}[A-Z0-9]{14}|GB\\d{2}[A-Z]{4}(?:[ \\-]?\\d{4}){3}[ \\-]?\\d{2}|IE\\d{2}[ \\-]?[A-Z0-9]{4}(?:[ \\-]?\\d{4}){3}[ \\-]?\\d{2}|(?:CR|DE|ME|RS)\\d{2}(?:[ \\-]?\\d{4}){4}[ \\-]?\\d{2}|(?:AE|TL|IL)\\d{2}(?:[ \\-]?\\d{4}){4}[ \\-]?\\d{3}|GI\\d{2}[ \\-]?[A-Z]{4}(?:[ \\-]?[A-Z0-9]{4}){3}[ \\-]?[A-Z0-9]{3}|IQ\\d{2}[ \\-]?[A-Z]{4}(?:[ \\-]?\\d{4}){3}[ \\-]?\\d{3}|MD\\d{2}(?:[ \\-]?[A-Z0-9]{4}){5}|SA\\d{2}[ \\-]?\\d{2}[A-Z0-9]{2}(?:[ \\-]?[A-Z0-9]{4}){4}|RO\\d{2}[ \\-]?[A-Z]{4}(?:[ \\-]?[A-Z0-9]{4}){4}|(?:PK|VG)\\d{2}[ \\-]?[A-Z0-9]{4}(?:[ \\-]?\\d{4}){4}|AD\\d{2}(?:[ \\-]?\\d{4}){2}(?:[ \\-]?[A-Z0-9]{4}){3}|(?:CZ|SK|ES|SE|TN)\\d{2}(?:[ 
\\-]?\\d{4}){5}|(?:LY|PT|ST)\\d{2}(?:[ \\-]?\\d{4}){5}[ \\-]?\\d|TR\\d{2}[ \\-]?\\d{4}[ \\-]?\\d[A-Z0-9]{3}(?:[ \\-]?[A-Z0-9]{4}){3}[ \\-]?[A-Z0-9]{2}|IS\\d{2}(?:[ \\-]?\\d{4}){5}[ \\-]?\\d{2}|(?:IT|SM)\\d{2}[ \\-]?[A-Z]\\d{3}[ \\-]?\\d{4}[ \\-]?\\d{3}[A-Z0-9](?:[ \\-]?[A-Z0-9]{4}){2}[ \\-]?[A-Z0-9]{3}|GR\\d{2}[ \\-]?\\d{4}[ \\-]?\\d{3}[A-Z0-9](?:[ \\-]?[A-Z0-9]{4}){3}[A-Z0-9]{3}|(?:FR|MC)\\d{2}(?:[ \\-]?\\d{4}){2}[ \\-]?\\d{2}[A-Z0-9]{2}(?:[ \\-]?[A-Z0-9]{4}){2}[ \\-]?[A-Z0-9]\\d{2}|MR\\d{2}(?:[ \\-]?\\d{4}){5}[ \\-]?\\d{3}|(?:SV|DO)\\d{2}[ \\-]?[A-Z]{4}(?:[ \\-]?\\d{4}){5}|BY\\d{2}[ \\-]?[A-Z]{4}[ \\-]?\\d{4}(?:[ \\-]?[A-Z0-9]{4}){4}|GT\\d{2}(?:[ \\-]?[A-Z0-9]{4}){6}|AZ\\d{2}[ \\-]?[A-Z0-9]{4}(?:[ \\-]?\\d{5}){4}|LB\\d{2}[ \\-]?\\d{4}(?:[ \\-]?[A-Z0-9]{5}){4}|(?:AL|CY)\\d{2}(?:[ \\-]?\\d{4}){2}(?:[ \\-]?[A-Z0-9]{4}){4}|(?:HU|PL)\\d{2}(?:[ \\-]?\\d{4}){6}|QA\\d{2}[ \\-]?[A-Z]{4}(?:[ \\-]?[A-Z0-9]{4}){5}[ \\-]?[A-Z0-9]|PS\\d{2}[ \\-]?[A-Z0-9]{4}(?:[ \\-]?\\d{4}){5}[ \\-]?\\d|UA\\d{2}[ \\-]?\\d{4}[ \\-]?\\d{2}[A-Z0-9]{2}(?:[ \\-]?[A-Z0-9]{4}){4}[ \\-]?[A-Z0-9]|BR\\d{2}(?:[ \\-]?\\d{4}){5}[ \\-]?\\d{3}[A-Z0-9][ \\-]?[A-Z0-9]|EG\\d{2}(?:[ \\-]?\\d{4}){6}\\d|MU\\d{2}[ \\-]?[A-Z]{4}(?:[ \\-]?\\d{4}){4}\\d{3}[A-Z][ \\-]?[A-Z]{2}|(?:KW|JO)\\d{2}[ \\-]?[A-Z]{4}(?:[ \\-]?[A-Z0-9]{4}){5}[ \\-]?[A-Z0-9]{2}|MT\\d{2}[ \\-]?[A-Z]{4}[ \\-]?\\d{4}[ \\-]?\\d[A-Z0-9]{3}(?:[ \\-]?[A-Z0-9]{3}){4}[ \\-]?[A-Z0-9]{3}|SC\\d{2}[ \\-]?[A-Z]{4}(?:[ \\-]?\\d{4}){5}[ \\-]?[A-Z]{3}|LC\\d{2}[ \\-]?[A-Z]{4}(?:[ \\-]?[A-Z0-9]{4}){6})\\b", - "options": { - "case_sensitive": false, - "min_length": 15 - } - } - }, - "tags": { - "type": "iban", - "category": "payment" - } - }, - { - "id": "h6WJcecQTwqvN9KeEtwDvg", - "name": "JCB Card Scanner (1x16 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b35(?:2[89]|[3-9][0-9])(?:\\d{12})\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "jcb", - "category": "payment" - } - }, - { - "id": "gcEaMu_VSJ2-bGCEkgyC0w", - "name": "JCB Card Scanner (2x8 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b35(?:2[89]|[3-9][0-9])\\d{4}(?:(?:,\\d{8})|(?:-\\d{8})|(?:\\s\\d{8})|(?:\\.\\d{8}))\\b", - "options": { - "case_sensitive": false, - "min_length": 17 - } - } - }, - "tags": { - "type": "card", - "card_type": "jcb", - "category": "payment" - } - }, - { - "id": "imTliuhXT5GAeRNhqChXQQ", - "name": "JCB Card Scanner (4x4 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b35(?:2[89]|[3-9][0-9])(?:(?:\\s\\d{4}){3}|(?:\\.\\d{4}){3}|(?:-\\d{4}){3}|(?:,\\d{4}){3})\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "jcb", - "category": "payment" - } - }, - { - "id": 
"9osY3xc9Q7ONAV0zw9Uz4A", - "name": "JSON Web Token", - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\bey[I-L][\\w=-]+\\.ey[I-L][\\w=-]+(\\.[\\w.+\\/=-]+)?\\b", - "options": { - "case_sensitive": false, - "min_length": 20 - } - } - }, - "tags": { - "type": "json_web_token", - "category": "credentials" - } - }, - { - "id": "d1Q9D3YMRxuVKf6CZInJPw", - "name": "Maestro Card Scanner (1x16 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:5[06-9]\\d{2}|6\\d{3})(?:\\d{12})\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "maestro", - "category": "payment" - } - }, - { - "id": "M3YIQKKjRVmoeQuM3pjzrw", - "name": "Maestro Card Scanner (2x8 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:5[06-9]\\d{6}|6\\d{7})(?:\\s\\d{8}|\\.\\d{8}|-\\d{8}|,\\d{8})\\b", - "options": { - "case_sensitive": false, - "min_length": 17 - } - } - }, - "tags": { - "type": "card", - "card_type": "maestro", - "category": "payment" - } - }, - { - "id": "hRxiQBlSSVKcjh5U7LZYLA", - "name": "Maestro Card Scanner (4x4 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:5[06-9]\\d{2}|6\\d{3})(?:(?:\\s\\d{4}){3}|(?:\\.\\d{4}){3}|(?:-\\d{4}){3}|(?:,\\d{4}){3})\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "maestro", - "category": "payment" - } - }, - { - "id": "NwhIYNS4STqZys37WlaIKA", - "name": "MasterCard Scanner (2x8 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:(?:5[1-5]\\d{2})|(?:222[1-9])|(?:22[3-9]\\d)|(?:2[3-6]\\d{2})|(?:27[0-1]\\d)|(?:2720))(?:(?:\\d{4}(?:(?:,\\d{8})|(?:-\\d{8})|(?:\\s\\d{8})|(?:\\.\\d{8}))))\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "mastercard", - "category": "payment" - } - }, - { - "id": "axxJkyjhRTOuhjwlsA35Vw", - "name": "MasterCard Scanner (4x4 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:(?:5[1-5]\\d{2})|(?:222[1-9])|(?:22[3-9]\\d)|(?:2[3-6]\\d{2})|(?:27[0-1]\\d)|(?:2720))(?:(?:\\s\\d{4}){3}|(?:\\.\\d{4}){3}|(?:-\\d{4}){3}|(?:,\\d{4}){3})\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } 
- }, - "tags": { - "type": "card", - "card_type": "mastercard", - "category": "payment" - } - }, - { - "id": "76EhmoK3TPqJcpM-fK0pLw", - "name": "MasterCard Scanner (1x16 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:(?:5[1-5]\\d{2})|(?:222[1-9])|(?:22[3-9]\\d)|(?:2[3-6]\\d{2})|(?:27[0-1]\\d)|(?:2720))(?:\\d{12})\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "mastercard", - "category": "payment" - } - }, - { - "id": "18b608bd7a764bff5b2344c0", - "name": "Phone number", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\bphone|number|mobile\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "^(?:\\(\\+\\d{1,3}\\)|\\+\\d{1,3}|00\\d{1,3})?[-\\s\\.]?(?:\\(\\d{3}\\)[-\\s\\.]?)?(?:\\d[-\\s\\.]?){6,10}$", - "options": { - "case_sensitive": false, - "min_length": 6 - } - } - }, - "tags": { - "type": "phone", - "category": "pii" - } - }, - { - "id": "de0899e0cbaaa812bb624cf04c912071012f616d-mod", - "name": "UK National Insurance Number Scanner", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "^nin$|\\binsurance\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b[A-Z]{2}[\\s-]?\\d{6}[\\s-]?[A-Z]?\\b", - "options": { - "case_sensitive": false, - "min_length": 8 - } - } - }, - "tags": { - "type": "uk_nin", - "category": "pii" - } - }, - { - "id": "d962f7ddb3f55041e39195a60ff79d4814a7c331", - "name": "US Passport Scanner", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\bpassport\\b", - "options": { - "case_sensitive": false, - "min_length": 8 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b[0-9A-Z]{9}\\b|\\b[0-9]{6}[A-Z][0-9]{2}\\b", - "options": { - "case_sensitive": false, - "min_length": 8 - } - } - }, - "tags": { - "type": "passport_number", - "category": "pii" - } - }, - { - "id": "7771fc3b-b205-4b93-bcef-28608c5c1b54", - "name": "United States Social Security Number Scanner", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:SSN|(?:(?:social)?[\\s_]?(?:security)?[\\s_]?(?:number)?)?)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b\\d{3}[-\\s\\.]{1}\\d{2}[-\\s\\.]{1}\\d{4}\\b", - "options": { - "case_sensitive": false, - "min_length": 11 - } - } - }, - "tags": { - "type": "us_ssn", - "category": "pii" - } - }, - { - "id": "ac6d683cbac77f6e399a14990793dd8fd0fca333", - "name": "US Vehicle Identification Number Scanner", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:vehicle[_\\s-]*identification[_\\s-]*number|vin)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b[A-HJ-NPR-Z0-9]{17}\\b", - "options": { - "case_sensitive": false, - "min_length": 17 - } - } - }, - "tags": { - "type": "vin", - "category": "pii" - } - }, - { - "id": "wJIgOygRQhKkR69b_9XbRQ", - "name": "Visa Card Scanner (2x8 
digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b4\\d{3}(?:(?:\\d{4}(?:(?:,\\d{8})|(?:-\\d{8})|(?:\\s\\d{8})|(?:\\.\\d{8}))))\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "visa", - "category": "payment" - } - }, - { - "id": "0o71SJxXQNK7Q6gMbBesFQ", - "name": "Visa Card Scanner (4x4 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "\\b4\\d{3}(?:(?:,\\d{4}){3}|(?:\\s\\d{4}){3}|(?:\\.\\d{4}){3}|(?:-\\d{4}){3})\\b", - "options": { - "case_sensitive": false, - "min_length": 16 - } - } - }, - "tags": { - "type": "card", - "card_type": "visa", - "category": "payment" - } - }, - { - "id": "QrHD6AfgQm6z-j0wStxTvA", - "name": "Visa Card Scanner (1x15 & 1x16 & 1x19 digits)", - "key": { - "operator": "match_regex", - "parameters": { - "regex": "\\b(?:card|cc|credit|debit|payment|amex|visa|mastercard|maestro|discover|jcb|diner)\\b", - "options": { - "case_sensitive": false, - "min_length": 3 - } - } - }, - "value": { - "operator": "match_regex", - "parameters": { - "regex": "4[0-9]{12}(?:[0-9]{3})?", - "options": { - "case_sensitive": false, - "min_length": 13 - } - } - }, - "tags": { - "type": "card", - "card_type": "visa", - "category": "payment" - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/appsec-internal-go/httpsec/client_ip.go b/vendor/github.com/DataDog/appsec-internal-go/httpsec/client_ip.go deleted file mode 100644 index 3761a773..00000000 --- a/vendor/github.com/DataDog/appsec-internal-go/httpsec/client_ip.go +++ /dev/null @@ -1,126 +0,0 @@ -package httpsec - -import ( - "net" - "net/textproto" - "strings" - - "github.com/DataDog/appsec-internal-go/netip" -) - -const ( - // RemoteIPTag is the tag name used for the remote HTTP request IP address. - RemoteIPTag = "network.client.ip" - // ClientIPTag is the tag name used for the client IP deduced from the HTTP - // request headers with ClientIP(). - ClientIPTag = "http.client_ip" -) - -// ClientIPTags returns the resulting Datadog span tags `http.client_ip` -// containing the client IP and `network.client.ip` containing the remote IP. -// The tags are present only if a valid ip address has been returned by -// ClientIP(). -func ClientIPTags(remoteIP, clientIP netip.Addr) (tags map[string]string) { - remoteIPValid := remoteIP.IsValid() - clientIPValid := clientIP.IsValid() - if !remoteIPValid && !clientIPValid { - return nil - } - - tags = make(map[string]string, 2) - if remoteIPValid { - tags[RemoteIPTag] = remoteIP.String() - } - if clientIPValid { - tags[ClientIPTag] = clientIP.String() - } - return tags -} - -// ClientIP returns the first public IP address found in the given headers. If -// none is present, it returns the first valid IP address present, possibly -// being a local IP address. The remote address, when valid, is used as fallback -// when no IP address has been found at all. 
diff --git a/vendor/github.com/DataDog/appsec-internal-go/httpsec/client_ip.go b/vendor/github.com/DataDog/appsec-internal-go/httpsec/client_ip.go
deleted file mode 100644
index 3761a773..00000000
--- a/vendor/github.com/DataDog/appsec-internal-go/httpsec/client_ip.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package httpsec
-
-import (
-	"net"
-	"net/textproto"
-	"strings"
-
-	"github.com/DataDog/appsec-internal-go/netip"
-)
-
-const (
-	// RemoteIPTag is the tag name used for the remote HTTP request IP address.
-	RemoteIPTag = "network.client.ip"
-	// ClientIPTag is the tag name used for the client IP deduced from the HTTP
-	// request headers with ClientIP().
-	ClientIPTag = "http.client_ip"
-)
-
-// ClientIPTags returns the resulting Datadog span tags `http.client_ip`
-// containing the client IP and `network.client.ip` containing the remote IP.
-// The tags are present only if a valid ip address has been returned by
-// ClientIP().
-func ClientIPTags(remoteIP, clientIP netip.Addr) (tags map[string]string) {
-	remoteIPValid := remoteIP.IsValid()
-	clientIPValid := clientIP.IsValid()
-	if !remoteIPValid && !clientIPValid {
-		return nil
-	}
-
-	tags = make(map[string]string, 2)
-	if remoteIPValid {
-		tags[RemoteIPTag] = remoteIP.String()
-	}
-	if clientIPValid {
-		tags[ClientIPTag] = clientIP.String()
-	}
-	return tags
-}
-
-// ClientIP returns the first public IP address found in the given headers. If
-// none is present, it returns the first valid IP address present, possibly
-// being a local IP address. The remote address, when valid, is used as fallback
-// when no IP address has been found at all.
-func ClientIP(hdrs map[string][]string, hasCanonicalHeaders bool, remoteAddr string, monitoredHeaders []string) (remoteIP, clientIP netip.Addr) {
-	// Walk IP-related headers
-	var foundIP netip.Addr
-headersLoop:
-	for _, headerName := range monitoredHeaders {
-		if hasCanonicalHeaders {
-			headerName = textproto.CanonicalMIMEHeaderKey(headerName)
-		}
-
-		headerValues, exists := hdrs[headerName]
-		if !exists {
-			continue // this monitored header is not present
-		}
-
-		// Assuming a list of comma-separated IP addresses, split them and build
-		// the list of values to try to parse as IP addresses
-		var ips []string
-		for _, ip := range headerValues {
-			ips = append(ips, strings.Split(ip, ",")...)
-		}
-
-		// Look for the first valid or global IP address in the comma-separated list
-		for _, ipstr := range ips {
-			ip := parseIP(strings.TrimSpace(ipstr))
-			if !ip.IsValid() {
-				continue
-			}
-			// Replace foundIP if still not valid in order to keep the oldest
-			if !foundIP.IsValid() {
-				foundIP = ip
-			}
-			if isGlobal(ip) {
-				foundIP = ip
-				break headersLoop
-			}
-		}
-	}
-
-	// Decide which IP address is the client one by starting with the remote IP
-	if ip := parseIP(remoteAddr); ip.IsValid() {
-		remoteIP = ip
-		clientIP = ip
-	}
-
-	// The IP address found in the headers supersedes a private remote IP address.
-	if foundIP.IsValid() && !isGlobal(remoteIP) || isGlobal(foundIP) {
-		clientIP = foundIP
-	}
-
-	return remoteIP, clientIP
-}
-
-func parseIP(s string) netip.Addr {
-	if ip, err := netip.ParseAddr(s); err == nil {
-		return ip
-	}
-	if h, _, err := net.SplitHostPort(s); err == nil {
-		if ip, err := netip.ParseAddr(h); err == nil {
-			return ip
-		}
-	}
-	return netip.Addr{}
-}
-
-var ipv6SpecialNetworks = [...]netip.Prefix{
-	netip.MustParsePrefix("fec0::/10"), // site local
-}
-
-func isGlobal(ip netip.Addr) bool {
-	// IsPrivate also checks for ipv6 ULA.
-	// We check that these addresses are not considered public, hence not global.
-	// See https://www.rfc-editor.org/rfc/rfc4193.txt for more details.
-	isGlobal := ip.IsValid() && !ip.IsPrivate() && !ip.IsLoopback() && !ip.IsLinkLocalUnicast()
-	if !isGlobal || !ip.Is6() {
-		return isGlobal
-	}
-	for _, n := range ipv6SpecialNetworks {
-		if n.Contains(ip) {
-			return false
-		}
-	}
-	return isGlobal
-}
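The deleted ClientIP above encodes a clear resolution order: the first global IP found in the monitored headers wins; otherwise the first valid (possibly private) header IP; otherwise the remote address. A small usage sketch against the pre-removal API (dd-trace-go v2 now carries this logic internally) makes the precedence concrete:

```go
package main

// Illustration of the ClientIP resolution order implemented above, written
// against the package as it existed before this removal.
import (
	"fmt"

	"github.com/DataDog/appsec-internal-go/httpsec"
)

func main() {
	hdrs := map[string][]string{
		"X-Forwarded-For": {"10.0.0.2, 203.0.113.7"}, // private hop, then the public client
	}
	remoteIP, clientIP := httpsec.ClientIP(hdrs, true, "192.168.1.10:4321", []string{"x-forwarded-for"})

	// The first global header IP supersedes the private peer address:
	// map[http.client_ip:203.0.113.7 network.client.ip:192.168.1.10]
	fmt.Println(httpsec.ClientIPTags(remoteIP, clientIP))
}
```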
diff --git a/vendor/github.com/DataDog/appsec-internal-go/log/backend.go b/vendor/github.com/DataDog/appsec-internal-go/log/backend.go
deleted file mode 100644
index b9d94f5c..00000000
--- a/vendor/github.com/DataDog/appsec-internal-go/log/backend.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2022-present Datadog, Inc.
-
-package log
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"strings"
-)
-
-var (
-	backend = Backend{
-		Trace:     defaultWithLevel(logLevelTrace),
-		Debug:     defaultWithLevel(logLevelDebug),
-		Info:      defaultWithLevel(logLevelInfo),
-		Warn:      defaultWithLevel(logLevelWarn),
-		Errorf:    defaultErrorfWithLevel(logLevelError),
-		Criticalf: defaultErrorfWithLevel(logLevelCritical),
-	}
-	defaultBackendLogLevel = logLevelError
-)
-
-type Backend struct {
-	Trace     func(string, ...any)
-	Debug     func(string, ...any)
-	Info      func(string, ...any)
-	Warn      func(string, ...any)
-	Errorf    func(string, ...any) error
-	Criticalf func(string, ...any) error
-}
-
-// SetBackend replaces the active log backend with the provided one. Any nil
-// function in the new backend will silently ignore any message logged at that
-// level.
-func SetBackend(newBackend Backend) {
-	if newBackend.Trace == nil {
-		newBackend.Trace = noopLogger
-	}
-	if newBackend.Debug == nil {
-		newBackend.Debug = noopLogger
-	}
-	if newBackend.Info == nil {
-		newBackend.Info = noopLogger
-	}
-	if newBackend.Warn == nil {
-		newBackend.Warn = noopLogger
-	}
-	if newBackend.Errorf == nil {
-		newBackend.Errorf = fmt.Errorf
-	}
-	if newBackend.Criticalf == nil {
-		newBackend.Criticalf = fmt.Errorf
-	}
-
-	backend = newBackend
-}
-
-// defaultWithLevel returns the default log backend function for the provided
-// logLevel. This returns a no-op function if the default backend logLevel does
-// not enable logging at that level.
-func defaultWithLevel(level logLevel) func(string, ...any) {
-	if defaultBackendLogLevel < level {
-		return noopLogger
-	}
-	return func(format string, args ...any) {
-		log.Printf(fmt.Sprintf("[%s] %s\n", level, format), args...)
-	}
-}
-
-// defaultErrorfWithLevel returns the default log backend function for the
-// provided error logLevel.
-func defaultErrorfWithLevel(level logLevel) func(string, ...any) error {
-	if defaultBackendLogLevel < level {
-		return fmt.Errorf
-	}
-	return func(format string, args ...any) error {
-		err := fmt.Errorf(format, args...)
-		log.Printf("[%s] %v", level, err)
-		return err
-	}
-}
-
-// noopLogger does nothing.
-func noopLogger(string, ...any) { /* noop */ }
-
-type logLevel uint8
-
-const (
-	logLevelTrace logLevel = 1 << iota
-	logLevelDebug
-	logLevelInfo
-	logLevelWarn
-	logLevelError
-	logLevelCritical
-)
-
-func (l logLevel) String() string {
-	switch l {
-	case logLevelTrace:
-		return "TRACE"
-	case logLevelDebug:
-		return "DEBUG"
-	case logLevelInfo:
-		return "INFO"
-	case logLevelWarn:
-		return "WARN"
-	case logLevelError:
-		return "ERROR"
-	case logLevelCritical:
-		return "CRITICAL"
-	default:
-		return "UNKNOWN"
-	}
-}
-
-func init() {
-	ddLogLevel := os.Getenv("DD_LOG_LEVEL")
-	switch strings.ToUpper(ddLogLevel) {
-	case "TRACE":
-		defaultBackendLogLevel = logLevelTrace
-	case "DEBUG":
-		defaultBackendLogLevel = logLevelDebug
-	case "INFO":
-		defaultBackendLogLevel = logLevelInfo
-	case "WARN":
-		defaultBackendLogLevel = logLevelWarn
-	case "ERROR":
-		defaultBackendLogLevel = logLevelError
-	case "CRITICAL":
-		defaultBackendLogLevel = logLevelCritical
-	default:
-		// Ignore invalid/unexpected values
-	}
-}
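backend.go is the pluggable half of the facade: SetBackend fills nil fields with no-ops (or fmt.Errorf for the two error-returning levels), and the built-in default backend filters through DD_LOG_LEVEL. A sketch, assuming the import path as it existed before this removal, of bridging these logs into zap (which this service already uses as a direct dependency):

```go
package main

// Sketch: routing the AppSec log facade into zap. Nil fields — Trace here —
// are replaced with no-ops by SetBackend itself; a nil Errorf/Criticalf
// would fall back to fmt.Errorf (an error is returned but nothing logged).
import (
	"fmt"

	appseclog "github.com/DataDog/appsec-internal-go/log"
	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	sugar := logger.Sugar()

	appseclog.SetBackend(appseclog.Backend{
		Debug: sugar.Debugf,
		Info:  sugar.Infof,
		Warn:  sugar.Warnf,
		Errorf: func(format string, args ...any) error {
			err := fmt.Errorf(format, args...)
			sugar.Error(err)
			return err
		},
		Criticalf: func(format string, args ...any) error {
			err := fmt.Errorf(format, args...)
			sugar.Error(err)
			return err
		},
	})

	appseclog.Info("appsec logs now flow through zap: %s", "ok")
}
```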
diff --git a/vendor/github.com/DataDog/appsec-internal-go/log/log.go b/vendor/github.com/DataDog/appsec-internal-go/log/log.go
deleted file mode 100644
index a34f578d..00000000
--- a/vendor/github.com/DataDog/appsec-internal-go/log/log.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2022-present Datadog, Inc.
-
-// Package log provides a logging facility that is used by this library, and
-// which can be configured to piggyback on another logging facility where
-// available. If not explicitly configured, this will log messages using the Go
-// standard library log package, filtered according to the log level set in the
-// `DD_LOG_LEVEL` environment variable (or `ERROR` if none is set).
-//
-// Custom logger integrations are configured by calling the SetBackend function.
-package log
-
-// Trace logs a message with format using the TRACE log level.
-func Trace(format string, args ...any) {
-	backend.Trace(format, args...)
-}
-
-// Debug logs a message with format using the DEBUG log level.
-func Debug(format string, args ...any) {
-	backend.Debug(format, args...)
-}
-
-// Info logs a message with format using the INFO log level.
-func Info(format string, args ...any) {
-	backend.Info(format, args...)
-}
-
-// Warn logs a message with format using the WARN log level.
-func Warn(format string, args ...any) {
-	backend.Warn(format, args...)
-}
-
-// Errorf logs a message with format using the ERROR log level and returns an
-// error containing the formatted log message.
-func Errorf(format string, args ...any) error {
-	return backend.Errorf(format, args...)
-}
-
-// Criticalf logs a message with format using the CRITICAL log level and returns an
-// error containing the formatted log message.
-func Criticalf(format string, args ...any) error {
-	return backend.Criticalf(format, args...)
-}
diff --git a/vendor/github.com/DataDog/appsec-internal-go/netip/ip.go b/vendor/github.com/DataDog/appsec-internal-go/netip/ip.go
deleted file mode 100644
index 99e1e627..00000000
--- a/vendor/github.com/DataDog/appsec-internal-go/netip/ip.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2022 Datadog, Inc.
-
-package netip
-
-import "net/netip"
-
-// Addr wraps a netip.Addr value
-type Addr = netip.Addr
-
-// Prefix wraps a netip.Prefix value
-type Prefix = netip.Prefix
-
-var (
-	// ParseAddr wraps the netip.ParseAddr function
-	ParseAddr = netip.ParseAddr
-	// MustParsePrefix wraps the netip.MustParsePrefix function
-	MustParsePrefix = netip.MustParsePrefix
-	// MustParseAddr wraps the netip.MustParseAddr function
-	MustParseAddr = netip.MustParseAddr
-	// AddrFrom16 wraps the netip.AddrFrom16 function
-	AddrFrom16 = netip.AddrFrom16
-)
-
-// IPv4 wraps the netip.AddrFrom4 function
-func IPv4(a, b, c, d byte) Addr {
-	e := [4]byte{a, b, c, d}
-	return netip.AddrFrom4(e)
-}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/tagger/origindetection/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/core/tagger/origindetection/LICENSE
new file mode 100644
index 00000000..b370545b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/tagger/origindetection/LICENSE
@@ -0,0 +1,200 @@
+ Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-present Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/tagger/origindetection/origindetection.go b/vendor/github.com/DataDog/datadog-agent/comp/core/tagger/origindetection/origindetection.go new file mode 100644 index 00000000..3c61af22 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/comp/core/tagger/origindetection/origindetection.go @@ -0,0 +1,150 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// TODO: A lot of the code in this file is currently duplicated in taggertypes. +// We will need to move all the code in taggertype to this file and remove the taggertypes package. + +// Package origindetection contains the types and functions used for Origin Detection. +package origindetection + +import ( + "fmt" + "strconv" + "strings" +) + +// ProductOrigin is the origin of the product that sent the entity. +type ProductOrigin int + +const ( + // ProductOriginDogStatsDLegacy is the ProductOrigin for DogStatsD in Legacy mode. + // TODO: remove this when dogstatsd_origin_detection_unified is enabled by default + ProductOriginDogStatsDLegacy ProductOrigin = iota + // ProductOriginDogStatsD is the ProductOrigin for DogStatsD. + ProductOriginDogStatsD ProductOrigin = iota + // ProductOriginAPM is the ProductOrigin for APM. + ProductOriginAPM ProductOrigin = iota + + // Local Data Prefixes + // These prefixes are used to build the Local Data list. + + // LocalDataContainerIDPrefix is the prefix used for the Container ID sent in the Local Data list. + LocalDataContainerIDPrefix = "ci-" + // LocalDataLegacyContainerIDPrefix is the Legacy prefix used by APM for the Container ID sent in the Local Data list. + LocalDataLegacyContainerIDPrefix = "cid-" + // LocalDataInodePrefix is the prefix used for the Inode sent in the Local Data list. + LocalDataInodePrefix = "in-" + + // External Data Prefixes + // These prefixes are used to build the External Data Environment Variable. + + // ExternalDataInitPrefix is the prefix for the Init flag in the External Data. + ExternalDataInitPrefix = "it-" + // ExternalDataContainerNamePrefix is the prefix for the Container Name in the External Data. + ExternalDataContainerNamePrefix = "cn-" + // ExternalDataPodUIDPrefix is the prefix for the Pod UID in the External Data. + ExternalDataPodUIDPrefix = "pu-" +) + +// OriginInfo contains the Origin Detection information. +type OriginInfo struct { + LocalData LocalData // LocalData is the local data list. + ExternalData ExternalData // ExternalData is the external data list. + Cardinality string // Cardinality is the cardinality of the resolved origin. + ProductOrigin ProductOrigin // ProductOrigin is the product that sent the origin information. +} + +// OriginInfoString returns a string representation of the OriginInfo. +func OriginInfoString(originInfo OriginInfo) string { + return LocalDataString(originInfo.LocalData) + ExternalDataString(originInfo.ExternalData) +} + +// LocalData that is generated by the client and sent to the Agent. +type LocalData struct { + ProcessID uint32 // ProcessID of the container process on the host. + ContainerID string // ContainerID sent from the client. + Inode uint64 // Inode is the Cgroup inode of the container. + PodUID string // PodUID of the pod sent from the client. +} + +// LocalDataString returns a string representation of the LocalData. 
+func LocalDataString(localData LocalData) string { + return fmt.Sprintf("%v%v%v%v", localData.ProcessID, localData.ContainerID, localData.Inode, localData.PodUID) +} + +// ExternalData generated by the Admission Controller and sent to the Agent. +type ExternalData struct { + Init bool // Init is true if the container is an init container. + ContainerName string // ContainerName is the name of the container as seen by the Admission Controller. + PodUID string // PodUID is the UID of the pod as seen by the Admission Controller. +} + +// ExternalDataString returns a string representation of the ExternalData. +func ExternalDataString(externalData ExternalData) string { + return fmt.Sprintf("%v%v%v", externalData.Init, externalData.ContainerName, externalData.PodUID) +} + +// GenerateContainerIDFromExternalData generates a container ID from the external data. +type GenerateContainerIDFromExternalData func(externalData ExternalData) (string, error) + +// ParseLocalData parses the local data string into a LocalData struct. +func ParseLocalData(rawLocalData string) (LocalData, error) { + if rawLocalData == "" { + return LocalData{}, nil + } + + var localData LocalData + var parsingError error + + if strings.Contains(rawLocalData, ",") { + // The Local Data can contain a list. + items := strings.Split(rawLocalData, ",") + for _, item := range items { + if strings.HasPrefix(item, LocalDataContainerIDPrefix) { + localData.ContainerID = item[len(LocalDataContainerIDPrefix):] + } else if strings.HasPrefix(item, LocalDataInodePrefix) { + localData.Inode, parsingError = strconv.ParseUint(item[len(LocalDataInodePrefix):], 10, 64) + } + } + } else { + switch { + case strings.HasPrefix(rawLocalData, LocalDataContainerIDPrefix): + localData.ContainerID = rawLocalData[len(LocalDataContainerIDPrefix):] + case strings.HasPrefix(rawLocalData, LocalDataInodePrefix): + localData.Inode, parsingError = strconv.ParseUint(rawLocalData[len(LocalDataInodePrefix):], 10, 64) + case strings.HasPrefix(rawLocalData, LocalDataLegacyContainerIDPrefix): + // Container ID with old APM format: cid:. Kept for backward compatibility. + localData.ContainerID = rawLocalData[len(LocalDataLegacyContainerIDPrefix):] + default: + // Container ID with old DogStatsD format: . Kept for backward compatibility. + localData.ContainerID = rawLocalData + } + } + + return localData, parsingError +} + +// ParseExternalData parses the external data string into an ExternalData struct. 
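// Illustration: a hypothetical sketch of the wire formats the parsers in this
// file accept; the container ID, inode, and pod UID values are invented.
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/DataDog/datadog-agent/comp/core/tagger/origindetection"
//	)
//
//	func main() {
//		// Local Data list built from the "ci-" and "in-" prefixes.
//		ld, err := origindetection.ParseLocalData("ci-8d3f1c2a,in-4242")
//		fmt.Println(ld.ContainerID, ld.Inode, err) // 8d3f1c2a 4242 <nil>
//
//		// A bare container ID (legacy DogStatsD format) also parses.
//		ld, _ = origindetection.ParseLocalData("8d3f1c2a")
//		fmt.Println(ld.ContainerID) // 8d3f1c2a
//
//		// External Data built from the "it-", "cn-", and "pu-" prefixes.
//		ed, _ := origindetection.ParseExternalData("it-false,cn-sidecar,pu-0c1d9e")
//		fmt.Println(ed.Init, ed.ContainerName, ed.PodUID) // false sidecar 0c1d9e
//	}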
+func ParseExternalData(externalEnv string) (ExternalData, error) { + if externalEnv == "" { + return ExternalData{}, nil + } + + var externalData ExternalData + var parsingError error + + for _, item := range strings.Split(externalEnv, ",") { + switch { + case strings.HasPrefix(item, ExternalDataInitPrefix): + externalData.Init, parsingError = strconv.ParseBool(item[len(ExternalDataInitPrefix):]) + case strings.HasPrefix(item, ExternalDataContainerNamePrefix): + externalData.ContainerName = item[len(ExternalDataContainerNamePrefix):] + case strings.HasPrefix(item, ExternalDataPodUIDPrefix): + externalData.PodUID = item[len(ExternalDataPodUIDPrefix):] + } + } + + return externalData, parsingError +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/cache.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/cache.go index 837b2b15..3faf030b 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/cache.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/cache.go @@ -51,8 +51,9 @@ func (c *measuredCache) statsLoop() { } type cacheOptions struct { - On bool - Statsd StatsClient + On bool + Statsd StatsClient + MaxSize int64 } // newMeasuredCache returns a new measuredCache. @@ -62,17 +63,12 @@ func newMeasuredCache(opts cacheOptions) *measuredCache { return &measuredCache{} } cfg := &ristretto.Config{ - // We know that the maximum allowed resource length is 5K. This means that - // in 5MB we can store a minimum of 1000 queries. - MaxCost: 5000000, - - // An appromixated worst-case scenario when the cache is filled with small - // queries averaged as being of length 11 ("LOCK TABLES"), we would be able - // to fit 476K of them into 5MB of cost. - // - // We average it to 500K and multiply 10x as the documentation recommends. - NumCounters: 500000 * 10, - + MaxCost: opts.MaxSize, + // Assuming the minimum query size is 10 bytes , the maximum number of queries + // that can be stored is calculated as opts.MaxSize / (10 + 320). + // The 320 bytes is the fixed size of the ObfuscatedQuery struct which is stored in the cache. + // Multiplying this maximum number by 10 (opts.MaxSize / 330 * 10) as per the ristretto documentation. + NumCounters: int64(opts.MaxSize / 330 * 10), BufferItems: 64, // default recommended value Metrics: true, // enable hit/miss counters } diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/credit_cards.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/credit_cards.go index 03adf154..3246d6b0 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/credit_cards.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/credit_cards.go @@ -5,9 +5,76 @@ package obfuscate +import ( + "strings" +) + +// creditCard maintains credit card obfuscation state and processing. 
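// Illustrative arithmetic for the ristretto sizing in cache.go above: with a
// cache budget of 5 MB (MaxSize = 5_000_000), NumCounters evaluates to
// 5_000_000 / 330 * 10 = 151,510, i.e. ten counters per worst-case entry,
// where each entry is priced as a ~10-byte query plus the fixed 320 bytes of
// ObfuscatedQuery overhead that Cost() accounts for later in this diff.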
+type creditCard struct { + luhn bool + keepValues map[string]struct{} +} + +func newCCObfuscator(config *CreditCardsConfig) *creditCard { + keepValues := make(map[string]struct{}, len(config.KeepValues)) + for _, sk := range config.KeepValues { + keepValues[sk] = struct{}{} + } + return &creditCard{ + luhn: config.Luhn, + keepValues: keepValues, + } +} + +// ObfuscateCreditCardNumber obfuscates any "credit card like" numbers in value for keys not in the allow-list +func (o *Obfuscator) ObfuscateCreditCardNumber(key, val string) string { + switch key { + case "_sample_rate", + "_sampling_priority_v1", + "account_id", + "aws_account", + "error", + "error.msg", + "error.type", + "error.stack", + "env", + "graphql.field", + "graphql.query", + "graphql.type", + "graphql.operation.name", + "grpc.code", + "grpc.method", + "grpc.request", + "http.status_code", + "http.method", + "runtime-id", + "out.host", + "out.port", + "sampling.priority", + "span.type", + "span.name", + "service.name", + "service", + "sql.query", + "version": + // these tags are known to not be credit card numbers + return val + } + if strings.HasPrefix(key, "_") { + return val + } + if _, ok := o.ccObfuscator.keepValues[key]; ok { + return val + } + if o.ccObfuscator.IsCardNumber(val) { + return "?" + } + return val +} + // IsCardNumber checks if b could be a credit card number by checking the digit count and IIN prefix. // If validateLuhn is true, the Luhn checksum is also applied to potential candidates. -func IsCardNumber(b string, validateLuhn bool) (ok bool) { +func (cc *creditCard) IsCardNumber(b string) (ok bool) { // // Just credit card numbers for now, based on: // • https://baymard.com/checkout-usability/credit-card-patterns @@ -28,7 +95,7 @@ func IsCardNumber(b string, validateLuhn bool) (ok bool) { count := 0 // counts digits encountered foundPrefix := false // reports whether we've detected a valid prefix recdigit := func(_ byte) {} // callback on each found digit; no-op by default (we only need this for Luhn) - if validateLuhn { + if cc.luhn { // we need Luhn checksum validation, so we have to take additional action // and record all digits found buf := make([]byte, 0, len(b)) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/http.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/http.go index d9a00084..0fdec821 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/http.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/http.go @@ -56,5 +56,5 @@ func (o *Obfuscator) ObfuscateURLString(val string) string { u.Path = strings.Join(segs, "/") } } - return strings.Replace(u.String(), "/REDACTED/", "?", -1) + return strings.ReplaceAll(u.String(), "/REDACTED/", "?") } diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/ip_address.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/ip_address.go new file mode 100644 index 00000000..7289849b --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/ip_address.go @@ -0,0 +1,237 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package obfuscate + +import ( + "net/netip" + "strings" +) + +// QuantizePeerIPAddresses quantizes a comma separated list of hosts. Each entry which is an IP address is replaced using quantizeIP. 
+// Duplicate entries post-quantization or collapsed into a single unique value. +// Entries which are not IP addresses are left unchanged. +// Comma-separated host lists are common for peer tags like peer.cassandra.contact.points, peer.couchbase.seed.nodes, peer.kafka.bootstrap.servers +func QuantizePeerIPAddresses(raw string) string { + values := strings.Split(raw, ",") + uniq := values[:0] + uniqSet := make(map[string]bool) + for _, v := range values { + q := quantizeIP(v) + if !uniqSet[q] { + uniqSet[q] = true + uniq = append(uniq, q) + } + } + return strings.Join(uniq, ",") +} + +var schemes = []string{"dnspoll", "ftp", "file", "http", "https"} + +var allowedIPAddresses = map[string]bool{ + // localhost + "127.0.0.1": true, + "::1": true, + // link-local cloud provider metadata server addresses + "169.254.169.254": true, + "fd00:ec2::254": true, + // ECS task metadata + "169.254.170.2": true, +} + +func splitPrefix(raw string) (prefix, after string) { + if after, ok := strings.CutPrefix(raw, "ip-"); ok { // AWS EC2 hostnames e.g. ip-10-123-4-567.ec2.internal + return "ip-", after + } + + for _, scheme := range schemes { + schemeIndex := strings.Index(raw, scheme) + if schemeIndex < 0 { + continue + } + schemeEnd := schemeIndex + len(scheme) + 4 + if schemeEnd < len(raw) && raw[schemeIndex+len(scheme):schemeEnd] == ":///" { + return raw[schemeIndex:schemeEnd], raw[schemeEnd:] + } + schemeEnd-- + if schemeEnd < len(raw) && raw[schemeIndex+len(scheme):schemeEnd] == "://" { + return raw[schemeIndex:schemeEnd], raw[schemeEnd:] + } + } + + return "", raw +} + +// quantizeIP quantizes the ip address in the provided string, only if it exactly matches an ip with an optional port +// if the string is not an ip then empty string is returned +func quantizeIP(raw string) string { + prefix, rawNoPrefix := splitPrefix(raw) + host, port, suffix := parseIPAndPort(rawNoPrefix) + if host == "" { + // not an ip address + return raw + } + if allowedIPAddresses[host] { + return raw + } + replacement := prefix + "blocked-ip-address" + if port != "" { + // we're keeping the original port as part of the key because ports are much lower cardinality + // than ip addresses, and they also tend to correspond more closely to a protocol (i.e. 443 is HTTPS) + // so it's likely safe and probably also useful to leave them in + replacement = replacement + ":" + port + } + return replacement + suffix +} + +// parseIPAndPort returns (host, port) if the host is a valid ip address with an optional port, else returns empty strings. +func parseIPAndPort(input string) (host, port, suffix string) { + host, port, valid := splitHostPort(input) + if !valid { + host = input + } + if ok, i := isParseableIP(host); ok { + return host[:i], port, host[i:] + } + return "", "", "" +} + +func isParseableIP(s string) (parsed bool, lastIndex int) { + if len(s) == 0 { + return false, -1 + } + // Must start with a hex digit, or IPv6 can have a preceding ':' + switch s[0] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'a', 'b', 'c', 'd', 'e', 'f', + 'A', 'B', 'C', 'D', 'E', 'F', + ':': + default: + return false, -1 + } + for i := 0; i < len(s); i++ { + switch s[i] { + case '.', '_', '-': + return parseIPv4(s, s[i]) + case ':': + // IPv6 + if _, err := netip.ParseAddr(s); err == nil { + return true, len(s) + } + return false, -1 + case '%': + // Assume that this was trying to be an IPv6 address with + // a zone specifier, but the address is missing. 
+ return false, -1 + } + } + return false, -1 +} + +// parseIsIPv4 parses s as an IPv4 address and returns whether it is an IP address +// modified from netip to accept alternate separators besides '.' +// also modified to return true if s is an IPv4 address with trailing characters +func parseIPv4(s string, sep byte) (parsed bool, lastIndex int) { + var fields [4]uint8 + var val, pos int + var digLen int // number of digits in current octet + for i := 0; i < len(s); i++ { + if s[i] >= '0' && s[i] <= '9' { + if digLen == 1 && val == 0 { + return false, -1 + } + val = val*10 + int(s[i]) - '0' + digLen++ + if val > 255 { + return false, -1 + } + } else if s[i] == sep { + // .1.2.3 + // 1.2.3. + // 1..2.3 + if i == 0 || i == len(s)-1 || s[i-1] == sep { + return false, -1 + } + // 1.2.3.4.5 + if pos == 3 { + return true, i + } + fields[pos] = uint8(val) + pos++ + val = 0 + digLen = 0 + } else { + if pos == 3 && digLen > 0 { + fields[3] = uint8(val) + return true, i + } + return false, -1 + } + } + if pos < 3 { + return false, -1 + } + fields[3] = uint8(val) + return true, len(s) +} + +// SplitHostPort splits a network address of the form "host:port", +// "host%zone:port", "[host]:port" or "[host%zone]:port" into host or +// host%zone and port. +// +// A literal IPv6 address in hostport must be enclosed in square +// brackets, as in "[::1]:80", "[::1%lo0]:80". +// +// See func Dial for a description of the hostport parameter, and host +// and port results. +// This function is a lightly modified net.SplitHostPort where we avoid +// allocating an error on failure to parse to improve performance. +func splitHostPort(hostport string) (host, port string, valid bool) { + j, k := 0, 0 + + // The port starts after the last colon. + i := strings.LastIndexByte(hostport, ':') + if i < 0 { + return "", "", false + } + + if hostport[0] == '[' { + // Expect the first ']' just before the last ':'. + end := strings.IndexByte(hostport, ']') + if end < 0 { + return "", "", false + } + switch end + 1 { + case len(hostport): + // There can't be a ':' behind the ']' now. + return "", "", false + case i: + // The expected result. + default: + // Either ']' isn't followed by a colon, or it is + // followed by a colon that is not the last one. + if hostport[end+1] == ':' { + return "", "", false + } + return "", "", false + } + host = hostport[1:end] + j, k = 1, end+1 // there can't be a '[' resp. ']' before these positions + } else { + host = hostport[:i] + if strings.IndexByte(host, ':') >= 0 { + return "", "", false + } + } + if strings.IndexByte(hostport[j:], '[') >= 0 { + return "", "", false + } + if strings.IndexByte(hostport[k:], ']') >= 0 { + return "", "", false + } + + port = hostport[i+1:] + return host, port, true +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json.go index 8252a9f0..d2a60ec6 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json.go @@ -22,6 +22,11 @@ func (o *Obfuscator) ObfuscateElasticSearchString(cmd string) string { return obfuscateJSONString(cmd, o.es) } +// ObfuscateOpenSearchString obfuscates the given OpenSearch JSON query. +func (o *Obfuscator) ObfuscateOpenSearchString(cmd string) string { + return obfuscateJSONString(cmd, o.openSearch) +} + // obfuscateJSONString obfuscates the given span's tag using the given obfuscator. If the obfuscator is // nil it is considered disabled. 
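// Illustration: a hypothetical sketch of QuantizePeerIPAddresses from
// ip_address.go above; the broker addresses are invented.
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/DataDog/datadog-agent/pkg/obfuscate"
//	)
//
//	func main() {
//		// Both IPs quantize to the same value and are collapsed into one
//		// entry; the plain hostname passes through untouched.
//		fmt.Println(obfuscate.QuantizePeerIPAddresses("10.4.5.6:9092,10.4.5.7:9092,kafka-0.internal:9092"))
//		// blocked-ip-address:9092,kafka-0.internal:9092
//
//		// Loopback and metadata-server addresses are allow-listed and kept.
//		fmt.Println(obfuscate.QuantizePeerIPAddresses("127.0.0.1:8126"))
//		// 127.0.0.1:8126
//	}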
func obfuscateJSONString(cmd string, obfuscator *jsonObfuscator) string { diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go index ab3ce4a8..6c490bbc 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go @@ -533,9 +533,7 @@ func stateNul(s *scanner, c byte) int { // stateError is the state after reaching a syntax error, // such as after reading `[1}` or `5.1.2`. -// -//nolint:revive // TODO(APM) Fix revive linter -func stateError(s *scanner, c byte) int { +func stateError(_ *scanner, _ byte) int { return scanError } diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go index ed30de60..c6b70a69 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go @@ -15,18 +15,25 @@ package obfuscate import ( "bytes" - "github.com/DataDog/datadog-go/v5/statsd" "go.uber.org/atomic" + + "github.com/DataDog/datadog-go/v5/statsd" ) +// Version is an incrementing integer to identify this "version" of obfuscation logic. This is used to avoid obfuscation +// conflicts and ensure that clients of the obfuscator can decide where obfuscation should occur. +const Version = 1 + // Obfuscator quantizes and obfuscates spans. The obfuscator is not safe for // concurrent use. type Obfuscator struct { opts *Config es *jsonObfuscator // nil if disabled + openSearch *jsonObfuscator // nil if disabled mongo *jsonObfuscator // nil if disabled sqlExecPlan *jsonObfuscator // nil if disabled sqlExecPlanNormalize *jsonObfuscator // nil if disabled + ccObfuscator *creditCard // nil if disabled // sqlLiteralEscapes reports whether we should treat escape characters literally or as escape characters. // Different SQL engines behave in different ways and the tokenizer needs to be generic. sqlLiteralEscapes *atomic.Bool @@ -67,26 +74,35 @@ type Config struct { SQL SQLConfig // ES holds the obfuscation configuration for ElasticSearch bodies. - ES JSONConfig + ES JSONConfig `mapstructure:"elasticsearch"` + + // OpenSearch holds the obfuscation configuration for OpenSearch bodies. + OpenSearch JSONConfig `mapstructure:"opensearch"` // Mongo holds the obfuscation configuration for MongoDB queries. - Mongo JSONConfig + Mongo JSONConfig `mapstructure:"mongodb"` // SQLExecPlan holds the obfuscation configuration for SQL Exec Plans. This is strictly for safety related obfuscation, // not normalization. Normalization of exec plans is configured in SQLExecPlanNormalize. - SQLExecPlan JSONConfig + SQLExecPlan JSONConfig `mapstructure:"sql_exec_plan"` // SQLExecPlanNormalize holds the normalization configuration for SQL Exec Plans. - SQLExecPlanNormalize JSONConfig + SQLExecPlanNormalize JSONConfig `mapstructure:"sql_exec_plan_normalize"` // HTTP holds the obfuscation settings for HTTP URLs. - HTTP HTTPConfig + HTTP HTTPConfig `mapstructure:"http"` // Redis holds the obfuscation settings for Redis commands. - Redis RedisConfig + Redis RedisConfig `mapstructure:"redis"` + + // Valkey holds the obfuscation settings for Valkey commands. + Valkey ValkeyConfig `mapstructure:"valkey"` // Memcached holds the obfuscation settings for Memcached commands. 
- Memcached MemcachedConfig + Memcached MemcachedConfig `mapstructure:"memcached"` + + // CreditCard holds the obfuscation settings for obfuscation of CC numbers in meta. + CreditCard CreditCardsConfig `mapstructure:"credit_cards"` // Statsd specifies the statsd client to use for reporting metrics. Statsd StatsClient @@ -94,6 +110,9 @@ type Config struct { // Logger specifies the logger to use when outputting messages. // If unset, no logs will be outputted. Logger Logger + + // Cache enables the query cache for obfuscation for SQL and MongoDB queries. + Cache CacheConfig `mapstructure:"cache"` } // StatsClient implementations are able to emit stats. @@ -107,6 +126,7 @@ type ObfuscationMode string // ObfuscationMode valid values const ( + NormalizeOnly = ObfuscationMode("normalize_only") ObfuscateOnly = ObfuscationMode("obfuscate_only") ObfuscateAndNormalize = ObfuscationMode("obfuscate_and_normalize") ) @@ -145,12 +165,12 @@ type SQLConfig struct { // ObfuscationMode specifies the obfuscation mode to use for go-sqllexer pkg. // When specified, obfuscator will attempt to use go-sqllexer pkg to obfuscate (and normalize) SQL queries. - // Valid values are "obfuscate_only", "obfuscate_and_normalize" + // Valid values are "normalize_only", "obfuscate_only", "obfuscate_and_normalize" ObfuscationMode ObfuscationMode `json:"obfuscation_mode" yaml:"obfuscation_mode"` // RemoveSpaceBetweenParentheses specifies whether to remove spaces between parentheses. // By default, spaces are inserted between parentheses during normalization. - // This option is only valid when ObfuscationMode is "obfuscate_and_normalize". + // This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize". RemoveSpaceBetweenParentheses bool `json:"remove_space_between_parentheses" yaml:"remove_space_between_parentheses"` // KeepNull specifies whether to disable obfuscate NULL value with ?. @@ -167,16 +187,21 @@ type SQLConfig struct { // KeepTrailingSemicolon specifies whether to keep trailing semicolon. // By default, trailing semicolon is removed during normalization. - // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". + // This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize". KeepTrailingSemicolon bool `json:"keep_trailing_semicolon" yaml:"keep_trailing_semicolon"` // KeepIdentifierQuotation specifies whether to keep identifier quotation, e.g. "my_table" or [my_table]. // By default, identifier quotation is removed during normalization. - // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". + // This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize". KeepIdentifierQuotation bool `json:"keep_identifier_quotation" yaml:"keep_identifier_quotation"` - // Cache reports whether the obfuscator should use a LRU look-up cache for SQL obfuscations. - Cache bool + // KeepJSONPath specifies whether to keep JSON paths following JSON operators in SQL statements in obfuscation. + // By default, JSON paths are treated as literals and are obfuscated to ?, e.g. "data::jsonb -> 'name'" -> "data::jsonb -> ?". + // This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize". + KeepJSONPath bool `json:"keep_json_path" yaml:"keep_json_path"` + + // Cache is deprecated. Please use `apm_config.obfuscation.cache` instead.
+ Cache bool `json:"cache" yaml:"cache"` } // SQLMetadata holds metadata collected throughout the obfuscation of an SQL statement. It is only @@ -214,6 +239,16 @@ type RedisConfig struct { RemoveAllArgs bool `mapstructure:"remove_all_args"` } +// ValkeyConfig holds the configuration settings for Valkey obfuscation +type ValkeyConfig struct { + // Enabled specifies whether this feature should be enabled. + Enabled bool `mapstructure:"enabled"` + + // RemoveAllArgs specifies whether all arguments to a given Valkey + // command should be obfuscated. + RemoveAllArgs bool `mapstructure:"remove_all_args"` +} + // MemcachedConfig holds the configuration settings for Memcached obfuscation type MemcachedConfig struct { // Enabled specifies whether this feature should be enabled. @@ -239,6 +274,31 @@ type JSONConfig struct { ObfuscateSQLValues []string `mapstructure:"obfuscate_sql_values"` } +// CreditCardsConfig holds the configuration for credit card obfuscation in +// (Meta) tags. +type CreditCardsConfig struct { + // Enabled specifies whether this feature should be enabled. + Enabled bool `mapstructure:"enabled"` + + // Luhn specifies whether Luhn checksum validation should be enabled. + // https://dev.to/shiraazm/goluhn-a-simple-library-for-generating-calculating-and-verifying-luhn-numbers-588j + // It reduces false positives, but increases the CPU time X3. + Luhn bool `mapstructure:"luhn"` + + // KeepValues specifies tag keys that are known to not ever contain credit cards + // and therefore their values can be kept. + KeepValues []string `mapstructure:"keep_values"` +} + +// CacheConfig holds the configuration for caching obfuscated queries. +type CacheConfig struct { + // Enabled specifies whether caching should be enabled. + Enabled bool `mapstructure:"enabled"` + + // MaxSize is the maximum size of the cache in bytes. + MaxSize int64 `mapstructure:"max_size"` +} + // NewObfuscator creates a new obfuscator func NewObfuscator(cfg Config) *Obfuscator { if cfg.Logger == nil { @@ -246,13 +306,16 @@ func NewObfuscator(cfg Config) *Obfuscator { } o := Obfuscator{ opts: &cfg, - queryCache: newMeasuredCache(cacheOptions{On: cfg.SQL.Cache, Statsd: cfg.Statsd}), + queryCache: newMeasuredCache(cacheOptions{On: cfg.Cache.Enabled, Statsd: cfg.Statsd, MaxSize: cfg.Cache.MaxSize}), sqlLiteralEscapes: atomic.NewBool(false), log: cfg.Logger, } if cfg.ES.Enabled { o.es = newJSONObfuscator(&cfg.ES, &o) } + if cfg.OpenSearch.Enabled { + o.openSearch = newJSONObfuscator(&cfg.OpenSearch, &o) + } if cfg.Mongo.Enabled { o.mongo = newJSONObfuscator(&cfg.Mongo, &o) } @@ -262,6 +325,9 @@ func NewObfuscator(cfg Config) *Obfuscator { if cfg.SQLExecPlanNormalize.Enabled { o.sqlExecPlanNormalize = newJSONObfuscator(&cfg.SQLExecPlanNormalize, &o) } + if cfg.CreditCard.Enabled { + o.ccObfuscator = newCCObfuscator(&cfg.CreditCard) + } if cfg.Statsd == nil { cfg.Statsd = &statsd.NoOpClient{} } @@ -270,7 +336,9 @@ func NewObfuscator(cfg Config) *Obfuscator { // Stop cleans up after a finished Obfuscator. func (o *Obfuscator) Stop() { - o.queryCache.Close() + if o.queryCache != nil { + o.queryCache.Close() + } } // compactWhitespaces compacts all whitespaces in t. 
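To make the new configuration surface concrete, the following is a hypothetical sketch of wiring the OpenSearch, credit-card, and cache options added above; all values are illustrative.

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

func main() {
	o := obfuscate.NewObfuscator(obfuscate.Config{
		OpenSearch: obfuscate.JSONConfig{Enabled: true},
		CreditCard: obfuscate.CreditCardsConfig{
			Enabled: true,
			Luhn:    true, // fewer false positives, at roughly 3x the CPU cost
		},
		Cache: obfuscate.CacheConfig{Enabled: true, MaxSize: 5_000_000},
	})
	defer o.Stop()

	// Card-like tag values are replaced with "?"; allow-listed keys are kept.
	fmt.Println(o.ObfuscateCreditCardNumber("card.number", "4111111111111111"))
	// Output: ?
}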
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go index e9700c40..70a1323e 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go @@ -245,10 +245,8 @@ func obfuscateRedisCmd(out *strings.Builder, cmd string, args ...string) { out.WriteString(strings.Join(args, " ")) } -// removeAllRedisArgs will take in a command and obfuscate all arguments following +// RemoveAllRedisArgs will take in a command and obfuscate all arguments following // the command, regardless of if the command is valid Redis or not -// -//nolint:revive // TODO(APM) Fix revive linter func (*Obfuscator) RemoveAllRedisArgs(rediscmd string) string { fullCmd := strings.Fields(rediscmd) if len(fullCmd) == 0 { diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go index 807c9fb5..30575dc2 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go @@ -41,7 +41,7 @@ type metadataFinderFilter struct { func (f *metadataFinderFilter) Filter(token, lastToken TokenKind, buffer []byte) (TokenKind, []byte, error) { if f.collectComments && token == Comment { // A comment with line-breaks will be brought to a single line. - comment := strings.TrimSpace(strings.Replace(string(buffer), "\n", " ", -1)) + comment := strings.TrimSpace(strings.ReplaceAll(string(buffer), "\n", " ")) f.size += int64(len(comment)) f.comments = append(f.comments, comment) } @@ -287,6 +287,10 @@ func (f *groupingFilter) Reset() { f.groupMulti = 0 } +func isSQLLexer(obfuscationMode ObfuscationMode) bool { + return obfuscationMode != "" +} + // ObfuscateSQLString quantizes and obfuscates the given input SQL query string. Quantization removes // some elements such as comments and aliases and obfuscation attempts to hide sensitive information // in strings and numbers by redacting them. @@ -294,24 +298,43 @@ func (o *Obfuscator) ObfuscateSQLString(in string) (*ObfuscatedQuery, error) { return o.ObfuscateSQLStringWithOptions(in, &o.opts.SQL) } +// ObfuscateSQLStringForDBMS quantizes and obfuscates the given input SQL query string for a specific DBMS. +func (o *Obfuscator) ObfuscateSQLStringForDBMS(in string, dbms string) (*ObfuscatedQuery, error) { + if isSQLLexer(o.opts.SQL.ObfuscationMode) { + o.opts.SQL.DBMS = dbms + } + return o.ObfuscateSQLStringWithOptions(in, &o.opts.SQL) +} + // ObfuscateSQLStringWithOptions accepts an optional SQLOptions to change the behavior of the obfuscator // to quantize and obfuscate the given input SQL query string. Quantization removes some elements such as comments // and aliases and obfuscation attempts to hide sensitive information in strings and numbers by redacting them. -func (o *Obfuscator) ObfuscateSQLStringWithOptions(in string, opts *SQLConfig) (*ObfuscatedQuery, error) { +func (o *Obfuscator) ObfuscateSQLStringWithOptions(in string, opts *SQLConfig) (oq *ObfuscatedQuery, err error) { + if o.queryCache.Cache != nil { + cacheKey := fmt.Sprintf("%v:%s", opts, in) + if v, ok := o.queryCache.Get(cacheKey); ok { + return v.(*ObfuscatedQuery), nil + } + + defer func() { + if oq != nil && err == nil { + o.queryCache.Set(cacheKey, oq, oq.Cost()) + } + }() + } + if opts.ObfuscationMode != "" { // If obfuscation mode is specified, we will use go-sqllexer pkg // to obfuscate (and normalize) the query. 
- return o.ObfuscateWithSQLLexer(in, opts) + oq, err = o.ObfuscateWithSQLLexer(in, opts) + } else { + oq, err = o.obfuscateSQLString(in, opts) } - if v, ok := o.queryCache.Get(in); ok { - return v.(*ObfuscatedQuery), nil - } - oq, err := o.obfuscateSQLString(in, opts) if err != nil { return oq, err } - o.queryCache.Set(in, oq, oq.Cost()) + return oq, nil } @@ -342,7 +365,16 @@ type ObfuscatedQuery struct { // Cost returns the number of bytes needed to store all the fields // of this ObfuscatedQuery. func (oq *ObfuscatedQuery) Cost() int64 { - return int64(len(oq.Query)) + oq.Metadata.Size + // The cost of the ObfuscatedQuery struct is the sum of the length of the query string, + // the size of the metadata content, and the size of the struct itself and its fields headers. + // 320 bytes come from + // - 112 bytes for the ObfuscatedQuery struct itself, measured by unsafe.Sizeof(ObfuscatedQuery{}) + // - 96 bytes for the Metadata struct itself, measured by unsafe.Sizeof(SQLMetadata{}) + // - 16 bytes for the Query string header + // - 16 bytes for the TablesCSV string header + // - 24 * 3 bytes for the Comments, Commands, and Procedures slices headers + // - 8 bytes for the Size int64 field + return int64(len(oq.Query)) + oq.Metadata.Size + 320 } // attemptObfuscation attempts to obfuscate the SQL query loaded into the tokenizer, using the given set of filters. @@ -426,17 +458,23 @@ func (o *Obfuscator) ObfuscateSQLExecPlan(jsonPlan string, normalize bool) (stri // ObfuscateWithSQLLexer obfuscates the given SQL query using the go-sqllexer package. // If ObfuscationMode is set to ObfuscateOnly, the query will be obfuscated without normalizing it. func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*ObfuscatedQuery, error) { - if opts.ObfuscationMode != ObfuscateOnly && opts.ObfuscationMode != ObfuscateAndNormalize { + if opts.ObfuscationMode != NormalizeOnly && opts.ObfuscationMode != ObfuscateOnly && opts.ObfuscationMode != ObfuscateAndNormalize { return nil, fmt.Errorf("invalid obfuscation mode: %s", opts.ObfuscationMode) } - obfuscator := sqllexer.NewObfuscator( - sqllexer.WithReplaceDigits(opts.ReplaceDigits), - sqllexer.WithDollarQuotedFunc(opts.DollarQuotedFunc), - sqllexer.WithReplacePositionalParameter(!opts.KeepPositionalParameter), - sqllexer.WithReplaceBoolean(!opts.KeepBoolean), - sqllexer.WithReplaceNull(!opts.KeepNull), - ) + var obfuscator *sqllexer.Obfuscator + + if opts.ObfuscationMode == ObfuscateOnly || opts.ObfuscationMode == ObfuscateAndNormalize { + obfuscator = sqllexer.NewObfuscator( + sqllexer.WithReplaceDigits(opts.ReplaceDigits), + sqllexer.WithDollarQuotedFunc(opts.DollarQuotedFunc), + sqllexer.WithReplacePositionalParameter(!opts.KeepPositionalParameter), + sqllexer.WithReplaceBoolean(!opts.KeepBoolean), + sqllexer.WithReplaceNull(!opts.KeepNull), + sqllexer.WithKeepJsonPath(opts.KeepJSONPath), + ) + } + if opts.ObfuscationMode == ObfuscateOnly { // Obfuscate the query without normalizing it. out := obfuscator.Obfuscate(in, sqllexer.WithDBMS(sqllexer.DBMSType(opts.DBMS))) @@ -445,11 +483,6 @@ func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*Obfusca }, nil } - // we only want to cache normalized queries - if v, ok := o.queryCache.Get(in); ok { - return v.(*ObfuscatedQuery), nil - } - // Obfuscate the query and normalize it. 
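// For illustration (hypothetical query): with ObfuscateAndNormalize, an input
// such as
//	SELECT * FROM users WHERE id = 42 -- lookup by id
// comes back as
//	SELECT * FROM users WHERE id = ?
// with the comment collected into Metadata.Comments when CollectComments is
// enabled.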
normalizer := sqllexer.NewNormalizer( sqllexer.WithCollectComments(opts.CollectComments), @@ -461,12 +494,22 @@ func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*Obfusca sqllexer.WithKeepTrailingSemicolon(opts.KeepTrailingSemicolon), sqllexer.WithKeepIdentifierQuotation(opts.KeepIdentifierQuotation), ) - out, statementMetadata, err := sqllexer.ObfuscateAndNormalize( - in, - obfuscator, - normalizer, - sqllexer.WithDBMS(sqllexer.DBMSType(opts.DBMS)), - ) + + var out string + var statementMetadata *sqllexer.StatementMetadata + var err error + + if opts.ObfuscationMode == NormalizeOnly { + // Normalize the query without obfuscating it. + out, statementMetadata, err = normalizer.Normalize(in, sqllexer.WithDBMS(sqllexer.DBMSType(opts.DBMS))) + } else { + out, statementMetadata, err = sqllexer.ObfuscateAndNormalize( + in, + obfuscator, + normalizer, + sqllexer.WithDBMS(sqllexer.DBMSType(opts.DBMS)), + ) + } if err != nil { return nil, err } @@ -481,7 +524,5 @@ func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*Obfusca }, } - o.queryCache.Set(in, oq, oq.Cost()) - return oq, nil } diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go index 9ab99288..190801d1 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go @@ -473,10 +473,15 @@ func (tkn *SQLTokenizer) Scan() (TokenKind, []byte) { // modulo operator (e.g. 'id % 8') return TokenKind(ch), tkn.bytes() case '$': - if isDigit(tkn.lastChar) { - // TODO(gbbr): the first digit after $ does not necessarily guarantee - // that this isn't a dollar-quoted string constant. We might eventually - // want to cover for this use-case too (e.g. $1$some text$1$). + if isDigit(tkn.lastChar) || tkn.lastChar == '?' { + // TODO(knusbaum): Valid dollar quote tags start with alpha characters and contain no symbols. + // See: https://www.postgresql.org/docs/15/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS + // See also: https://pgpedia.info/d/dollar-quoting.html instead. + // + // Instances of $[integer] or $? are prepared statement variables. + // We may eventually want to expand this to check for symbols other than numbers and '?', + // since other symbols are not valid dollar quote tags, but for now this covers prepared statement + // variables without exposing us to more risk of not obfuscating something than necessary. 
return tkn.scanPreparedStatement('$') } @@ -610,9 +615,9 @@ func (tkn *SQLTokenizer) scanIdentifier() (TokenKind, []byte) { return ID, t } -//nolint:revive // TODO(APM) Fix revive linter -func (tkn *SQLTokenizer) scanVariableIdentifier(prefix rune) (TokenKind, []byte) { +func (tkn *SQLTokenizer) scanVariableIdentifier(_ rune) (TokenKind, []byte) { for tkn.advance(); tkn.lastChar != ')' && tkn.lastChar != EndChar; tkn.advance() { + continue } tkn.advance() if !isLetter(tkn.lastChar) { @@ -623,8 +628,7 @@ func (tkn *SQLTokenizer) scanVariableIdentifier(prefix rune) (TokenKind, []byte) return Variable, tkn.bytes() } -//nolint:revive // TODO(APM) Fix revive linter -func (tkn *SQLTokenizer) scanFormatParameter(prefix rune) (TokenKind, []byte) { +func (tkn *SQLTokenizer) scanFormatParameter(_ rune) (TokenKind, []byte) { tkn.advance() return Variable, tkn.bytes() } @@ -677,14 +681,18 @@ func (tkn *SQLTokenizer) scanDollarQuotedString() (TokenKind, []byte) { return DollarQuotedString, buf.Bytes() } -//nolint:revive // TODO(APM) Fix revive linter -func (tkn *SQLTokenizer) scanPreparedStatement(prefix rune) (TokenKind, []byte) { +func (tkn *SQLTokenizer) scanPreparedStatement(_ rune) (TokenKind, []byte) { // a prepared statement expect a digit identifier like $1 - if !isDigit(tkn.lastChar) { + if !isDigit(tkn.lastChar) && tkn.lastChar != '?' { tkn.setErr(`prepared statements must start with digits, got "%c" (%d)`, tkn.lastChar, tkn.lastChar) return LexError, tkn.bytes() } + if tkn.lastChar == '?' { + tkn.advance() + return PreparedStatement, tkn.bytes() + } + // scanNumber keeps the prefix rune intact. // read numbers and return an error if any token, buff := tkn.scanNumber(false) @@ -695,8 +703,7 @@ func (tkn *SQLTokenizer) scanPreparedStatement(prefix rune) (TokenKind, []byte) return PreparedStatement, buff } -//nolint:revive // TODO(APM) Fix revive linter -func (tkn *SQLTokenizer) scanEscapeSequence(braces rune) (TokenKind, []byte) { +func (tkn *SQLTokenizer) scanEscapeSequence(_ rune) (TokenKind, []byte) { for tkn.lastChar != '}' && tkn.lastChar != EndChar { tkn.advance() } @@ -825,8 +832,7 @@ func (tkn *SQLTokenizer) scanString(delim rune, kind TokenKind) (TokenKind, []by return kind, buf.Bytes() } -//nolint:revive // TODO(APM) Fix revive linter -func (tkn *SQLTokenizer) scanCommentType1(prefix string) (TokenKind, []byte) { +func (tkn *SQLTokenizer) scanCommentType1(_ string) (TokenKind, []byte) { for tkn.lastChar != EndChar { if tkn.lastChar == '\n' { tkn.advance() diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/proto/LICENSE new file mode 100644 index 00000000..b370545b --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/LICENSE @@ -0,0 +1,200 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-present Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
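The hunks that follow vendor Datadog's generated trace package. AgentPayload, the first message introduced, is an ordinary protoc-gen-go type, so it round-trips through the standard protobuf runtime. A minimal sketch of that usage, assuming the vendored import path from this diff and google.golang.org/protobuf; the host, env, version, and tag values are made up for illustration:

package main

import (
	"fmt"

	trace "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Populate the generated struct; the fields mirror datadog/trace/agent_payload.proto.
	p := &trace.AgentPayload{
		HostName:     "ci-runner-1", // illustrative
		Env:          "prod",        // illustrative
		AgentVersion: "7.67.0",      // illustrative
		TargetTPS:    10,
		Tags:         map[string]string{"team": "webhooks"},
	}
	// Generated messages implement proto.Message, so the standard runtime
	// handles the wire encoding and decoding.
	raw, err := proto.Marshal(p)
	if err != nil {
		panic(err)
	}
	var back trace.AgentPayload
	if err := proto.Unmarshal(raw, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.GetHostName(), back.GetTags()["team"]) // ci-runner-1 webhooks
}

The same message also receives hand-rolled MessagePack and vtprotobuf codecs in the sibling files below; all three encoders agree on the field numbers and names declared here.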
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload.pb.go new file mode 100644 index 00000000..e67728a5 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload.pb.go @@ -0,0 +1,205 @@ +// protoc -I. -I$GOPATH/src --gogofaster_out=. span.proto tracer_payload.proto agent_payload.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.6 +// protoc v5.29.3 +// source: datadog/trace/agent_payload.proto + +package trace + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// AgentPayload represents payload the agent sends to the intake. +type AgentPayload struct { + state protoimpl.MessageState `protogen:"open.v1"` + // hostName specifies hostname of where the agent is running. + HostName string `protobuf:"bytes,1,opt,name=hostName,proto3" json:"hostName,omitempty"` + // env specifies `env` set in agent configuration. + Env string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"` + // tracerPayloads specifies list of the payloads received from tracers. + TracerPayloads []*TracerPayload `protobuf:"bytes,5,rep,name=tracerPayloads,proto3" json:"tracerPayloads,omitempty"` + // tags specifies tags common in all `tracerPayloads`. + Tags map[string]string `protobuf:"bytes,6,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // agentVersion specifies version of the agent. + AgentVersion string `protobuf:"bytes,7,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` + // targetTPS holds `TargetTPS` value in AgentConfig. + TargetTPS float64 `protobuf:"fixed64,8,opt,name=targetTPS,proto3" json:"targetTPS,omitempty"` + // errorTPS holds `ErrorTPS` value in AgentConfig. + ErrorTPS float64 `protobuf:"fixed64,9,opt,name=errorTPS,proto3" json:"errorTPS,omitempty"` + // rareSamplerEnabled holds `RareSamplerEnabled` value in AgentConfig + RareSamplerEnabled bool `protobuf:"varint,10,opt,name=rareSamplerEnabled,proto3" json:"rareSamplerEnabled,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AgentPayload) Reset() { + *x = AgentPayload{} + mi := &file_datadog_trace_agent_payload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AgentPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AgentPayload) ProtoMessage() {} + +func (x *AgentPayload) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_agent_payload_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AgentPayload.ProtoReflect.Descriptor instead. 
+func (*AgentPayload) Descriptor() ([]byte, []int) { + return file_datadog_trace_agent_payload_proto_rawDescGZIP(), []int{0} +} + +func (x *AgentPayload) GetHostName() string { + if x != nil { + return x.HostName + } + return "" +} + +func (x *AgentPayload) GetEnv() string { + if x != nil { + return x.Env + } + return "" +} + +func (x *AgentPayload) GetTracerPayloads() []*TracerPayload { + if x != nil { + return x.TracerPayloads + } + return nil +} + +func (x *AgentPayload) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *AgentPayload) GetAgentVersion() string { + if x != nil { + return x.AgentVersion + } + return "" +} + +func (x *AgentPayload) GetTargetTPS() float64 { + if x != nil { + return x.TargetTPS + } + return 0 +} + +func (x *AgentPayload) GetErrorTPS() float64 { + if x != nil { + return x.ErrorTPS + } + return 0 +} + +func (x *AgentPayload) GetRareSamplerEnabled() bool { + if x != nil { + return x.RareSamplerEnabled + } + return false +} + +var File_datadog_trace_agent_payload_proto protoreflect.FileDescriptor + +const file_datadog_trace_agent_payload_proto_rawDesc = "" + + "\n" + + "!datadog/trace/agent_payload.proto\x12\rdatadog.trace\x1a\"datadog/trace/tracer_payload.proto\"\x84\x03\n" + + "\fAgentPayload\x12\x1a\n" + + "\bhostName\x18\x01 \x01(\tR\bhostName\x12\x10\n" + + "\x03env\x18\x02 \x01(\tR\x03env\x12D\n" + + "\x0etracerPayloads\x18\x05 \x03(\v2\x1c.datadog.trace.TracerPayloadR\x0etracerPayloads\x129\n" + + "\x04tags\x18\x06 \x03(\v2%.datadog.trace.AgentPayload.TagsEntryR\x04tags\x12\"\n" + + "\fagentVersion\x18\a \x01(\tR\fagentVersion\x12\x1c\n" + + "\ttargetTPS\x18\b \x01(\x01R\ttargetTPS\x12\x1a\n" + + "\berrorTPS\x18\t \x01(\x01R\berrorTPS\x12.\n" + + "\x12rareSamplerEnabled\x18\n" + + " \x01(\bR\x12rareSamplerEnabled\x1a7\n" + + "\tTagsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01B\x16Z\x14pkg/proto/pbgo/traceb\x06proto3" + +var ( + file_datadog_trace_agent_payload_proto_rawDescOnce sync.Once + file_datadog_trace_agent_payload_proto_rawDescData []byte +) + +func file_datadog_trace_agent_payload_proto_rawDescGZIP() []byte { + file_datadog_trace_agent_payload_proto_rawDescOnce.Do(func() { + file_datadog_trace_agent_payload_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_datadog_trace_agent_payload_proto_rawDesc), len(file_datadog_trace_agent_payload_proto_rawDesc))) + }) + return file_datadog_trace_agent_payload_proto_rawDescData +} + +var file_datadog_trace_agent_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_datadog_trace_agent_payload_proto_goTypes = []any{ + (*AgentPayload)(nil), // 0: datadog.trace.AgentPayload + nil, // 1: datadog.trace.AgentPayload.TagsEntry + (*TracerPayload)(nil), // 2: datadog.trace.TracerPayload +} +var file_datadog_trace_agent_payload_proto_depIdxs = []int32{ + 2, // 0: datadog.trace.AgentPayload.tracerPayloads:type_name -> datadog.trace.TracerPayload + 1, // 1: datadog.trace.AgentPayload.tags:type_name -> datadog.trace.AgentPayload.TagsEntry + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_datadog_trace_agent_payload_proto_init() } +func file_datadog_trace_agent_payload_proto_init() { + if File_datadog_trace_agent_payload_proto != nil 
{ + return + } + file_datadog_trace_tracer_payload_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_datadog_trace_agent_payload_proto_rawDesc), len(file_datadog_trace_agent_payload_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_datadog_trace_agent_payload_proto_goTypes, + DependencyIndexes: file_datadog_trace_agent_payload_proto_depIdxs, + MessageInfos: file_datadog_trace_agent_payload_proto_msgTypes, + }.Build() + File_datadog_trace_agent_payload_proto = out.File + file_datadog_trace_agent_payload_proto_goTypes = nil + file_datadog_trace_agent_payload_proto_depIdxs = nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_gen.go new file mode 100644 index 00000000..26cefad5 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_gen.go @@ -0,0 +1,200 @@ +package trace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z *AgentPayload) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 8 + // string "HostName" + o = append(o, 0x88, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.HostName) + // string "Env" + o = append(o, 0xa3, 0x45, 0x6e, 0x76) + o = msgp.AppendString(o, z.Env) + // string "TracerPayloads" + o = append(o, 0xae, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.TracerPayloads))) + for za0001 := range z.TracerPayloads { + if z.TracerPayloads[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.TracerPayloads[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "TracerPayloads", za0001) + return + } + } + } + // string "Tags" + o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) + for za0002, za0003 := range z.Tags { + o = msgp.AppendString(o, za0002) + o = msgp.AppendString(o, za0003) + } + // string "AgentVersion" + o = append(o, 0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AgentVersion) + // string "TargetTPS" + o = append(o, 0xa9, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53) + o = msgp.AppendFloat64(o, z.TargetTPS) + // string "ErrorTPS" + o = append(o, 0xa8, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x54, 0x50, 0x53) + o = msgp.AppendFloat64(o, z.ErrorTPS) + // string "RareSamplerEnabled" + o = append(o, 0xb2, 0x52, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64) + o = msgp.AppendBool(o, z.RareSamplerEnabled) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AgentPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "HostName": + z.HostName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = 
msgp.WrapError(err, "HostName") + return + } + case "Env": + z.Env, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + case "TracerPayloads": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerPayloads") + return + } + if cap(z.TracerPayloads) >= int(zb0002) { + z.TracerPayloads = (z.TracerPayloads)[:zb0002] + } else { + z.TracerPayloads = make([]*TracerPayload, zb0002) + } + for za0001 := range z.TracerPayloads { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.TracerPayloads[za0001] = nil + } else { + if z.TracerPayloads[za0001] == nil { + z.TracerPayloads[za0001] = new(TracerPayload) + } + bts, err = z.TracerPayloads[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "TracerPayloads", za0001) + return + } + } + } + case "Tags": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if z.Tags == nil { + z.Tags = make(map[string]string, zb0003) + } else if len(z.Tags) > 0 { + for key := range z.Tags { + delete(z.Tags, key) + } + } + for zb0003 > 0 { + var za0002 string + var za0003 string + zb0003-- + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + z.Tags[za0002] = za0003 + } + case "AgentVersion": + z.AgentVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentVersion") + return + } + case "TargetTPS": + z.TargetTPS, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetTPS") + return + } + case "ErrorTPS": + z.ErrorTPS, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErrorTPS") + return + } + case "RareSamplerEnabled": + z.RareSamplerEnabled, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RareSamplerEnabled") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AgentPayload) Msgsize() (s int) { + s = 1 + 9 + msgp.StringPrefixSize + len(z.HostName) + 4 + msgp.StringPrefixSize + len(z.Env) + 15 + msgp.ArrayHeaderSize + for za0001 := range z.TracerPayloads { + if z.TracerPayloads[za0001] == nil { + s += msgp.NilSize + } else { + s += z.TracerPayloads[za0001].Msgsize() + } + } + s += 5 + msgp.MapHeaderSize + if z.Tags != nil { + for za0002, za0003 := range z.Tags { + _ = za0003 + s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003) + } + } + s += 13 + msgp.StringPrefixSize + len(z.AgentVersion) + 10 + msgp.Float64Size + 9 + msgp.Float64Size + 19 + msgp.BoolSize + return +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_vtproto.pb.go new file mode 100644 index 00000000..9433b709 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_vtproto.pb.go @@ -0,0 +1,524 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// protoc-gen-go-vtproto version: v0.6.1-0.20240319094008-0393e58bdf10 +// source: datadog/trace/agent_payload.proto + +package trace + +import ( + binary "encoding/binary" + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AgentPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AgentPayload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AgentPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RareSamplerEnabled { + i-- + if m.RareSamplerEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.ErrorTPS != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ErrorTPS)))) + i-- + dAtA[i] = 0x49 + } + if m.TargetTPS != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TargetTPS)))) + i-- + dAtA[i] = 0x41 + } + if len(m.AgentVersion) > 0 { + i -= len(m.AgentVersion) + copy(dAtA[i:], m.AgentVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AgentVersion))) + i-- + dAtA[i] = 0x3a + } + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.TracerPayloads) > 0 { + for iNdEx := len(m.TracerPayloads) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TracerPayloads[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Env) > 0 { + i -= len(m.Env) + copy(dAtA[i:], m.Env) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Env))) + i-- + dAtA[i] = 0x12 + } + if len(m.HostName) > 0 { + i -= len(m.HostName) + copy(dAtA[i:], m.HostName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AgentPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Env) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TracerPayloads) > 0 { + for _, e := range m.TracerPayloads { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += 
mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.AgentVersion) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TargetTPS != 0 { + n += 9 + } + if m.ErrorTPS != 0 { + n += 9 + } + if m.RareSamplerEnabled { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *AgentPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TracerPayloads", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TracerPayloads = append(m.TracerPayloads, &TracerPayload{}) + if err := m.TracerPayloads[len(m.TracerPayloads)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetTPS", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.TargetTPS = float64(math.Float64frombits(v)) + case 9: + if wireType != 1 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ErrorTPS", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ErrorTPS = float64(math.Float64frombits(v)) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RareSamplerEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RareSamplerEnabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_bytes.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_bytes.go new file mode 100644 index 00000000..d50cf8d7 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_bytes.go @@ -0,0 +1,275 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package trace defines the types and functions to encode/decode traces. +package trace + +import ( + "bytes" + "errors" + "math" + "strings" + "unicode/utf8" + + "github.com/tinylib/msgp/msgp" +) + +// repairUTF8 ensures all characters in s are UTF-8 by replacing non-UTF-8 characters +// with the replacement char � +func repairUTF8(s string) string { + in := strings.NewReader(s) + var out bytes.Buffer + out.Grow(len(s)) + + for { + r, _, err := in.ReadRune() + if err != nil { + // note: by contract, if `in` contains non-valid utf-8, no error is returned. Rather the utf-8 replacement + // character is returned. Therefore, the only error should usually be io.EOF indicating end of string. + // If any other error is returned by chance, we quit as well, outputting whatever part of the string we + // had already constructed. + return out.String() + } + out.WriteRune(r) + } +} + +// parseStringBytes reads the next type in the msgpack payload and +// converts the BinType or the StrType in a valid string. +func parseStringBytes(bts []byte) (string, []byte, error) { + if msgp.IsNil(bts) { + bts, err := msgp.ReadNilBytes(bts) + return "", bts, err + } + // read the generic representation type without decoding + t := msgp.NextType(bts) + + var ( + err error + i []byte + ) + switch t { + case msgp.BinType: + i, bts, err = msgp.ReadBytesZC(bts) + case msgp.StrType: + i, bts, err = msgp.ReadStringZC(bts) + default: + return "", bts, msgp.TypeError{Encoded: t, Method: msgp.StrType} + } + if err != nil { + return "", bts, err + } + if utf8.Valid(i) { + return string(i), bts, nil + } + return repairUTF8(msgp.UnsafeString(i)), bts, nil +} + +// parseFloat64Bytes parses a float64 even if the sent value is an int64 or an uint64; +// this is required because the encoding library could remove bytes from the encoded +// payload to reduce the size, if they're not needed. 
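// A concrete consequence, assuming the msgp helpers used throughout this file:
// an encoder may write the float64 value 3 as a one-byte positive fixint, and
// this parser still yields a float64.
//
//	b := msgp.AppendInt64(nil, 3)     // integer representation on the wire
//	f, _, err := parseFloat64Bytes(b) // err == nil, f == 3.0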
+func parseFloat64Bytes(bts []byte) (float64, []byte, error) { + if msgp.IsNil(bts) { + bts, err := msgp.ReadNilBytes(bts) + return 0, bts, err + } + // read the generic representation type without decoding + t := msgp.NextType(bts) + + var err error + switch t { + case msgp.IntType: + var i int64 + i, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return 0, bts, err + } + + return float64(i), bts, nil + case msgp.UintType: + var i uint64 + i, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + return 0, bts, err + } + + return float64(i), bts, nil + case msgp.Float64Type: + var f float64 + f, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + return 0, bts, err + } + + return f, bts, nil + default: + return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.Float64Type} + } +} + +// cast to int64 values that are int64 but that are sent in uint64 +// over the wire. Set to 0 if they overflow the MaxInt64 size. This +// cast should be used ONLY while decoding int64 values that are +// sent as uint64 to reduce the payload size, otherwise the approach +// is not correct in the general sense. +func castInt64(v uint64) (int64, bool) { + if v > math.MaxInt64 { + return 0, false + } + return int64(v), true +} + +// parseInt64Bytes parses an int64 even if the sent value is an uint64; +// this is required because the encoding library could remove bytes from the encoded +// payload to reduce the size, if they're not needed. +func parseInt64Bytes(bts []byte) (int64, []byte, error) { + if msgp.IsNil(bts) { + bts, err := msgp.ReadNilBytes(bts) + return 0, bts, err + } + // read the generic representation type without decoding + t := msgp.NextType(bts) + + var ( + i int64 + u uint64 + err error + ) + switch t { + case msgp.IntType: + i, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return 0, bts, err + } + return i, bts, nil + case msgp.UintType: + u, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + return 0, bts, err + } + + // force-cast + i, ok := castInt64(u) + if !ok { + return 0, bts, errors.New("found uint64, overflows int64") + } + return i, bts, nil + default: + return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType} + } +} + +// parseUint64Bytes parses an uint64 even if the sent value is an int64; +// this is required because the language used for the encoding library +// may not have unsigned types. An example is early version of Java +// (and so JRuby interpreter) that encodes uint64 as int64: +// http://docs.oracle.com/javase/tutorial/java/nutsandbolts/datatypes.html +func parseUint64Bytes(bts []byte) (uint64, []byte, error) { + if msgp.IsNil(bts) { + bts, err := msgp.ReadNilBytes(bts) + return 0, bts, err + } + // read the generic representation type without decoding + t := msgp.NextType(bts) + + var ( + i int64 + u uint64 + err error + ) + switch t { + case msgp.UintType: + u, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + return 0, bts, err + } + return u, bts, err + case msgp.IntType: + i, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return 0, bts, err + } + return uint64(i), bts, nil + default: + return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType} + } +} + +// cast to int32 values that are int32 but that are sent in uint32 +// over the wire. Set to 0 if they overflow the MaxInt32 size. This +// cast should be used ONLY while decoding int32 values that are +// sent as uint32 to reduce the payload size, otherwise the approach +// is not correct in the general sense. 
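// For the int64 analogue above, an overflow surfaces as a decode error rather
// than a silent wraparound; a caller-side sketch, assuming the msgp helpers and
// math.MaxInt64 from the standard library:
//
//	b := msgp.AppendUint64(nil, uint64(math.MaxInt64)+1)
//	_, _, err := parseInt64Bytes(b) // err: "found uint64, overflows int64"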
+func castInt32(v uint32) (int32, bool) { + if v > math.MaxInt32 { + return 0, false + } + return int32(v), true +} + +// parseInt32Bytes parses an int32 even if the sent value is an uint32; +// this is required because the encoding library could remove bytes from the encoded +// payload to reduce the size, if they're not needed. +func parseInt32Bytes(bts []byte) (int32, []byte, error) { + if msgp.IsNil(bts) { + bts, err := msgp.ReadNilBytes(bts) + return 0, bts, err + } + // read the generic representation type without decoding + t := msgp.NextType(bts) + + var ( + i int32 + u uint32 + err error + ) + switch t { + case msgp.IntType: + i, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + return 0, bts, err + } + return i, bts, nil + case msgp.UintType: + u, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + return 0, bts, err + } + + // force-cast + i, ok := castInt32(u) + if !ok { + return 0, bts, errors.New("found uint32, overflows int32") + } + return i, bts, nil + default: + return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType} + } +} + +// parseBytes reads the next BinType in the msgpack payload. +// +//nolint:unused // potentially useful; was used with prior proto definitions +func parseBytes(bts []byte) ([]byte, []byte, error) { + if msgp.IsNil(bts) { + bts, err := msgp.ReadNilBytes(bts) + return nil, bts, err + } + // read the generic representation type without decoding + t := msgp.NextType(bts) + + switch t { + case msgp.BinType: + unsafeBytes, bts, err := msgp.ReadBytesZC(bts) + if err != nil { + return nil, bts, err + } + safeBytes := make([]byte, len(unsafeBytes)) + copy(safeBytes, unsafeBytes) + return safeBytes, bts, nil + default: + return nil, bts, msgp.TypeError{Encoded: t, Method: msgp.BinType} + } +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_v05.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_v05.go new file mode 100644 index 00000000..f88e6cc8 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_v05.go @@ -0,0 +1,223 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package trace + +import ( + "errors" + "fmt" + + "github.com/tinylib/msgp/msgp" +) + +// dictionaryString reads an int from decoder dc and returns the string +// at that index from dict. +func dictionaryString(bts []byte, dict []string) (string, []byte, error) { + var ( + ui uint32 + err error + ) + ui, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + return "", bts, err + } + idx := int(ui) + if idx >= len(dict) { + return "", bts, fmt.Errorf("dictionary index %d out of range", idx) + } + return dict[idx], bts, nil +} + +// UnmarshalMsgDictionary decodes a trace using the specification from the v0.5 endpoint. 
+// For details, see the documentation for endpoint v0.5 in pkg/trace/api/version.go +func (t *Traces) UnmarshalMsgDictionary(bts []byte) error { + var err error + if _, bts, err = safeReadHeaderBytes(bts, msgp.ReadArrayHeaderBytes); err != nil { + return err + } + // read dictionary + var sz uint32 + if sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadArrayHeaderBytes); err != nil { + return err + } + dict := make([]string, sz) + for i := range dict { + var str string + str, bts, err = parseStringBytes(bts) + if err != nil { + return err + } + dict[i] = str + } + // read traces + sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadArrayHeaderBytes) + if err != nil { + return err + } + if cap(*t) >= int(sz) { + *t = (*t)[:sz] + } else { + *t = make(Traces, sz) + } + for i := range *t { + sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadArrayHeaderBytes) + if err != nil { + return err + } + if cap((*t)[i]) >= int(sz) { + (*t)[i] = (*t)[i][:sz] + } else { + (*t)[i] = make(Trace, sz) + } + for j := range (*t)[i] { + if (*t)[i][j] == nil { + (*t)[i][j] = new(Span) + } + if bts, err = (*t)[i][j].UnmarshalMsgDictionary(bts, dict); err != nil { + return err + } + } + } + return nil +} + +// spanPropertyCount specifies the number of top-level properties that a span +// has. +const spanPropertyCount = 12 + +// UnmarshalMsgDictionary decodes a span from the given decoder dc, looking up strings +// in the given dictionary dict. For details, see the documentation for endpoint v0.5 +// in pkg/trace/api/version.go +func (z *Span) UnmarshalMsgDictionary(bts []byte, dict []string) ([]byte, error) { + var ( + sz uint32 + err error + ) + sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadArrayHeaderBytes) + if err != nil { + return bts, err + } + if sz != spanPropertyCount { + return bts, errors.New("encoded span needs exactly 12 elements in array") + } + // Service (0) + z.Service, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + // Name (1) + z.Name, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + // Resource (2) + z.Resource, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + // TraceID (3) + z.TraceID, bts, err = parseUint64Bytes(bts) + if err != nil { + return bts, err + } + // SpanID (4) + z.SpanID, bts, err = parseUint64Bytes(bts) + if err != nil { + return bts, err + } + // ParentID (5) + z.ParentID, bts, err = parseUint64Bytes(bts) + if err != nil { + return bts, err + } + // Start (6) + z.Start, bts, err = parseInt64Bytes(bts) + if err != nil { + return bts, err + } + // Duration (7) + z.Duration, bts, err = parseInt64Bytes(bts) + if err != nil { + return bts, err + } + // Error (8) + z.Error, bts, err = parseInt32Bytes(bts) + if err != nil { + return bts, err + } + // Meta (9) + sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadMapHeaderBytes) + if err != nil { + return bts, err + } + if z.Meta == nil && sz > 0 { + z.Meta = make(map[string]string, sz) + } else if len(z.Meta) > 0 { + for key := range z.Meta { + delete(z.Meta, key) + } + } + for sz > 0 { + sz-- + var key, val string + key, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + val, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + z.Meta[key] = val + } + // Metrics (10) + sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadMapHeaderBytes) + if err != nil { + return bts, err + } + if z.Metrics == nil && sz > 0 { + z.Metrics = make(map[string]float64, sz) + } else if len(z.Metrics) > 0 { + for 
key := range z.Metrics { + delete(z.Metrics, key) + } + } + for sz > 0 { + sz-- + var ( + key string + val float64 + ) + key, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + val, bts, err = parseFloat64Bytes(bts) + if err != nil { + return bts, err + } + z.Metrics[key] = val + } + // Type (11) + z.Type, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + return bts, nil +} + +// safeReadHeaderBytes wraps msgp header readers (typically ReadArrayHeaderBytes and ReadMapHeaderBytes). +// It enforces the dictionary max size of 25MB and protects the caller from making unbounded allocations through `make(any, sz)`. +func safeReadHeaderBytes(b []byte, read func([]byte) (uint32, []byte, error)) (uint32, []byte, error) { + sz, bts, err := read(b) + if err != nil { + return 0, nil, err + } + if sz > 25*1e6 { + // Dictionary can't be larger than 25 MB + return 0, nil, errors.New("too long payload") + } + return sz, bts, err +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span.pb.go new file mode 100644 index 00000000..f2c16f1a --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span.pb.go @@ -0,0 +1,866 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.6 +// protoc v5.29.3 +// source: datadog/trace/span.proto + +package trace + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AttributeAnyValue_AttributeAnyValueType int32 + +const ( + AttributeAnyValue_STRING_VALUE AttributeAnyValue_AttributeAnyValueType = 0 + AttributeAnyValue_BOOL_VALUE AttributeAnyValue_AttributeAnyValueType = 1 + AttributeAnyValue_INT_VALUE AttributeAnyValue_AttributeAnyValueType = 2 + AttributeAnyValue_DOUBLE_VALUE AttributeAnyValue_AttributeAnyValueType = 3 + AttributeAnyValue_ARRAY_VALUE AttributeAnyValue_AttributeAnyValueType = 4 +) + +// Enum value maps for AttributeAnyValue_AttributeAnyValueType. 
+var ( + AttributeAnyValue_AttributeAnyValueType_name = map[int32]string{ + 0: "STRING_VALUE", + 1: "BOOL_VALUE", + 2: "INT_VALUE", + 3: "DOUBLE_VALUE", + 4: "ARRAY_VALUE", + } + AttributeAnyValue_AttributeAnyValueType_value = map[string]int32{ + "STRING_VALUE": 0, + "BOOL_VALUE": 1, + "INT_VALUE": 2, + "DOUBLE_VALUE": 3, + "ARRAY_VALUE": 4, + } +) + +func (x AttributeAnyValue_AttributeAnyValueType) Enum() *AttributeAnyValue_AttributeAnyValueType { + p := new(AttributeAnyValue_AttributeAnyValueType) + *p = x + return p +} + +func (x AttributeAnyValue_AttributeAnyValueType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AttributeAnyValue_AttributeAnyValueType) Descriptor() protoreflect.EnumDescriptor { + return file_datadog_trace_span_proto_enumTypes[0].Descriptor() +} + +func (AttributeAnyValue_AttributeAnyValueType) Type() protoreflect.EnumType { + return &file_datadog_trace_span_proto_enumTypes[0] +} + +func (x AttributeAnyValue_AttributeAnyValueType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AttributeAnyValue_AttributeAnyValueType.Descriptor instead. +func (AttributeAnyValue_AttributeAnyValueType) EnumDescriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{2, 0} +} + +type AttributeArrayValue_AttributeArrayValueType int32 + +const ( + AttributeArrayValue_STRING_VALUE AttributeArrayValue_AttributeArrayValueType = 0 + AttributeArrayValue_BOOL_VALUE AttributeArrayValue_AttributeArrayValueType = 1 + AttributeArrayValue_INT_VALUE AttributeArrayValue_AttributeArrayValueType = 2 + AttributeArrayValue_DOUBLE_VALUE AttributeArrayValue_AttributeArrayValueType = 3 +) + +// Enum value maps for AttributeArrayValue_AttributeArrayValueType. +var ( + AttributeArrayValue_AttributeArrayValueType_name = map[int32]string{ + 0: "STRING_VALUE", + 1: "BOOL_VALUE", + 2: "INT_VALUE", + 3: "DOUBLE_VALUE", + } + AttributeArrayValue_AttributeArrayValueType_value = map[string]int32{ + "STRING_VALUE": 0, + "BOOL_VALUE": 1, + "INT_VALUE": 2, + "DOUBLE_VALUE": 3, + } +) + +func (x AttributeArrayValue_AttributeArrayValueType) Enum() *AttributeArrayValue_AttributeArrayValueType { + p := new(AttributeArrayValue_AttributeArrayValueType) + *p = x + return p +} + +func (x AttributeArrayValue_AttributeArrayValueType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AttributeArrayValue_AttributeArrayValueType) Descriptor() protoreflect.EnumDescriptor { + return file_datadog_trace_span_proto_enumTypes[1].Descriptor() +} + +func (AttributeArrayValue_AttributeArrayValueType) Type() protoreflect.EnumType { + return &file_datadog_trace_span_proto_enumTypes[1] +} + +func (x AttributeArrayValue_AttributeArrayValueType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AttributeArrayValue_AttributeArrayValueType.Descriptor instead. +func (AttributeArrayValue_AttributeArrayValueType) EnumDescriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{4, 0} +} + +type SpanLink struct { + state protoimpl.MessageState `protogen:"open.v1"` + // @gotags: json:"trace_id" msg:"trace_id" + TraceID uint64 `protobuf:"varint,1,opt,name=traceID,proto3" json:"trace_id" msg:"trace_id"` // Required. 
+ // @gotags: json:"trace_id_high" msg:"trace_id_high,omitempty" + TraceIDHigh uint64 `protobuf:"varint,2,opt,name=traceID_high,json=traceIDHigh,proto3" json:"trace_id_high" msg:"trace_id_high,omitempty"` // Optional. The high 64 bits of a referenced trace id. + // @gotags: json:"span_id" msg:"span_id" + SpanID uint64 `protobuf:"varint,3,opt,name=spanID,proto3" json:"span_id" msg:"span_id"` // Required. + // @gotags: msg:"attributes,omitempty" + Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value" msg:"attributes,omitempty"` // Optional. Simple mapping of keys to string values. + // @gotags: msg:"tracestate,omitempty" + Tracestate string `protobuf:"bytes,5,opt,name=tracestate,proto3" json:"tracestate,omitempty" msg:"tracestate,omitempty"` // Optional. W3C tracestate. + // @gotags: msg:"flags,omitempty" + Flags uint32 `protobuf:"varint,6,opt,name=flags,proto3" json:"flags,omitempty" msg:"flags,omitempty"` // Optional. W3C trace flags. If set, the high bit (bit 31) must be set. + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SpanLink) Reset() { + *x = SpanLink{} + mi := &file_datadog_trace_span_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SpanLink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpanLink) ProtoMessage() {} + +func (x *SpanLink) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpanLink.ProtoReflect.Descriptor instead. +func (*SpanLink) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{0} +} + +func (x *SpanLink) GetTraceID() uint64 { + if x != nil { + return x.TraceID + } + return 0 +} + +func (x *SpanLink) GetTraceIDHigh() uint64 { + if x != nil { + return x.TraceIDHigh + } + return 0 +} + +func (x *SpanLink) GetSpanID() uint64 { + if x != nil { + return x.SpanID + } + return 0 +} + +func (x *SpanLink) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *SpanLink) GetTracestate() string { + if x != nil { + return x.Tracestate + } + return "" +} + +func (x *SpanLink) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +type SpanEvent struct { + state protoimpl.MessageState `protogen:"open.v1"` + // @gotags: json:"time_unix_nano" msg:"time_unix_nano" + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano" msg:"time_unix_nano"` // time is the number of nanoseconds between the Unix epoch and this event. + // @gotags: json:"name" msg:"name" + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name" msg:"name"` // name is this event's name. + // attributes is a mapping from attribute key string to any value. + // The order of attributes should be preserved in the key/value map. 
+ // The supported values match the OpenTelemetry attributes specification: + // https://github.com/open-telemetry/opentelemetry-proto/blob/a8f08fc49d60538f97ffabcc7feac92f832976dd/opentelemetry/proto/common/v1/common.proto + // @gotags: json:"attributes" msg:"attributes" + Attributes map[string]*AttributeAnyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value" msg:"attributes"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SpanEvent) Reset() { + *x = SpanEvent{} + mi := &file_datadog_trace_span_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SpanEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpanEvent) ProtoMessage() {} + +func (x *SpanEvent) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpanEvent.ProtoReflect.Descriptor instead. +func (*SpanEvent) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{1} +} + +func (x *SpanEvent) GetTimeUnixNano() uint64 { + if x != nil { + return x.TimeUnixNano + } + return 0 +} + +func (x *SpanEvent) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SpanEvent) GetAttributes() map[string]*AttributeAnyValue { + if x != nil { + return x.Attributes + } + return nil +} + +// AttributeAnyValue is used to represent any type of attribute value. AttributeAnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +type AttributeAnyValue struct { + state protoimpl.MessageState `protogen:"open.v1"` + // We implement a union manually here because Go's MessagePack generator does not support + // Protobuf `oneof` unions: https://github.com/tinylib/msgp/issues/184 + // Despite this, the format represented here is binary compatible with `oneof`, if we choose + // to migrate to that in the future. 
+ // @gotags: json:"type" msg:"type" + Type AttributeAnyValue_AttributeAnyValueType `protobuf:"varint,1,opt,name=type,proto3,enum=datadog.trace.AttributeAnyValue_AttributeAnyValueType" json:"type" msg:"type"` + // @gotags: json:"string_value" msg:"string_value" + StringValue string `protobuf:"bytes,2,opt,name=string_value,json=stringValue,proto3" json:"string_value" msg:"string_value"` + // @gotags: json:"bool_value" msg:"bool_value" + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3" json:"bool_value" msg:"bool_value"` + // @gotags: json:"int_value" msg:"int_value" + IntValue int64 `protobuf:"varint,4,opt,name=int_value,json=intValue,proto3" json:"int_value" msg:"int_value"` + // @gotags: json:"double_value" msg:"double_value" + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3" json:"double_value" msg:"double_value"` + // @gotags: json:"array_value" msg:"array_value" + ArrayValue *AttributeArray `protobuf:"bytes,6,opt,name=array_value,json=arrayValue,proto3" json:"array_value" msg:"array_value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AttributeAnyValue) Reset() { + *x = AttributeAnyValue{} + mi := &file_datadog_trace_span_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AttributeAnyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributeAnyValue) ProtoMessage() {} + +func (x *AttributeAnyValue) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributeAnyValue.ProtoReflect.Descriptor instead. +func (*AttributeAnyValue) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{2} +} + +func (x *AttributeAnyValue) GetType() AttributeAnyValue_AttributeAnyValueType { + if x != nil { + return x.Type + } + return AttributeAnyValue_STRING_VALUE +} + +func (x *AttributeAnyValue) GetStringValue() string { + if x != nil { + return x.StringValue + } + return "" +} + +func (x *AttributeAnyValue) GetBoolValue() bool { + if x != nil { + return x.BoolValue + } + return false +} + +func (x *AttributeAnyValue) GetIntValue() int64 { + if x != nil { + return x.IntValue + } + return 0 +} + +func (x *AttributeAnyValue) GetDoubleValue() float64 { + if x != nil { + return x.DoubleValue + } + return 0 +} + +func (x *AttributeAnyValue) GetArrayValue() *AttributeArray { + if x != nil { + return x.ArrayValue + } + return nil +} + +// AttributeArray is a list of AttributeArrayValue messages. We need this as a message since `oneof` in AttributeAnyValue does not allow repeated fields. +type AttributeArray struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Array of values. The array may be empty (contain 0 elements). 
+ // @gotags: json:"values" msg:"values" + Values []*AttributeArrayValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values" msg:"values"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AttributeArray) Reset() { + *x = AttributeArray{} + mi := &file_datadog_trace_span_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AttributeArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributeArray) ProtoMessage() {} + +func (x *AttributeArray) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributeArray.ProtoReflect.Descriptor instead. +func (*AttributeArray) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{3} +} + +func (x *AttributeArray) GetValues() []*AttributeArrayValue { + if x != nil { + return x.Values + } + return nil +} + +// An element in the homogeneous AttributeArray. +// Compared to AttributeAnyValue, it only supports scalar values. +type AttributeArrayValue struct { + state protoimpl.MessageState `protogen:"open.v1"` + // We implement a union manually here because Go's MessagePack generator does not support + // Protobuf `oneof` unions: https://github.com/tinylib/msgp/issues/184 + // Despite this, the format represented here is binary compatible with `oneof`, if we choose + // to migrate to that in the future. + // @gotags: json:"type" msg:"type" + Type AttributeArrayValue_AttributeArrayValueType `protobuf:"varint,1,opt,name=type,proto3,enum=datadog.trace.AttributeArrayValue_AttributeArrayValueType" json:"type" msg:"type"` + // @gotags: json:"string_value" msg:"string_value" + StringValue string `protobuf:"bytes,2,opt,name=string_value,json=stringValue,proto3" json:"string_value" msg:"string_value"` + // @gotags: json:"bool_value" msg:"bool_value" + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3" json:"bool_value" msg:"bool_value"` + // @gotags: json:"int_value" msg:"int_value" + IntValue int64 `protobuf:"varint,4,opt,name=int_value,json=intValue,proto3" json:"int_value" msg:"int_value"` + // @gotags: json:"double_value" msg:"double_value" + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3" json:"double_value" msg:"double_value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AttributeArrayValue) Reset() { + *x = AttributeArrayValue{} + mi := &file_datadog_trace_span_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AttributeArrayValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributeArrayValue) ProtoMessage() {} + +func (x *AttributeArrayValue) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributeArrayValue.ProtoReflect.Descriptor instead. 
+func (*AttributeArrayValue) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{4} +} + +func (x *AttributeArrayValue) GetType() AttributeArrayValue_AttributeArrayValueType { + if x != nil { + return x.Type + } + return AttributeArrayValue_STRING_VALUE +} + +func (x *AttributeArrayValue) GetStringValue() string { + if x != nil { + return x.StringValue + } + return "" +} + +func (x *AttributeArrayValue) GetBoolValue() bool { + if x != nil { + return x.BoolValue + } + return false +} + +func (x *AttributeArrayValue) GetIntValue() int64 { + if x != nil { + return x.IntValue + } + return 0 +} + +func (x *AttributeArrayValue) GetDoubleValue() float64 { + if x != nil { + return x.DoubleValue + } + return 0 +} + +type Span struct { + state protoimpl.MessageState `protogen:"open.v1"` + // service is the name of the service with which this span is associated. + // @gotags: json:"service" msg:"service" + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service" msg:"service"` + // name is the operation name of this span. + // @gotags: json:"name" msg:"name" + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name" msg:"name"` + // resource is the resource name of this span, also sometimes called the endpoint (for web spans). + // @gotags: json:"resource" msg:"resource" + Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource" msg:"resource"` + // traceID is the ID of the trace to which this span belongs. + // @gotags: json:"trace_id" msg:"trace_id" + TraceID uint64 `protobuf:"varint,4,opt,name=traceID,proto3" json:"trace_id" msg:"trace_id"` + // spanID is the ID of this span. + // @gotags: json:"span_id" msg:"span_id" + SpanID uint64 `protobuf:"varint,5,opt,name=spanID,proto3" json:"span_id" msg:"span_id"` + // parentID is the ID of this span's parent, or zero if this span has no parent. + // @gotags: json:"parent_id" msg:"parent_id" + ParentID uint64 `protobuf:"varint,6,opt,name=parentID,proto3" json:"parent_id" msg:"parent_id"` + // start is the number of nanoseconds between the Unix epoch and the beginning of this span. + // @gotags: json:"start" msg:"start" + Start int64 `protobuf:"varint,7,opt,name=start,proto3" json:"start" msg:"start"` + // duration is the time length of this span in nanoseconds. + // @gotags: json:"duration" msg:"duration" + Duration int64 `protobuf:"varint,8,opt,name=duration,proto3" json:"duration" msg:"duration"` + // error is 1 if there is an error associated with this span, or 0 if there is not. + // @gotags: json:"error" msg:"error" + Error int32 `protobuf:"varint,9,opt,name=error,proto3" json:"error" msg:"error"` + // meta is a mapping from tag name to tag value for string-valued tags. + // @gotags: json:"meta,omitempty" msg:"meta,omitempty" + Meta map[string]string `protobuf:"bytes,10,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value" msg:"meta,omitempty"` + // metrics is a mapping from tag name to tag value for numeric-valued tags. + // @gotags: json:"metrics,omitempty" msg:"metrics,omitempty" + Metrics map[string]float64 `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value" msg:"metrics,omitempty"` + // type is the type of the service with which this span is associated. Example values: web, db, lambda. 
+ // @gotags: json:"type" msg:"type" + Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type" msg:"type"` + // meta_struct is a registry of structured "other" data used by, e.g., AppSec. + // @gotags: json:"meta_struct,omitempty" msg:"meta_struct,omitempty" + MetaStruct map[string][]byte `protobuf:"bytes,13,rep,name=meta_struct,json=metaStruct,proto3" json:"meta_struct,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value" msg:"meta_struct,omitempty"` + // span_links represents a collection of links, where each link defines a causal relationship between two spans. + // @gotags: json:"span_links,omitempty" msg:"span_links,omitempty" + SpanLinks []*SpanLink `protobuf:"bytes,14,rep,name=spanLinks,proto3" json:"span_links,omitempty" msg:"span_links,omitempty"` + // spanEvents represent an event at an instant in time related to this span, but not necessarily during the span. + // @gotags: json:"span_events,omitempty" msg:"span_events,omitempty" + SpanEvents []*SpanEvent `protobuf:"bytes,15,rep,name=spanEvents,proto3" json:"span_events,omitempty" msg:"span_events,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Span) Reset() { + *x = Span{} + mi := &file_datadog_trace_span_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Span) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span) ProtoMessage() {} + +func (x *Span) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span.ProtoReflect.Descriptor instead. 
+func (*Span) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{5} +} + +func (x *Span) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *Span) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Span) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *Span) GetTraceID() uint64 { + if x != nil { + return x.TraceID + } + return 0 +} + +func (x *Span) GetSpanID() uint64 { + if x != nil { + return x.SpanID + } + return 0 +} + +func (x *Span) GetParentID() uint64 { + if x != nil { + return x.ParentID + } + return 0 +} + +func (x *Span) GetStart() int64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Span) GetDuration() int64 { + if x != nil { + return x.Duration + } + return 0 +} + +func (x *Span) GetError() int32 { + if x != nil { + return x.Error + } + return 0 +} + +func (x *Span) GetMeta() map[string]string { + if x != nil { + return x.Meta + } + return nil +} + +func (x *Span) GetMetrics() map[string]float64 { + if x != nil { + return x.Metrics + } + return nil +} + +func (x *Span) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Span) GetMetaStruct() map[string][]byte { + if x != nil { + return x.MetaStruct + } + return nil +} + +func (x *Span) GetSpanLinks() []*SpanLink { + if x != nil { + return x.SpanLinks + } + return nil +} + +func (x *Span) GetSpanEvents() []*SpanEvent { + if x != nil { + return x.SpanEvents + } + return nil +} + +var File_datadog_trace_span_proto protoreflect.FileDescriptor + +const file_datadog_trace_span_proto_rawDesc = "" + + "\n" + + "\x18datadog/trace/span.proto\x12\rdatadog.trace\"\x9d\x02\n" + + "\bSpanLink\x12\x18\n" + + "\atraceID\x18\x01 \x01(\x04R\atraceID\x12!\n" + + "\ftraceID_high\x18\x02 \x01(\x04R\vtraceIDHigh\x12\x16\n" + + "\x06spanID\x18\x03 \x01(\x04R\x06spanID\x12G\n" + + "\n" + + "attributes\x18\x04 \x03(\v2'.datadog.trace.SpanLink.AttributesEntryR\n" + + "attributes\x12\x1e\n" + + "\n" + + "tracestate\x18\x05 \x01(\tR\n" + + "tracestate\x12\x14\n" + + "\x05flags\x18\x06 \x01(\rR\x05flags\x1a=\n" + + "\x0fAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xf0\x01\n" + + "\tSpanEvent\x12$\n" + + "\x0etime_unix_nano\x18\x01 \x01(\x06R\ftimeUnixNano\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12H\n" + + "\n" + + "attributes\x18\x03 \x03(\v2(.datadog.trace.SpanEvent.AttributesEntryR\n" + + "attributes\x1a_\n" + + "\x0fAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x126\n" + + "\x05value\x18\x02 \x01(\v2 .datadog.trace.AttributeAnyValueR\x05value:\x028\x01\"\x8e\x03\n" + + "\x11AttributeAnyValue\x12J\n" + + "\x04type\x18\x01 \x01(\x0e26.datadog.trace.AttributeAnyValue.AttributeAnyValueTypeR\x04type\x12!\n" + + "\fstring_value\x18\x02 \x01(\tR\vstringValue\x12\x1d\n" + + "\n" + + "bool_value\x18\x03 \x01(\bR\tboolValue\x12\x1b\n" + + "\tint_value\x18\x04 \x01(\x03R\bintValue\x12!\n" + + "\fdouble_value\x18\x05 \x01(\x01R\vdoubleValue\x12>\n" + + "\varray_value\x18\x06 \x01(\v2\x1d.datadog.trace.AttributeArrayR\n" + + "arrayValue\"k\n" + + "\x15AttributeAnyValueType\x12\x10\n" + + "\fSTRING_VALUE\x10\x00\x12\x0e\n" + + "\n" + + "BOOL_VALUE\x10\x01\x12\r\n" + + "\tINT_VALUE\x10\x02\x12\x10\n" + + "\fDOUBLE_VALUE\x10\x03\x12\x0f\n" + + "\vARRAY_VALUE\x10\x04\"L\n" + + "\x0eAttributeArray\x12:\n" + + "\x06values\x18\x01 
\x03(\v2\".datadog.trace.AttributeArrayValueR\x06values\"\xc5\x02\n" + + "\x13AttributeArrayValue\x12N\n" + + "\x04type\x18\x01 \x01(\x0e2:.datadog.trace.AttributeArrayValue.AttributeArrayValueTypeR\x04type\x12!\n" + + "\fstring_value\x18\x02 \x01(\tR\vstringValue\x12\x1d\n" + + "\n" + + "bool_value\x18\x03 \x01(\bR\tboolValue\x12\x1b\n" + + "\tint_value\x18\x04 \x01(\x03R\bintValue\x12!\n" + + "\fdouble_value\x18\x05 \x01(\x01R\vdoubleValue\"\\\n" + + "\x17AttributeArrayValueType\x12\x10\n" + + "\fSTRING_VALUE\x10\x00\x12\x0e\n" + + "\n" + + "BOOL_VALUE\x10\x01\x12\r\n" + + "\tINT_VALUE\x10\x02\x12\x10\n" + + "\fDOUBLE_VALUE\x10\x03\"\xd4\x05\n" + + "\x04Span\x12\x18\n" + + "\aservice\x18\x01 \x01(\tR\aservice\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12\x1a\n" + + "\bresource\x18\x03 \x01(\tR\bresource\x12\x18\n" + + "\atraceID\x18\x04 \x01(\x04R\atraceID\x12\x16\n" + + "\x06spanID\x18\x05 \x01(\x04R\x06spanID\x12\x1a\n" + + "\bparentID\x18\x06 \x01(\x04R\bparentID\x12\x14\n" + + "\x05start\x18\a \x01(\x03R\x05start\x12\x1a\n" + + "\bduration\x18\b \x01(\x03R\bduration\x12\x14\n" + + "\x05error\x18\t \x01(\x05R\x05error\x121\n" + + "\x04meta\x18\n" + + " \x03(\v2\x1d.datadog.trace.Span.MetaEntryR\x04meta\x12:\n" + + "\ametrics\x18\v \x03(\v2 .datadog.trace.Span.MetricsEntryR\ametrics\x12\x12\n" + + "\x04type\x18\f \x01(\tR\x04type\x12D\n" + + "\vmeta_struct\x18\r \x03(\v2#.datadog.trace.Span.MetaStructEntryR\n" + + "metaStruct\x125\n" + + "\tspanLinks\x18\x0e \x03(\v2\x17.datadog.trace.SpanLinkR\tspanLinks\x128\n" + + "\n" + + "spanEvents\x18\x0f \x03(\v2\x18.datadog.trace.SpanEventR\n" + + "spanEvents\x1a7\n" + + "\tMetaEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1a:\n" + + "\fMetricsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x01R\x05value:\x028\x01\x1a=\n" + + "\x0fMetaStructEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value:\x028\x01B\x16Z\x14pkg/proto/pbgo/traceb\x06proto3" + +var ( + file_datadog_trace_span_proto_rawDescOnce sync.Once + file_datadog_trace_span_proto_rawDescData []byte +) + +func file_datadog_trace_span_proto_rawDescGZIP() []byte { + file_datadog_trace_span_proto_rawDescOnce.Do(func() { + file_datadog_trace_span_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_datadog_trace_span_proto_rawDesc), len(file_datadog_trace_span_proto_rawDesc))) + }) + return file_datadog_trace_span_proto_rawDescData +} + +var file_datadog_trace_span_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_datadog_trace_span_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_datadog_trace_span_proto_goTypes = []any{ + (AttributeAnyValue_AttributeAnyValueType)(0), // 0: datadog.trace.AttributeAnyValue.AttributeAnyValueType + (AttributeArrayValue_AttributeArrayValueType)(0), // 1: datadog.trace.AttributeArrayValue.AttributeArrayValueType + (*SpanLink)(nil), // 2: datadog.trace.SpanLink + (*SpanEvent)(nil), // 3: datadog.trace.SpanEvent + (*AttributeAnyValue)(nil), // 4: datadog.trace.AttributeAnyValue + (*AttributeArray)(nil), // 5: datadog.trace.AttributeArray + (*AttributeArrayValue)(nil), // 6: datadog.trace.AttributeArrayValue + (*Span)(nil), // 7: datadog.trace.Span + nil, // 8: datadog.trace.SpanLink.AttributesEntry + nil, // 9: datadog.trace.SpanEvent.AttributesEntry + nil, // 10: datadog.trace.Span.MetaEntry + nil, // 11: datadog.trace.Span.MetricsEntry + 
nil, // 12: datadog.trace.Span.MetaStructEntry +} +var file_datadog_trace_span_proto_depIdxs = []int32{ + 8, // 0: datadog.trace.SpanLink.attributes:type_name -> datadog.trace.SpanLink.AttributesEntry + 9, // 1: datadog.trace.SpanEvent.attributes:type_name -> datadog.trace.SpanEvent.AttributesEntry + 0, // 2: datadog.trace.AttributeAnyValue.type:type_name -> datadog.trace.AttributeAnyValue.AttributeAnyValueType + 5, // 3: datadog.trace.AttributeAnyValue.array_value:type_name -> datadog.trace.AttributeArray + 6, // 4: datadog.trace.AttributeArray.values:type_name -> datadog.trace.AttributeArrayValue + 1, // 5: datadog.trace.AttributeArrayValue.type:type_name -> datadog.trace.AttributeArrayValue.AttributeArrayValueType + 10, // 6: datadog.trace.Span.meta:type_name -> datadog.trace.Span.MetaEntry + 11, // 7: datadog.trace.Span.metrics:type_name -> datadog.trace.Span.MetricsEntry + 12, // 8: datadog.trace.Span.meta_struct:type_name -> datadog.trace.Span.MetaStructEntry + 2, // 9: datadog.trace.Span.spanLinks:type_name -> datadog.trace.SpanLink + 3, // 10: datadog.trace.Span.spanEvents:type_name -> datadog.trace.SpanEvent + 4, // 11: datadog.trace.SpanEvent.AttributesEntry.value:type_name -> datadog.trace.AttributeAnyValue + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_datadog_trace_span_proto_init() } +func file_datadog_trace_span_proto_init() { + if File_datadog_trace_span_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_datadog_trace_span_proto_rawDesc), len(file_datadog_trace_span_proto_rawDesc)), + NumEnums: 2, + NumMessages: 11, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_datadog_trace_span_proto_goTypes, + DependencyIndexes: file_datadog_trace_span_proto_depIdxs, + EnumInfos: file_datadog_trace_span_proto_enumTypes, + MessageInfos: file_datadog_trace_span_proto_msgTypes, + }.Build() + File_datadog_trace_span_proto = out.File + file_datadog_trace_span_proto_goTypes = nil + file_datadog_trace_span_proto_depIdxs = nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_gen.go new file mode 100644 index 00000000..dbda8158 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_gen.go @@ -0,0 +1,1198 @@ +package trace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
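+//
+// Illustrative round trip (editor's sketch, not part of the generated file): the
+// manual Type + *_value union in AttributeAnyValue stands in for a protobuf oneof,
+// which msgp cannot generate (https://github.com/tinylib/msgp/issues/184). A value
+// is encoded by setting Type and the matching field, then appending to a buffer:
+//
+//	v := &AttributeAnyValue{Type: AttributeAnyValue_STRING_VALUE, StringValue: "checkout"}
+//	b, err := v.MarshalMsg(nil) // appends the MessagePack encoding to a fresh buffer
+//	if err != nil {
+//		panic(err)
+//	}
+//	var out AttributeAnyValue
+//	if _, err := out.UnmarshalMsg(b); err != nil { // out now mirrors *v
+//		panic(err)
+//	}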
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z *AttributeAnyValue) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 6 + // string "type" + o = append(o, 0x86, 0xa4, 0x74, 0x79, 0x70, 0x65) + o = msgp.AppendInt32(o, int32(z.Type)) + // string "string_value" + o = append(o, 0xac, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendString(o, z.StringValue) + // string "bool_value" + o = append(o, 0xaa, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendBool(o, z.BoolValue) + // string "int_value" + o = append(o, 0xa9, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendInt64(o, z.IntValue) + // string "double_value" + o = append(o, 0xac, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.DoubleValue) + // string "array_value" + o = append(o, 0xab, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + if z.ArrayValue == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 1 + // string "values" + o = append(o, 0x81, 0xa6, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.ArrayValue.Values))) + for za0001 := range z.ArrayValue.Values { + if z.ArrayValue.Values[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ArrayValue.Values[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ArrayValue", "Values", za0001) + return + } + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AttributeAnyValue) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "type": + { + var zb0002 int32 + zb0002, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = AttributeAnyValue_AttributeAnyValueType(zb0002) + } + case "string_value": + z.StringValue, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "StringValue") + return + } + case "bool_value": + z.BoolValue, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "BoolValue") + return + } + case "int_value": + z.IntValue, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + case "double_value": + z.DoubleValue, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DoubleValue") + return + } + case "array_value": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ArrayValue = nil + } else { + if z.ArrayValue == nil { + z.ArrayValue = new(AttributeArray) + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ArrayValue") + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "ArrayValue") + return + } + switch msgp.UnsafeString(field) { + case "values": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ArrayValue", "Values") + return + } + if 
cap(z.ArrayValue.Values) >= int(zb0004) { + z.ArrayValue.Values = (z.ArrayValue.Values)[:zb0004] + } else { + z.ArrayValue.Values = make([]*AttributeArrayValue, zb0004) + } + for za0001 := range z.ArrayValue.Values { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ArrayValue.Values[za0001] = nil + } else { + if z.ArrayValue.Values[za0001] == nil { + z.ArrayValue.Values[za0001] = new(AttributeArrayValue) + } + bts, err = z.ArrayValue.Values[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ArrayValue", "Values", za0001) + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "ArrayValue") + return + } + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AttributeAnyValue) Msgsize() (s int) { + s = 1 + 5 + msgp.Int32Size + 13 + msgp.StringPrefixSize + len(z.StringValue) + 11 + msgp.BoolSize + 10 + msgp.Int64Size + 13 + msgp.Float64Size + 12 + if z.ArrayValue == nil { + s += msgp.NilSize + } else { + s += 1 + 7 + msgp.ArrayHeaderSize + for za0001 := range z.ArrayValue.Values { + if z.ArrayValue.Values[za0001] == nil { + s += msgp.NilSize + } else { + s += z.ArrayValue.Values[za0001].Msgsize() + } + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z AttributeAnyValue_AttributeAnyValueType) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendInt32(o, int32(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AttributeAnyValue_AttributeAnyValueType) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 int32 + zb0001, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = AttributeAnyValue_AttributeAnyValueType(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z AttributeAnyValue_AttributeAnyValueType) Msgsize() (s int) { + s = msgp.Int32Size + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *AttributeArray) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 1 + // string "values" + o = append(o, 0x81, 0xa6, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Values))) + for za0001 := range z.Values { + if z.Values[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Values[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Values", za0001) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AttributeArray) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "values": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Values") + return + } + if cap(z.Values) >= int(zb0002) { + z.Values = (z.Values)[:zb0002] + } else { + z.Values = make([]*AttributeArrayValue, zb0002) + } + for za0001 := range 
z.Values { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Values[za0001] = nil + } else { + if z.Values[za0001] == nil { + z.Values[za0001] = new(AttributeArrayValue) + } + bts, err = z.Values[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Values", za0001) + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AttributeArray) Msgsize() (s int) { + s = 1 + 7 + msgp.ArrayHeaderSize + for za0001 := range z.Values { + if z.Values[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Values[za0001].Msgsize() + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *AttributeArrayValue) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "type" + o = append(o, 0x85, 0xa4, 0x74, 0x79, 0x70, 0x65) + o = msgp.AppendInt32(o, int32(z.Type)) + // string "string_value" + o = append(o, 0xac, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendString(o, z.StringValue) + // string "bool_value" + o = append(o, 0xaa, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendBool(o, z.BoolValue) + // string "int_value" + o = append(o, 0xa9, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendInt64(o, z.IntValue) + // string "double_value" + o = append(o, 0xac, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.DoubleValue) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AttributeArrayValue) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "type": + { + var zb0002 int32 + zb0002, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = AttributeArrayValue_AttributeArrayValueType(zb0002) + } + case "string_value": + z.StringValue, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "StringValue") + return + } + case "bool_value": + z.BoolValue, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "BoolValue") + return + } + case "int_value": + z.IntValue, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + case "double_value": + z.DoubleValue, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DoubleValue") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AttributeArrayValue) Msgsize() (s int) { + s = 1 + 5 + msgp.Int32Size + 13 + msgp.StringPrefixSize + len(z.StringValue) + 11 + msgp.BoolSize + 10 + msgp.Int64Size + 13 + msgp.Float64Size + return +} + +// MarshalMsg implements msgp.Marshaler +func (z AttributeArrayValue_AttributeArrayValueType) MarshalMsg(b []byte) (o []byte, 
err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendInt32(o, int32(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AttributeArrayValue_AttributeArrayValueType) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 int32 + zb0001, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = AttributeArrayValue_AttributeArrayValueType(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z AttributeArrayValue_AttributeArrayValueType) Msgsize() (s int) { + s = msgp.Int32Size + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Span) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // check for omitted fields + zb0001Len := uint32(15) + var zb0001Mask uint16 /* 15 bits */ + _ = zb0001Mask + if z.Meta == nil { + zb0001Len-- + zb0001Mask |= 0x200 + } + if z.Metrics == nil { + zb0001Len-- + zb0001Mask |= 0x400 + } + if z.MetaStruct == nil { + zb0001Len-- + zb0001Mask |= 0x1000 + } + if z.SpanLinks == nil { + zb0001Len-- + zb0001Mask |= 0x2000 + } + if z.SpanEvents == nil { + zb0001Len-- + zb0001Mask |= 0x4000 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "service" + o = append(o, 0xa7, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + o = msgp.AppendString(o, z.Service) + // string "name" + o = append(o, 0xa4, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Name) + // string "resource" + o = append(o, 0xa8, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) + o = msgp.AppendString(o, z.Resource) + // string "trace_id" + o = append(o, 0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64) + o = msgp.AppendUint64(o, z.TraceID) + // string "span_id" + o = append(o, 0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64) + o = msgp.AppendUint64(o, z.SpanID) + // string "parent_id" + o = append(o, 0xa9, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64) + o = msgp.AppendUint64(o, z.ParentID) + // string "start" + o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) + o = msgp.AppendInt64(o, z.Start) + // string "duration" + o = append(o, 0xa8, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendInt64(o, z.Duration) + // string "error" + o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72) + o = msgp.AppendInt32(o, z.Error) + if (zb0001Mask & 0x200) == 0 { // if not omitted + // string "meta" + o = append(o, 0xa4, 0x6d, 0x65, 0x74, 0x61) + o = msgp.AppendMapHeader(o, uint32(len(z.Meta))) + for za0001, za0002 := range z.Meta { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + } + if (zb0001Mask & 0x400) == 0 { // if not omitted + // string "metrics" + o = append(o, 0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Metrics))) + for za0003, za0004 := range z.Metrics { + o = msgp.AppendString(o, za0003) + o = msgp.AppendFloat64(o, za0004) + } + } + // string "type" + o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65) + o = msgp.AppendString(o, z.Type) + if (zb0001Mask & 0x1000) == 0 { // if not omitted + // string "meta_struct" + o = append(o, 0xab, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74) + o = msgp.AppendMapHeader(o, uint32(len(z.MetaStruct))) + for za0005, za0006 := range z.MetaStruct { + o = msgp.AppendString(o, za0005) + o = msgp.AppendBytes(o, za0006) + } + } + if (zb0001Mask & 
0x2000) == 0 { // if not omitted + // string "span_links" + o = append(o, 0xaa, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.SpanLinks))) + for za0007 := range z.SpanLinks { + if z.SpanLinks[za0007] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.SpanLinks[za0007].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "SpanLinks", za0007) + return + } + } + } + } + if (zb0001Mask & 0x4000) == 0 { // if not omitted + // string "span_events" + o = append(o, 0xab, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.SpanEvents))) + for za0008 := range z.SpanEvents { + if z.SpanEvents[za0008] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.SpanEvents[za0008].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "SpanEvents", za0008) + return + } + } + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "service": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Service = "" + break + } + z.Service, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "name": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Name = "" + break + } + z.Name, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "resource": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Resource = "" + break + } + z.Resource, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Resource") + return + } + case "trace_id": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.TraceID = 0 + break + } + z.TraceID, bts, err = parseUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TraceID") + return + } + case "span_id": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.SpanID = 0 + break + } + z.SpanID, bts, err = parseUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "SpanID") + return + } + case "parent_id": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.ParentID = 0 + break + } + z.ParentID, bts, err = parseUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ParentID") + return + } + case "start": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Start = 0 + break + } + z.Start, bts, err = parseInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Start") + return + } + case "duration": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Duration = 0 + break + } + z.Duration, bts, err = parseInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + case "error": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Error = 0 + break + } + z.Error, bts, err = parseInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Error") + return + } + case "meta": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Meta = nil + break + } + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = 
msgp.WrapError(err, "Meta") + return + } + if z.Meta == nil && zb0002 > 0 { + z.Meta = make(map[string]string, zb0002) + } else if len(z.Meta) > 0 { + for key := range z.Meta { + delete(z.Meta, key) + } + } + for zb0002 > 0 { + var za0001 string + var za0002 string + zb0002-- + za0001, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + za0002, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Meta", za0001) + return + } + z.Meta[za0001] = za0002 + } + case "metrics": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Metrics = nil + break + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Metrics") + return + } + if z.Metrics == nil && zb0003 > 0{ + z.Metrics = make(map[string]float64, zb0003) + } else if len(z.Metrics) > 0 { + for key := range z.Metrics { + delete(z.Metrics, key) + } + } + for zb0003 > 0 { + var za0003 string + var za0004 float64 + zb0003-- + za0003, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Metrics") + return + } + za0004, bts, err = parseFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Metrics", za0003) + return + } + z.Metrics[za0003] = za0004 + } + case "type": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Type = "" + break + } + z.Type, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + case "meta_struct": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaStruct") + return + } + if z.MetaStruct == nil { + z.MetaStruct = make(map[string][]byte, zb0004) + } else if len(z.MetaStruct) > 0 { + for key := range z.MetaStruct { + delete(z.MetaStruct, key) + } + } + for zb0004 > 0 { + var za0005 string + var za0006 []byte + zb0004-- + za0005, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaStruct") + return + } + za0006, bts, err = msgp.ReadBytesBytes(bts, za0006) + if err != nil { + err = msgp.WrapError(err, "MetaStruct", za0005) + return + } + z.MetaStruct[za0005] = za0006 + } + case "span_links": + var zb0005 uint32 + zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SpanLinks") + return + } + if cap(z.SpanLinks) >= int(zb0005) { + z.SpanLinks = (z.SpanLinks)[:zb0005] + } else { + z.SpanLinks = make([]*SpanLink, zb0005) + } + for za0007 := range z.SpanLinks { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.SpanLinks[za0007] = nil + } else { + if z.SpanLinks[za0007] == nil { + z.SpanLinks[za0007] = new(SpanLink) + } + bts, err = z.SpanLinks[za0007].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "SpanLinks", za0007) + return + } + } + } + case "span_events": + var zb0006 uint32 + zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SpanEvents") + return + } + if cap(z.SpanEvents) >= int(zb0006) { + z.SpanEvents = (z.SpanEvents)[:zb0006] + } else { + z.SpanEvents = make([]*SpanEvent, zb0006) + } + for za0008 := range z.SpanEvents { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.SpanEvents[za0008] = nil + } else { + if z.SpanEvents[za0008] == nil { + z.SpanEvents[za0008] = new(SpanEvent) + } + bts, err = z.SpanEvents[za0008].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, 
"SpanEvents", za0008) + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Span) Msgsize() (s int) { + s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 9 + msgp.Uint64Size + 8 + msgp.Uint64Size + 10 + msgp.Uint64Size + 6 + msgp.Int64Size + 9 + msgp.Int64Size + 6 + msgp.Int32Size + 5 + msgp.MapHeaderSize + if z.Meta != nil { + for za0001, za0002 := range z.Meta { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += 8 + msgp.MapHeaderSize + if z.Metrics != nil { + for za0003, za0004 := range z.Metrics { + _ = za0004 + s += msgp.StringPrefixSize + len(za0003) + msgp.Float64Size + } + } + s += 5 + msgp.StringPrefixSize + len(z.Type) + 12 + msgp.MapHeaderSize + if z.MetaStruct != nil { + for za0005, za0006 := range z.MetaStruct { + _ = za0006 + s += msgp.StringPrefixSize + len(za0005) + msgp.BytesPrefixSize + len(za0006) + } + } + s += 11 + msgp.ArrayHeaderSize + for za0007 := range z.SpanLinks { + if z.SpanLinks[za0007] == nil { + s += msgp.NilSize + } else { + s += z.SpanLinks[za0007].Msgsize() + } + } + s += 12 + msgp.ArrayHeaderSize + for za0008 := range z.SpanEvents { + if z.SpanEvents[za0008] == nil { + s += msgp.NilSize + } else { + s += z.SpanEvents[za0008].Msgsize() + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *SpanEvent) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "time_unix_nano" + o = append(o, 0x83, 0xae, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f) + o = msgp.AppendUint64(o, z.TimeUnixNano) + // string "name" + o = append(o, 0xa4, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Name) + // string "attributes" + o = append(o, 0xaa, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Attributes))) + for za0001, za0002 := range z.Attributes { + o = msgp.AppendString(o, za0001) + if za0002 == nil { + o = msgp.AppendNil(o) + } else { + o, err = za0002.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Attributes", za0001) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *SpanEvent) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "time_unix_nano": + z.TimeUnixNano, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TimeUnixNano") + return + } + case "name": + z.Name, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "attributes": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + if z.Attributes == nil { + z.Attributes = make(map[string]*AttributeAnyValue, zb0002) + } else if len(z.Attributes) > 0 { + for key := range z.Attributes { + delete(z.Attributes, key) + } + } + for zb0002 > 
0 { + var za0001 string + var za0002 *AttributeAnyValue + zb0002-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + za0002 = nil + } else { + if za0002 == nil { + za0002 = new(AttributeAnyValue) + } + bts, err = za0002.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Attributes", za0001) + return + } + } + z.Attributes[za0001] = za0002 + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SpanEvent) Msgsize() (s int) { + s = 1 + 15 + msgp.Uint64Size + 5 + msgp.StringPrefixSize + len(z.Name) + 11 + msgp.MapHeaderSize + if z.Attributes != nil { + for za0001, za0002 := range z.Attributes { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + if za0002 == nil { + s += msgp.NilSize + } else { + s += za0002.Msgsize() + } + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *SpanLink) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // check for omitted fields + zb0001Len := uint32(6) + var zb0001Mask uint8 /* 6 bits */ + _ = zb0001Mask + if z.TraceIDHigh == 0 { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.Attributes == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + if z.Tracestate == "" { + zb0001Len-- + zb0001Mask |= 0x10 + } + if z.Flags == 0 { + zb0001Len-- + zb0001Mask |= 0x20 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "trace_id" + o = append(o, 0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64) + o = msgp.AppendUint64(o, z.TraceID) + if (zb0001Mask & 0x2) == 0 { // if not omitted + // string "trace_id_high" + o = append(o, 0xad, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x68, 0x69, 0x67, 0x68) + o = msgp.AppendUint64(o, z.TraceIDHigh) + } + // string "span_id" + o = append(o, 0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64) + o = msgp.AppendUint64(o, z.SpanID) + if (zb0001Mask & 0x8) == 0 { // if not omitted + // string "attributes" + o = append(o, 0xaa, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Attributes))) + for za0001, za0002 := range z.Attributes { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + } + if (zb0001Mask & 0x10) == 0 { // if not omitted + // string "tracestate" + o = append(o, 0xaa, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65) + o = msgp.AppendString(o, z.Tracestate) + } + if (zb0001Mask & 0x20) == 0 { // if not omitted + // string "flags" + o = append(o, 0xa5, 0x66, 0x6c, 0x61, 0x67, 0x73) + o = msgp.AppendUint32(o, z.Flags) + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *SpanLink) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "trace_id": + z.TraceID, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TraceID") + return + } + case 
"trace_id_high": + z.TraceIDHigh, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TraceIDHigh") + return + } + case "span_id": + z.SpanID, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "SpanID") + return + } + case "attributes": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + if z.Attributes == nil { + z.Attributes = make(map[string]string, zb0002) + } else if len(z.Attributes) > 0 { + for key := range z.Attributes { + delete(z.Attributes, key) + } + } + for zb0002 > 0 { + var za0001 string + var za0002 string + zb0002-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Attributes", za0001) + return + } + z.Attributes[za0001] = za0002 + } + case "tracestate": + z.Tracestate, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tracestate") + return + } + case "flags": + z.Flags, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Flags") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SpanLink) Msgsize() (s int) { + s = 1 + 9 + msgp.Uint64Size + 14 + msgp.Uint64Size + 8 + msgp.Uint64Size + 11 + msgp.MapHeaderSize + if z.Attributes != nil { + for za0001, za0002 := range z.Attributes { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += 11 + msgp.StringPrefixSize + len(z.Tracestate) + 6 + msgp.Uint32Size + return +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_utils.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_utils.go new file mode 100644 index 00000000..3594e870 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_utils.go @@ -0,0 +1,55 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package trace + +// spanCopiedFields records the fields that are copied in ShallowCopy. +// This should match exactly the fields set in (*Span).ShallowCopy. +// This is used by tests to enforce the correctness of ShallowCopy. +var spanCopiedFields = map[string]struct{}{ + "Service": {}, + "Name": {}, + "Resource": {}, + "TraceID": {}, + "SpanID": {}, + "ParentID": {}, + "Start": {}, + "Duration": {}, + "Error": {}, + "Meta": {}, + "Metrics": {}, + "Type": {}, + "MetaStruct": {}, + "SpanLinks": {}, + "SpanEvents": {}, +} + +// ShallowCopy returns a shallow copy of the copy-able portion of a Span. These are the +// public fields which will have a Get* method for them. The completeness of this +// method is enforced by the init function above. Instead of using pkg/proto/utils.ProtoCopier, +// which incurs heavy reflection cost for every copy at runtime, we use reflection once at +// startup to ensure our method is complete. 
+func (s *Span) ShallowCopy() *Span { + if s == nil { + return &Span{} + } + return &Span{ + Service: s.Service, + Name: s.Name, + Resource: s.Resource, + TraceID: s.TraceID, + SpanID: s.SpanID, + ParentID: s.ParentID, + Start: s.Start, + Duration: s.Duration, + Error: s.Error, + Meta: s.Meta, + Metrics: s.Metrics, + Type: s.Type, + MetaStruct: s.MetaStruct, + SpanLinks: s.SpanLinks, + SpanEvents: s.SpanEvents, + } +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_vtproto.pb.go new file mode 100644 index 00000000..027bcdfe --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_vtproto.pb.go @@ -0,0 +1,2399 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.1-0.20240319094008-0393e58bdf10 +// source: datadog/trace/span.proto + +package trace + +import ( + binary "encoding/binary" + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SpanLink) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SpanLink) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SpanLink) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Flags != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x30 + } + if len(m.Tracestate) > 0 { + i -= len(m.Tracestate) + copy(dAtA[i:], m.Tracestate) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tracestate))) + i-- + dAtA[i] = 0x2a + } + if len(m.Attributes) > 0 { + for k := range m.Attributes { + v := m.Attributes[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.SpanID != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SpanID)) + i-- + dAtA[i] = 0x18 + } + if m.TraceIDHigh != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TraceIDHigh)) + i-- + dAtA[i] = 0x10 + } + if m.TraceID != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TraceID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SpanEvent) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SpanEvent) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SpanEvent) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Attributes) > 0 { + for k := range m.Attributes { + v := m.Attributes[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.TimeUnixNano != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *AttributeAnyValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttributeAnyValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AttributeAnyValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ArrayValue != nil { + size, err := m.ArrayValue.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.DoubleValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) + i-- + dAtA[i] = 0x29 + } + if m.IntValue != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x20 + } + if m.BoolValue { + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.StringValue) > 0 { + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AttributeArray) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttributeArray) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AttributeArray) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Values[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } 
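+ // Note: vtprotobuf encodes back-to-front into a presized buffer, so i starts at + // len(dAtA) and moves toward 0; the byte count written is len(dAtA) - i.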
+ return len(dAtA) - i, nil +} + +func (m *AttributeArrayValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttributeArrayValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AttributeArrayValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DoubleValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) + i-- + dAtA[i] = 0x29 + } + if m.IntValue != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x20 + } + if m.BoolValue { + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.StringValue) > 0 { + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Span) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Span) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Span) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SpanEvents) > 0 { + for iNdEx := len(m.SpanEvents) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SpanEvents[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + } + if len(m.SpanLinks) > 0 { + for iNdEx := len(m.SpanLinks) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SpanLinks[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + } + if len(m.MetaStruct) > 0 { + for k := range m.MetaStruct { + v := m.MetaStruct[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x62 + } + if len(m.Metrics) > 0 { + for k := range m.Metrics { + v := m.Metrics[k] + baseI := i + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(v)))) + i-- + dAtA[i] = 0x11 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if len(m.Meta) > 0 { + for k := range m.Meta { + v := m.Meta[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if m.Error != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Error)) + i-- + dAtA[i] = 0x48 + } + if m.Duration != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x40 + } + if m.Start != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x38 + } + if m.ParentID != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ParentID)) + i-- + dAtA[i] = 0x30 + } + if m.SpanID != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SpanID)) + i-- + dAtA[i] = 0x28 + } + if m.TraceID != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TraceID)) + i-- + dAtA[i] = 0x20 + } + if len(m.Resource) > 0 { + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SpanLink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TraceID != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TraceID)) + } + if m.TraceIDHigh != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TraceIDHigh)) + } + if m.SpanID != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SpanID)) + } + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.Tracestate) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Flags != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Flags)) + } + n += len(m.unknownFields) + return n +} + +func (m *SpanEvent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeUnixNano != 0 { + n += 9 + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AttributeAnyValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + } + l = len(m.StringValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BoolValue { + n += 2 + } + if m.IntValue != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.IntValue)) + } + if m.DoubleValue != 0 { + n += 9 + } + if m.ArrayValue != nil { + l = m.ArrayValue.SizeVT() + n += 1 + l 
+ protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AttributeArray) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AttributeArrayValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + } + l = len(m.StringValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BoolValue { + n += 2 + } + if m.IntValue != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.IntValue)) + } + if m.DoubleValue != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *Span) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Resource) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TraceID != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TraceID)) + } + if m.SpanID != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SpanID)) + } + if m.ParentID != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ParentID)) + } + if m.Start != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Start)) + } + if m.Duration != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Duration)) + } + if m.Error != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Error)) + } + if len(m.Meta) > 0 { + for k, v := range m.Meta { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.Metrics) > 0 { + for k, v := range m.Metrics { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + 8 + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.Type) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.MetaStruct) > 0 { + for k, v := range m.MetaStruct { + _ = k + _ = v + l = 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.SpanLinks) > 0 { + for _, e := range m.SpanLinks { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.SpanEvents) > 0 { + for _, e := range m.SpanEvents { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *SpanLink) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SpanLink: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SpanLink: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + } + m.TraceID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TraceID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceIDHigh", wireType) + } + m.TraceIDHigh = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TraceIDHigh |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) + } + m.SpanID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SpanID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF 
+ } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tracestate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tracestate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SpanEvent) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SpanEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SpanEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[string]*AttributeAnyValue) + } + var mapkey string + var mapvalue *AttributeAnyValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var 
mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return protohelpers.ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &AttributeAnyValue{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttributeAnyValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttributeAnyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttributeAnyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= AttributeAnyValue_AttributeAnyValueType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StringValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + m.BoolValue = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + m.IntValue = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntValue |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.DoubleValue = float64(math.Float64frombits(v)) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ArrayValue == nil { + m.ArrayValue = &AttributeArray{} + } + if err := m.ArrayValue.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttributeArray) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttributeArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttributeArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &AttributeArrayValue{}) + if err := m.Values[len(m.Values)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttributeArrayValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttributeArrayValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttributeArrayValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= AttributeArrayValue_AttributeArrayValueType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StringValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BoolValue = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + m.IntValue = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntValue |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.DoubleValue = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Span) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Span: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + } + m.TraceID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TraceID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) + } + m.SpanID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.SpanID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentID", wireType) + } + m.ParentID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ParentID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + m.Error = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Error |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Meta == nil { + m.Meta = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Meta[mapkey] = mapvalue + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metrics == nil { + m.Metrics = make(map[string]float64) + } + var mapkey string + var mapvalue float64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + mapvaluetemp = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + mapvalue = math.Float64frombits(mapvaluetemp) + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metrics[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetaStruct", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetaStruct == nil { + m.MetaStruct = make(map[string][]byte) + } + var mapkey string + var mapvalue []byte + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return protohelpers.ErrInvalidLength + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetaStruct[mapkey] = mapvalue + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanLinks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpanLinks = append(m.SpanLinks, &SpanLink{}) + if err := m.SpanLinks[len(m.SpanLinks)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanEvents", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpanEvents = append(m.SpanEvents, &SpanEvent{}) + if err := m.SpanEvents[len(m.SpanEvents)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats.pb.go
new file mode 100644
index 00000000..77441daf
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats.pb.go
@@ -0,0 +1,761 @@
+// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.6 +// protoc v5.29.3 +// source: datadog/trace/stats.proto + +package trace + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Trilean is an expanded boolean type that is meant to differentiate between being unset and false. +type Trilean int32 + +const ( + Trilean_NOT_SET Trilean = 0 + Trilean_TRUE Trilean = 1 + Trilean_FALSE Trilean = 2 +) + +// Enum value maps for Trilean. +var ( + Trilean_name = map[int32]string{ + 0: "NOT_SET", + 1: "TRUE", + 2: "FALSE", + } + Trilean_value = map[string]int32{ + "NOT_SET": 0, + "TRUE": 1, + "FALSE": 2, + } +) + +func (x Trilean) Enum() *Trilean { + p := new(Trilean) + *p = x + return p +} + +func (x Trilean) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Trilean) Descriptor() protoreflect.EnumDescriptor { + return file_datadog_trace_stats_proto_enumTypes[0].Descriptor() +} + +func (Trilean) Type() protoreflect.EnumType { + return &file_datadog_trace_stats_proto_enumTypes[0] +} + +func (x Trilean) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Trilean.Descriptor instead.
+func (Trilean) EnumDescriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{0} +} + +type TraceRootFlag int32 + +const ( + TraceRootFlag_DEPRECATED_NOT_SET TraceRootFlag = 0 + TraceRootFlag_DEPRECATED_TRUE TraceRootFlag = 1 + TraceRootFlag_DEPRECATED_FALSE TraceRootFlag = 2 +) + +// Enum value maps for TraceRootFlag. +var ( + TraceRootFlag_name = map[int32]string{ + 0: "DEPRECATED_NOT_SET", + 1: "DEPRECATED_TRUE", + 2: "DEPRECATED_FALSE", + } + TraceRootFlag_value = map[string]int32{ + "DEPRECATED_NOT_SET": 0, + "DEPRECATED_TRUE": 1, + "DEPRECATED_FALSE": 2, + } +) + +func (x TraceRootFlag) Enum() *TraceRootFlag { + p := new(TraceRootFlag) + *p = x + return p +} + +func (x TraceRootFlag) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TraceRootFlag) Descriptor() protoreflect.EnumDescriptor { + return file_datadog_trace_stats_proto_enumTypes[1].Descriptor() +} + +func (TraceRootFlag) Type() protoreflect.EnumType { + return &file_datadog_trace_stats_proto_enumTypes[1] +} + +func (x TraceRootFlag) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TraceRootFlag.Descriptor instead. +func (TraceRootFlag) EnumDescriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{1} +} + +// StatsPayload is the payload used to send stats from the agent to the backend. +type StatsPayload struct { + state protoimpl.MessageState `protogen:"open.v1"` + AgentHostname string `protobuf:"bytes,1,opt,name=agentHostname,proto3" json:"agentHostname,omitempty"` + AgentEnv string `protobuf:"bytes,2,opt,name=agentEnv,proto3" json:"agentEnv,omitempty"` + // @gotags: json:"stats,omitempty" msg:"Stats,omitempty" + Stats []*ClientStatsPayload `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats,omitempty" msg:"Stats,omitempty"` + AgentVersion string `protobuf:"bytes,4,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` + ClientComputed bool `protobuf:"varint,5,opt,name=clientComputed,proto3" json:"clientComputed,omitempty"` + // splitPayload indicates if the payload is actually one of several payloads split out from a larger payload. + // This field can be used in the backend to signal if re-aggregation is necessary. + SplitPayload bool `protobuf:"varint,6,opt,name=splitPayload,proto3" json:"splitPayload,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatsPayload) Reset() { + *x = StatsPayload{} + mi := &file_datadog_trace_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatsPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsPayload) ProtoMessage() {} + +func (x *StatsPayload) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsPayload.ProtoReflect.Descriptor instead. 
+func (*StatsPayload) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{0} +} + +func (x *StatsPayload) GetAgentHostname() string { + if x != nil { + return x.AgentHostname + } + return "" +} + +func (x *StatsPayload) GetAgentEnv() string { + if x != nil { + return x.AgentEnv + } + return "" +} + +func (x *StatsPayload) GetStats() []*ClientStatsPayload { + if x != nil { + return x.Stats + } + return nil +} + +func (x *StatsPayload) GetAgentVersion() string { + if x != nil { + return x.AgentVersion + } + return "" +} + +func (x *StatsPayload) GetClientComputed() bool { + if x != nil { + return x.ClientComputed + } + return false +} + +func (x *StatsPayload) GetSplitPayload() bool { + if x != nil { + return x.SplitPayload + } + return false +} + +// ClientStatsPayload is the first layer of span stats aggregation. It is also +// the payload sent by tracers to the agent when stats in tracer are enabled. +type ClientStatsPayload struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Hostname is the tracer hostname. It's extracted from spans with "_dd.hostname" meta + // or set by tracer stats payload when hostname reporting is enabled. + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + Env string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"` // env tag set on spans or in the tracers, used for aggregation + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` // version tag set on spans or in the tracers, used for aggregation + // @gotags: json:"stats,omitempty" msg:"Stats,omitempty" + Stats []*ClientStatsBucket `protobuf:"bytes,4,rep,name=stats,proto3" json:"stats,omitempty" msg:"Stats,omitempty"` + Lang string `protobuf:"bytes,5,opt,name=lang,proto3" json:"lang,omitempty"` // informative field not used for aggregation + TracerVersion string `protobuf:"bytes,6,opt,name=tracerVersion,proto3" json:"tracerVersion,omitempty"` // informative field not used for aggregation + RuntimeID string `protobuf:"bytes,7,opt,name=runtimeID,proto3" json:"runtimeID,omitempty"` // used on stats payloads sent by the tracer to uniquely identify a message + Sequence uint64 `protobuf:"varint,8,opt,name=sequence,proto3" json:"sequence,omitempty"` // used on stats payloads sent by the tracer to uniquely identify a message + // AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation layer; + // it characterizes counts-only and distributions-only payloads + AgentAggregation string `protobuf:"bytes,9,opt,name=agentAggregation,proto3" json:"agentAggregation,omitempty"` + // Service is the main service of the tracer. + // It is part of unified tagging: https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging + Service string `protobuf:"bytes,10,opt,name=service,proto3" json:"service,omitempty"` + // ContainerID specifies the origin container ID. It is meant to be populated by the client and may + // be enhanced by the agent to ensure it is unique. + ContainerID string `protobuf:"bytes,11,opt,name=containerID,proto3" json:"containerID,omitempty"` + // Tags specifies a set of tags obtained from the orchestrator (where applicable) using the specified containerID. + // This field should be left empty by the client. It only applies to some specific environments.
+ Tags []string `protobuf:"bytes,12,rep,name=tags,proto3" json:"tags,omitempty"` + // The git commit SHA is obtained from a trace, where it may be set through a tracer <-> source code integration. + GitCommitSha string `protobuf:"bytes,13,opt,name=git_commit_sha,json=gitCommitSha,proto3" json:"git_commit_sha,omitempty"` + // The image tag is obtained from a container's set of tags. + ImageTag string `protobuf:"bytes,14,opt,name=image_tag,json=imageTag,proto3" json:"image_tag,omitempty"` + // The process tags hash is used as a key for agent stats aggregation. + ProcessTagsHash uint64 `protobuf:"varint,15,opt,name=process_tags_hash,json=processTagsHash,proto3" json:"process_tags_hash,omitempty"` + // The process tags contain a list of tags that are specific to the process. + ProcessTags string `protobuf:"bytes,16,opt,name=process_tags,json=processTags,proto3" json:"process_tags,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClientStatsPayload) Reset() { + *x = ClientStatsPayload{} + mi := &file_datadog_trace_stats_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClientStatsPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatsPayload) ProtoMessage() {} + +func (x *ClientStatsPayload) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatsPayload.ProtoReflect.Descriptor instead. +func (*ClientStatsPayload) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientStatsPayload) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *ClientStatsPayload) GetEnv() string { + if x != nil { + return x.Env + } + return "" +} + +func (x *ClientStatsPayload) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientStatsPayload) GetStats() []*ClientStatsBucket { + if x != nil { + return x.Stats + } + return nil +} + +func (x *ClientStatsPayload) GetLang() string { + if x != nil { + return x.Lang + } + return "" +} + +func (x *ClientStatsPayload) GetTracerVersion() string { + if x != nil { + return x.TracerVersion + } + return "" +} + +func (x *ClientStatsPayload) GetRuntimeID() string { + if x != nil { + return x.RuntimeID + } + return "" +} + +func (x *ClientStatsPayload) GetSequence() uint64 { + if x != nil { + return x.Sequence + } + return 0 +} + +func (x *ClientStatsPayload) GetAgentAggregation() string { + if x != nil { + return x.AgentAggregation + } + return "" +} + +func (x *ClientStatsPayload) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *ClientStatsPayload) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *ClientStatsPayload) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *ClientStatsPayload) GetGitCommitSha() string { + if x != nil { + return x.GitCommitSha + } + return "" +} + +func (x *ClientStatsPayload) GetImageTag() string { + if x != nil { + return x.ImageTag + } + return "" +} + +func (x *ClientStatsPayload) GetProcessTagsHash() uint64 { + if x != nil { + return x.ProcessTagsHash + } + return 0 +} + +func (x *ClientStatsPayload)
GetProcessTags() string { + if x != nil { + return x.ProcessTags + } + return "" +} + +// ClientStatsBucket is a time bucket containing aggregated stats. +type ClientStatsBucket struct { + state protoimpl.MessageState `protogen:"open.v1"` + Start uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` // bucket start in nanoseconds + Duration uint64 `protobuf:"varint,2,opt,name=duration,proto3" json:"duration,omitempty"` // bucket duration in nanoseconds + // @gotags: json:"stats,omitempty" msg:"Stats,omitempty" + Stats []*ClientGroupedStats `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats,omitempty" msg:"Stats,omitempty"` + // AgentTimeShift is the shift applied by the agent stats aggregator on bucket start + // when the received bucket start is outside of the agent aggregation window + AgentTimeShift int64 `protobuf:"varint,4,opt,name=agentTimeShift,proto3" json:"agentTimeShift,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClientStatsBucket) Reset() { + *x = ClientStatsBucket{} + mi := &file_datadog_trace_stats_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClientStatsBucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatsBucket) ProtoMessage() {} + +func (x *ClientStatsBucket) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatsBucket.ProtoReflect.Descriptor instead. +func (*ClientStatsBucket) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{2} +} + +func (x *ClientStatsBucket) GetStart() uint64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *ClientStatsBucket) GetDuration() uint64 { + if x != nil { + return x.Duration + } + return 0 +} + +func (x *ClientStatsBucket) GetStats() []*ClientGroupedStats { + if x != nil { + return x.Stats + } + return nil +} + +func (x *ClientStatsBucket) GetAgentTimeShift() int64 { + if x != nil { + return x.AgentTimeShift + } + return 0 +} + +// ClientGroupedStats aggregates stats on spans grouped by service, name, resource, status_code, type +type ClientGroupedStats struct { + state protoimpl.MessageState `protogen:"open.v1"` + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + HTTPStatusCode uint32 `protobuf:"varint,4,opt,name=HTTP_status_code,json=HTTPStatusCode,proto3" json:"HTTP_status_code,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + DBType string `protobuf:"bytes,6,opt,name=DB_type,json=DBType,proto3" json:"DB_type,omitempty"` // db_type might be used in the future to help in the obfuscation step + Hits uint64 `protobuf:"varint,7,opt,name=hits,proto3" json:"hits,omitempty"` // count of all spans aggregated in the groupedstats + Errors uint64 `protobuf:"varint,8,opt,name=errors,proto3" json:"errors,omitempty"` // count of error spans aggregated in the groupedstats + Duration uint64 `protobuf:"varint,9,opt,name=duration,proto3" json:"duration,omitempty"` // total duration in nanoseconds of spans aggregated in the bucket +
OkSummary []byte `protobuf:"bytes,10,opt,name=okSummary,proto3" json:"okSummary,omitempty"` // ddsketch summary of ok spans latencies encoded in protobuf + ErrorSummary []byte `protobuf:"bytes,11,opt,name=errorSummary,proto3" json:"errorSummary,omitempty"` // ddsketch summary of error spans latencies encoded in protobuf + Synthetics bool `protobuf:"varint,12,opt,name=synthetics,proto3" json:"synthetics,omitempty"` // set to true on spans generated by synthetics traffic + TopLevelHits uint64 `protobuf:"varint,13,opt,name=topLevelHits,proto3" json:"topLevelHits,omitempty"` // count of top level spans aggregated in the groupedstats + SpanKind string `protobuf:"bytes,15,opt,name=span_kind,json=spanKind,proto3" json:"span_kind,omitempty"` // value of the span.kind tag on the span + // peer_tags are supplementary tags that further describe a peer entity + // E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the name of peer DB + PeerTags []string `protobuf:"bytes,16,rep,name=peer_tags,json=peerTags,proto3" json:"peer_tags,omitempty"` + IsTraceRoot Trilean `protobuf:"varint,17,opt,name=is_trace_root,json=isTraceRoot,proto3,enum=datadog.trace.Trilean" json:"is_trace_root,omitempty"` // this field's value is equal to span's ParentID == 0. + GRPCStatusCode string `protobuf:"bytes,18,opt,name=GRPC_status_code,json=GRPCStatusCode,proto3" json:"GRPC_status_code,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClientGroupedStats) Reset() { + *x = ClientGroupedStats{} + mi := &file_datadog_trace_stats_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClientGroupedStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientGroupedStats) ProtoMessage() {} + +func (x *ClientGroupedStats) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientGroupedStats.ProtoReflect.Descriptor instead. 
+func (*ClientGroupedStats) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{3} +} + +func (x *ClientGroupedStats) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *ClientGroupedStats) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ClientGroupedStats) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *ClientGroupedStats) GetHTTPStatusCode() uint32 { + if x != nil { + return x.HTTPStatusCode + } + return 0 +} + +func (x *ClientGroupedStats) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ClientGroupedStats) GetDBType() string { + if x != nil { + return x.DBType + } + return "" +} + +func (x *ClientGroupedStats) GetHits() uint64 { + if x != nil { + return x.Hits + } + return 0 +} + +func (x *ClientGroupedStats) GetErrors() uint64 { + if x != nil { + return x.Errors + } + return 0 +} + +func (x *ClientGroupedStats) GetDuration() uint64 { + if x != nil { + return x.Duration + } + return 0 +} + +func (x *ClientGroupedStats) GetOkSummary() []byte { + if x != nil { + return x.OkSummary + } + return nil +} + +func (x *ClientGroupedStats) GetErrorSummary() []byte { + if x != nil { + return x.ErrorSummary + } + return nil +} + +func (x *ClientGroupedStats) GetSynthetics() bool { + if x != nil { + return x.Synthetics + } + return false +} + +func (x *ClientGroupedStats) GetTopLevelHits() uint64 { + if x != nil { + return x.TopLevelHits + } + return 0 +} + +func (x *ClientGroupedStats) GetSpanKind() string { + if x != nil { + return x.SpanKind + } + return "" +} + +func (x *ClientGroupedStats) GetPeerTags() []string { + if x != nil { + return x.PeerTags + } + return nil +} + +func (x *ClientGroupedStats) GetIsTraceRoot() Trilean { + if x != nil { + return x.IsTraceRoot + } + return Trilean_NOT_SET +} + +func (x *ClientGroupedStats) GetGRPCStatusCode() string { + if x != nil { + return x.GRPCStatusCode + } + return "" +} + +var File_datadog_trace_stats_proto protoreflect.FileDescriptor + +const file_datadog_trace_stats_proto_rawDesc = "" + + "\n" + + "\x19datadog/trace/stats.proto\x12\rdatadog.trace\"\xf9\x01\n" + + "\fStatsPayload\x12$\n" + + "\ragentHostname\x18\x01 \x01(\tR\ragentHostname\x12\x1a\n" + + "\bagentEnv\x18\x02 \x01(\tR\bagentEnv\x127\n" + + "\x05stats\x18\x03 \x03(\v2!.datadog.trace.ClientStatsPayloadR\x05stats\x12\"\n" + + "\fagentVersion\x18\x04 \x01(\tR\fagentVersion\x12&\n" + + "\x0eclientComputed\x18\x05 \x01(\bR\x0eclientComputed\x12\"\n" + + "\fsplitPayload\x18\x06 \x01(\bR\fsplitPayload\"\x96\x04\n" + + "\x12ClientStatsPayload\x12\x1a\n" + + "\bhostname\x18\x01 \x01(\tR\bhostname\x12\x10\n" + + "\x03env\x18\x02 \x01(\tR\x03env\x12\x18\n" + + "\aversion\x18\x03 \x01(\tR\aversion\x126\n" + + "\x05stats\x18\x04 \x03(\v2 .datadog.trace.ClientStatsBucketR\x05stats\x12\x12\n" + + "\x04lang\x18\x05 \x01(\tR\x04lang\x12$\n" + + "\rtracerVersion\x18\x06 \x01(\tR\rtracerVersion\x12\x1c\n" + + "\truntimeID\x18\a \x01(\tR\truntimeID\x12\x1a\n" + + "\bsequence\x18\b \x01(\x04R\bsequence\x12*\n" + + "\x10agentAggregation\x18\t \x01(\tR\x10agentAggregation\x12\x18\n" + + "\aservice\x18\n" + + " \x01(\tR\aservice\x12 \n" + + "\vcontainerID\x18\v \x01(\tR\vcontainerID\x12\x12\n" + + "\x04tags\x18\f \x03(\tR\x04tags\x12$\n" + + "\x0egit_commit_sha\x18\r \x01(\tR\fgitCommitSha\x12\x1b\n" + + "\timage_tag\x18\x0e \x01(\tR\bimageTag\x12*\n" + + "\x11process_tags_hash\x18\x0f \x01(\x04R\x0fprocessTagsHash\x12!\n" + 
+ "\fprocess_tags\x18\x10 \x01(\tR\vprocessTags\"\xa6\x01\n" + + "\x11ClientStatsBucket\x12\x14\n" + + "\x05start\x18\x01 \x01(\x04R\x05start\x12\x1a\n" + + "\bduration\x18\x02 \x01(\x04R\bduration\x127\n" + + "\x05stats\x18\x03 \x03(\v2!.datadog.trace.ClientGroupedStatsR\x05stats\x12&\n" + + "\x0eagentTimeShift\x18\x04 \x01(\x03R\x0eagentTimeShift\"\xa9\x04\n" + + "\x12ClientGroupedStats\x12\x18\n" + + "\aservice\x18\x01 \x01(\tR\aservice\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12\x1a\n" + + "\bresource\x18\x03 \x01(\tR\bresource\x12(\n" + + "\x10HTTP_status_code\x18\x04 \x01(\rR\x0eHTTPStatusCode\x12\x12\n" + + "\x04type\x18\x05 \x01(\tR\x04type\x12\x17\n" + + "\aDB_type\x18\x06 \x01(\tR\x06DBType\x12\x12\n" + + "\x04hits\x18\a \x01(\x04R\x04hits\x12\x16\n" + + "\x06errors\x18\b \x01(\x04R\x06errors\x12\x1a\n" + + "\bduration\x18\t \x01(\x04R\bduration\x12\x1c\n" + + "\tokSummary\x18\n" + + " \x01(\fR\tokSummary\x12\"\n" + + "\ferrorSummary\x18\v \x01(\fR\ferrorSummary\x12\x1e\n" + + "\n" + + "synthetics\x18\f \x01(\bR\n" + + "synthetics\x12\"\n" + + "\ftopLevelHits\x18\r \x01(\x04R\ftopLevelHits\x12\x1b\n" + + "\tspan_kind\x18\x0f \x01(\tR\bspanKind\x12\x1b\n" + + "\tpeer_tags\x18\x10 \x03(\tR\bpeerTags\x12:\n" + + "\ris_trace_root\x18\x11 \x01(\x0e2\x16.datadog.trace.TrileanR\visTraceRoot\x12(\n" + + "\x10GRPC_status_code\x18\x12 \x01(\tR\x0eGRPCStatusCodeJ\x04\b\x0e\x10\x0f*+\n" + + "\aTrilean\x12\v\n" + + "\aNOT_SET\x10\x00\x12\b\n" + + "\x04TRUE\x10\x01\x12\t\n" + + "\x05FALSE\x10\x02*R\n" + + "\rTraceRootFlag\x12\x16\n" + + "\x12DEPRECATED_NOT_SET\x10\x00\x12\x13\n" + + "\x0fDEPRECATED_TRUE\x10\x01\x12\x14\n" + + "\x10DEPRECATED_FALSE\x10\x02B\x16Z\x14pkg/proto/pbgo/traceb\x06proto3" + +var ( + file_datadog_trace_stats_proto_rawDescOnce sync.Once + file_datadog_trace_stats_proto_rawDescData []byte +) + +func file_datadog_trace_stats_proto_rawDescGZIP() []byte { + file_datadog_trace_stats_proto_rawDescOnce.Do(func() { + file_datadog_trace_stats_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_datadog_trace_stats_proto_rawDesc), len(file_datadog_trace_stats_proto_rawDesc))) + }) + return file_datadog_trace_stats_proto_rawDescData +} + +var file_datadog_trace_stats_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_datadog_trace_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_datadog_trace_stats_proto_goTypes = []any{ + (Trilean)(0), // 0: datadog.trace.Trilean + (TraceRootFlag)(0), // 1: datadog.trace.TraceRootFlag + (*StatsPayload)(nil), // 2: datadog.trace.StatsPayload + (*ClientStatsPayload)(nil), // 3: datadog.trace.ClientStatsPayload + (*ClientStatsBucket)(nil), // 4: datadog.trace.ClientStatsBucket + (*ClientGroupedStats)(nil), // 5: datadog.trace.ClientGroupedStats +} +var file_datadog_trace_stats_proto_depIdxs = []int32{ + 3, // 0: datadog.trace.StatsPayload.stats:type_name -> datadog.trace.ClientStatsPayload + 4, // 1: datadog.trace.ClientStatsPayload.stats:type_name -> datadog.trace.ClientStatsBucket + 5, // 2: datadog.trace.ClientStatsBucket.stats:type_name -> datadog.trace.ClientGroupedStats + 0, // 3: datadog.trace.ClientGroupedStats.is_trace_root:type_name -> datadog.trace.Trilean + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_datadog_trace_stats_proto_init() } +func 
file_datadog_trace_stats_proto_init() { + if File_datadog_trace_stats_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_datadog_trace_stats_proto_rawDesc), len(file_datadog_trace_stats_proto_rawDesc)), + NumEnums: 2, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_datadog_trace_stats_proto_goTypes, + DependencyIndexes: file_datadog_trace_stats_proto_depIdxs, + EnumInfos: file_datadog_trace_stats_proto_enumTypes, + MessageInfos: file_datadog_trace_stats_proto_msgTypes, + }.Build() + File_datadog_trace_stats_proto = out.File + file_datadog_trace_stats_proto_goTypes = nil + file_datadog_trace_stats_proto_depIdxs = nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_gen.go new file mode 100644 index 00000000..1102c84c --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_gen.go @@ -0,0 +1,1931 @@ +package trace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *ClientGroupedStats) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Service": + z.Service, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "Name": + z.Name, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "Resource": + z.Resource, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Resource") + return + } + case "HTTPStatusCode": + z.HTTPStatusCode, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "HTTPStatusCode") + return + } + case "Type": + z.Type, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + case "DBType": + z.DBType, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "DBType") + return + } + case "Hits": + z.Hits, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Hits") + return + } + case "Errors": + z.Errors, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Errors") + return + } + case "Duration": + z.Duration, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + case "OkSummary": + z.OkSummary, err = dc.ReadBytes(z.OkSummary) + if err != nil { + err = msgp.WrapError(err, "OkSummary") + return + } + case "ErrorSummary": + z.ErrorSummary, err = dc.ReadBytes(z.ErrorSummary) + if err != nil { + err = msgp.WrapError(err, "ErrorSummary") + return + } + case "Synthetics": + z.Synthetics, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Synthetics") + return + } + case "TopLevelHits": + z.TopLevelHits, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "TopLevelHits") + return + } + case "SpanKind": + z.SpanKind, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "SpanKind") + return + } + case "PeerTags": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil 
{ + err = msgp.WrapError(err, "PeerTags") + return + } + if cap(z.PeerTags) >= int(zb0002) { + z.PeerTags = (z.PeerTags)[:zb0002] + } else { + z.PeerTags = make([]string, zb0002) + } + for za0001 := range z.PeerTags { + z.PeerTags[za0001], err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "PeerTags", za0001) + return + } + } + case "IsTraceRoot": + { + var zb0003 int32 + zb0003, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "IsTraceRoot") + return + } + z.IsTraceRoot = Trilean(zb0003) + } + case "GRPCStatusCode": + z.GRPCStatusCode, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "GRPCStatusCode") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ClientGroupedStats) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 17 + // write "Service" + err = en.Append(0xde, 0x0, 0x11, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Service) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + // write "Name" + err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Name) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + // write "Resource" + err = en.Append(0xa8, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Resource) + if err != nil { + err = msgp.WrapError(err, "Resource") + return + } + // write "HTTPStatusCode" + err = en.Append(0xae, 0x48, 0x54, 0x54, 0x50, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65) + if err != nil { + return + } + err = en.WriteUint32(z.HTTPStatusCode) + if err != nil { + err = msgp.WrapError(err, "HTTPStatusCode") + return + } + // write "Type" + err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Type) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + // write "DBType" + err = en.Append(0xa6, 0x44, 0x42, 0x54, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteString(z.DBType) + if err != nil { + err = msgp.WrapError(err, "DBType") + return + } + // write "Hits" + err = en.Append(0xa4, 0x48, 0x69, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteUint64(z.Hits) + if err != nil { + err = msgp.WrapError(err, "Hits") + return + } + // write "Errors" + err = en.Append(0xa6, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73) + if err != nil { + return + } + err = en.WriteUint64(z.Errors) + if err != nil { + err = msgp.WrapError(err, "Errors") + return + } + // write "Duration" + err = en.Append(0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteUint64(z.Duration) + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + // write "OkSummary" + err = en.Append(0xa9, 0x4f, 0x6b, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) + if err != nil { + return + } + err = en.WriteBytes(z.OkSummary) + if err != nil { + err = msgp.WrapError(err, "OkSummary") + return + } + // write "ErrorSummary" + err = en.Append(0xac, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) + if err != nil { + return + } + err = en.WriteBytes(z.ErrorSummary) + if err != nil { + err = msgp.WrapError(err, "ErrorSummary") + return + } + // write "Synthetics" + err = en.Append(0xaa, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x73) + if err != nil { + 
return + } + err = en.WriteBool(z.Synthetics) + if err != nil { + err = msgp.WrapError(err, "Synthetics") + return + } + // write "TopLevelHits" + err = en.Append(0xac, 0x54, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteUint64(z.TopLevelHits) + if err != nil { + err = msgp.WrapError(err, "TopLevelHits") + return + } + // write "SpanKind" + err = en.Append(0xa8, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64) + if err != nil { + return + } + err = en.WriteString(z.SpanKind) + if err != nil { + err = msgp.WrapError(err, "SpanKind") + return + } + // write "PeerTags" + err = en.Append(0xa8, 0x50, 0x65, 0x65, 0x72, 0x54, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PeerTags))) + if err != nil { + err = msgp.WrapError(err, "PeerTags") + return + } + for za0001 := range z.PeerTags { + err = en.WriteString(z.PeerTags[za0001]) + if err != nil { + err = msgp.WrapError(err, "PeerTags", za0001) + return + } + } + // write "IsTraceRoot" + err = en.Append(0xab, 0x49, 0x73, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x6f, 0x74) + if err != nil { + return + } + err = en.WriteInt32(int32(z.IsTraceRoot)) + if err != nil { + err = msgp.WrapError(err, "IsTraceRoot") + return + } + // write "GRPCStatusCode" + err = en.Append(0xae, 0x47, 0x52, 0x50, 0x43, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65) + if err != nil { + return + } + err = en.WriteString(z.GRPCStatusCode) + if err != nil { + err = msgp.WrapError(err, "GRPCStatusCode") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ClientGroupedStats) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 17 + // string "Service" + o = append(o, 0xde, 0x0, 0x11, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + o = msgp.AppendString(o, z.Service) + // string "Name" + o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Name) + // string "Resource" + o = append(o, 0xa8, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) + o = msgp.AppendString(o, z.Resource) + // string "HTTPStatusCode" + o = append(o, 0xae, 0x48, 0x54, 0x54, 0x50, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65) + o = msgp.AppendUint32(o, z.HTTPStatusCode) + // string "Type" + o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendString(o, z.Type) + // string "DBType" + o = append(o, 0xa6, 0x44, 0x42, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendString(o, z.DBType) + // string "Hits" + o = append(o, 0xa4, 0x48, 0x69, 0x74, 0x73) + o = msgp.AppendUint64(o, z.Hits) + // string "Errors" + o = append(o, 0xa6, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73) + o = msgp.AppendUint64(o, z.Errors) + // string "Duration" + o = append(o, 0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Duration) + // string "OkSummary" + o = append(o, 0xa9, 0x4f, 0x6b, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) + o = msgp.AppendBytes(o, z.OkSummary) + // string "ErrorSummary" + o = append(o, 0xac, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) + o = msgp.AppendBytes(o, z.ErrorSummary) + // string "Synthetics" + o = append(o, 0xaa, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x73) + o = msgp.AppendBool(o, z.Synthetics) + // string "TopLevelHits" + o = append(o, 0xac, 0x54, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73) + o = msgp.AppendUint64(o, z.TopLevelHits) + // string "SpanKind" + o = append(o, 0xa8, 0x53, 0x70, 
0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64) + o = msgp.AppendString(o, z.SpanKind) + // string "PeerTags" + o = append(o, 0xa8, 0x50, 0x65, 0x65, 0x72, 0x54, 0x61, 0x67, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PeerTags))) + for za0001 := range z.PeerTags { + o = msgp.AppendString(o, z.PeerTags[za0001]) + } + // string "IsTraceRoot" + o = append(o, 0xab, 0x49, 0x73, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x6f, 0x74) + o = msgp.AppendInt32(o, int32(z.IsTraceRoot)) + // string "GRPCStatusCode" + o = append(o, 0xae, 0x47, 0x52, 0x50, 0x43, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65) + o = msgp.AppendString(o, z.GRPCStatusCode) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ClientGroupedStats) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Service": + z.Service, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "Name": + z.Name, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "Resource": + z.Resource, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Resource") + return + } + case "HTTPStatusCode": + z.HTTPStatusCode, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "HTTPStatusCode") + return + } + case "Type": + z.Type, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + case "DBType": + z.DBType, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DBType") + return + } + case "Hits": + z.Hits, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hits") + return + } + case "Errors": + z.Errors, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Errors") + return + } + case "Duration": + z.Duration, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + case "OkSummary": + z.OkSummary, bts, err = msgp.ReadBytesBytes(bts, z.OkSummary) + if err != nil { + err = msgp.WrapError(err, "OkSummary") + return + } + case "ErrorSummary": + z.ErrorSummary, bts, err = msgp.ReadBytesBytes(bts, z.ErrorSummary) + if err != nil { + err = msgp.WrapError(err, "ErrorSummary") + return + } + case "Synthetics": + z.Synthetics, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Synthetics") + return + } + case "TopLevelHits": + z.TopLevelHits, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TopLevelHits") + return + } + case "SpanKind": + z.SpanKind, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SpanKind") + return + } + case "PeerTags": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PeerTags") + return + } + if cap(z.PeerTags) >= int(zb0002) { + z.PeerTags = (z.PeerTags)[:zb0002] + } else { + z.PeerTags = make([]string, zb0002) + } + for za0001 := range z.PeerTags { + z.PeerTags[za0001], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, 
"PeerTags", za0001) + return + } + } + case "IsTraceRoot": + { + var zb0003 int32 + zb0003, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsTraceRoot") + return + } + z.IsTraceRoot = Trilean(zb0003) + } + case "GRPCStatusCode": + z.GRPCStatusCode, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "GRPCStatusCode") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ClientGroupedStats) Msgsize() (s int) { + s = 3 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 15 + msgp.Uint32Size + 5 + msgp.StringPrefixSize + len(z.Type) + 7 + msgp.StringPrefixSize + len(z.DBType) + 5 + msgp.Uint64Size + 7 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.BytesPrefixSize + len(z.OkSummary) + 13 + msgp.BytesPrefixSize + len(z.ErrorSummary) + 11 + msgp.BoolSize + 13 + msgp.Uint64Size + 9 + msgp.StringPrefixSize + len(z.SpanKind) + 9 + msgp.ArrayHeaderSize + for za0001 := range z.PeerTags { + s += msgp.StringPrefixSize + len(z.PeerTags[za0001]) + } + s += 12 + msgp.Int32Size + 15 + msgp.StringPrefixSize + len(z.GRPCStatusCode) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ClientStatsBucket) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Start": + z.Start, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Start") + return + } + case "Duration": + z.Duration, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + case "Stats": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = make([]*ClientGroupedStats, zb0002) + } + for za0001 := range z.Stats { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientGroupedStats) + } + err = z.Stats[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "AgentTimeShift": + z.AgentTimeShift, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "AgentTimeShift") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ClientStatsBucket) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(4) + var zb0001Mask uint8 /* 4 bits */ + _ = zb0001Mask + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "Start" + err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x72, 0x74) + if err != nil { + return + } + err = en.WriteUint64(z.Start) 
+ if err != nil { + err = msgp.WrapError(err, "Start") + return + } + // write "Duration" + err = en.Append(0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteUint64(z.Duration) + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + if (zb0001Mask & 0x4) == 0 { // if not omitted + // write "Stats" + err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Stats))) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Stats[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // write "AgentTimeShift" + err = en.Append(0xae, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74) + if err != nil { + return + } + err = en.WriteInt64(z.AgentTimeShift) + if err != nil { + err = msgp.WrapError(err, "AgentTimeShift") + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ClientStatsBucket) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // check for omitted fields + zb0001Len := uint32(4) + var zb0001Mask uint8 /* 4 bits */ + _ = zb0001Mask + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "Start" + o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x72, 0x74) + o = msgp.AppendUint64(o, z.Start) + // string "Duration" + o = append(o, 0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Duration) + if (zb0001Mask & 0x4) == 0 { // if not omitted + // string "Stats" + o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Stats[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // string "AgentTimeShift" + o = append(o, 0xae, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74) + o = msgp.AppendInt64(o, z.AgentTimeShift) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ClientStatsBucket) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Start": + z.Start, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Start") + return + } + case "Duration": + z.Duration, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + case "Stats": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = make([]*ClientGroupedStats, zb0002) + } + for za0001 := range z.Stats { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + 
} + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientGroupedStats) + } + bts, err = z.Stats[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "AgentTimeShift": + z.AgentTimeShift, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentTimeShift") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ClientStatsBucket) Msgsize() (s int) { + s = 1 + 6 + msgp.Uint64Size + 9 + msgp.Uint64Size + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Stats[za0001].Msgsize() + } + } + s += 15 + msgp.Int64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ClientStatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Hostname": + z.Hostname, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Hostname") + return + } + case "Env": + z.Env, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + case "Version": + z.Version, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Stats": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = make([]*ClientStatsBucket, zb0002) + } + for za0001 := range z.Stats { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsBucket) + } + err = z.Stats[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "Lang": + z.Lang, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Lang") + return + } + case "TracerVersion": + z.TracerVersion, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + case "RuntimeID": + z.RuntimeID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "RuntimeID") + return + } + case "Sequence": + z.Sequence, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Sequence") + return + } + case "AgentAggregation": + z.AgentAggregation, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AgentAggregation") + return + } + case "Service": + z.Service, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "ContainerID": + z.ContainerID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ContainerID") + return + } + case "Tags": + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if cap(z.Tags) >= int(zb0003) { + z.Tags = (z.Tags)[:zb0003] + } else { + z.Tags = make([]string, zb0003) + } + for za0002 := range 
z.Tags { + z.Tags[za0002], err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + } + case "GitCommitSha": + z.GitCommitSha, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "GitCommitSha") + return + } + case "ImageTag": + z.ImageTag, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ImageTag") + return + } + case "ProcessTagsHash": + z.ProcessTagsHash, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "ProcessTagsHash") + return + } + case "ProcessTags": + z.ProcessTags, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ProcessTags") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ClientStatsPayload) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(16) + var zb0001Mask uint16 /* 16 bits */ + _ = zb0001Mask + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + err = en.WriteMapHeader(zb0001Len) + if err != nil { + return + } + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "Hostname" + err = en.Append(0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Hostname) + if err != nil { + err = msgp.WrapError(err, "Hostname") + return + } + // write "Env" + err = en.Append(0xa3, 0x45, 0x6e, 0x76) + if err != nil { + return + } + err = en.WriteString(z.Env) + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + // write "Version" + err = en.Append(0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Version) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + if (zb0001Mask & 0x8) == 0 { // if not omitted + // write "Stats" + err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Stats))) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Stats[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // write "Lang" + err = en.Append(0xa4, 0x4c, 0x61, 0x6e, 0x67) + if err != nil { + return + } + err = en.WriteString(z.Lang) + if err != nil { + err = msgp.WrapError(err, "Lang") + return + } + // write "TracerVersion" + err = en.Append(0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.TracerVersion) + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + // write "RuntimeID" + err = en.Append(0xa9, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteString(z.RuntimeID) + if err != nil { + err = msgp.WrapError(err, "RuntimeID") + return + } + // write "Sequence" + err = en.Append(0xa8, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteUint64(z.Sequence) + if err != nil { + err = msgp.WrapError(err, "Sequence") + return + } + // write "AgentAggregation" + err = en.Append(0xb0, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = 
en.WriteString(z.AgentAggregation) + if err != nil { + err = msgp.WrapError(err, "AgentAggregation") + return + } + // write "Service" + err = en.Append(0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Service) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + // write "ContainerID" + err = en.Append(0xab, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteString(z.ContainerID) + if err != nil { + err = msgp.WrapError(err, "ContainerID") + return + } + // write "Tags" + err = en.Append(0xa4, 0x54, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Tags))) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + for za0002 := range z.Tags { + err = en.WriteString(z.Tags[za0002]) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + } + // write "GitCommitSha" + err = en.Append(0xac, 0x47, 0x69, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x68, 0x61) + if err != nil { + return + } + err = en.WriteString(z.GitCommitSha) + if err != nil { + err = msgp.WrapError(err, "GitCommitSha") + return + } + // write "ImageTag" + err = en.Append(0xa8, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x54, 0x61, 0x67) + if err != nil { + return + } + err = en.WriteString(z.ImageTag) + if err != nil { + err = msgp.WrapError(err, "ImageTag") + return + } + // write "ProcessTagsHash" + err = en.Append(0xaf, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x54, 0x61, 0x67, 0x73, 0x48, 0x61, 0x73, 0x68) + if err != nil { + return + } + err = en.WriteUint64(z.ProcessTagsHash) + if err != nil { + err = msgp.WrapError(err, "ProcessTagsHash") + return + } + // write "ProcessTags" + err = en.Append(0xab, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x54, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteString(z.ProcessTags) + if err != nil { + err = msgp.WrapError(err, "ProcessTags") + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ClientStatsPayload) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // check for omitted fields + zb0001Len := uint32(16) + var zb0001Mask uint16 /* 16 bits */ + _ = zb0001Mask + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + o = msgp.AppendMapHeader(o, zb0001Len) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "Hostname" + o = append(o, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Hostname) + // string "Env" + o = append(o, 0xa3, 0x45, 0x6e, 0x76) + o = msgp.AppendString(o, z.Env) + // string "Version" + o = append(o, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.Version) + if (zb0001Mask & 0x8) == 0 { // if not omitted + // string "Stats" + o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Stats[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // string "Lang" + o = append(o, 0xa4, 0x4c, 0x61, 0x6e, 0x67) + o = msgp.AppendString(o, z.Lang) + // string "TracerVersion" + o = append(o, 0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.TracerVersion) + // string "RuntimeID" + o = append(o, 
0xa9, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44) + o = msgp.AppendString(o, z.RuntimeID) + // string "Sequence" + o = append(o, 0xa8, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65) + o = msgp.AppendUint64(o, z.Sequence) + // string "AgentAggregation" + o = append(o, 0xb0, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AgentAggregation) + // string "Service" + o = append(o, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + o = msgp.AppendString(o, z.Service) + // string "ContainerID" + o = append(o, 0xab, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44) + o = msgp.AppendString(o, z.ContainerID) + // string "Tags" + o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Tags))) + for za0002 := range z.Tags { + o = msgp.AppendString(o, z.Tags[za0002]) + } + // string "GitCommitSha" + o = append(o, 0xac, 0x47, 0x69, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x68, 0x61) + o = msgp.AppendString(o, z.GitCommitSha) + // string "ImageTag" + o = append(o, 0xa8, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x54, 0x61, 0x67) + o = msgp.AppendString(o, z.ImageTag) + // string "ProcessTagsHash" + o = append(o, 0xaf, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x54, 0x61, 0x67, 0x73, 0x48, 0x61, 0x73, 0x68) + o = msgp.AppendUint64(o, z.ProcessTagsHash) + // string "ProcessTags" + o = append(o, 0xab, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x54, 0x61, 0x67, 0x73) + o = msgp.AppendString(o, z.ProcessTags) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ClientStatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Hostname": + z.Hostname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hostname") + return + } + case "Env": + z.Env, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + case "Version": + z.Version, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Stats": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = make([]*ClientStatsBucket, zb0002) + } + for za0001 := range z.Stats { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsBucket) + } + bts, err = z.Stats[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "Lang": + z.Lang, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Lang") + return + } + case "TracerVersion": + z.TracerVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + case "RuntimeID": + z.RuntimeID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RuntimeID") + return + } + case "Sequence": + z.Sequence, bts, err = 
msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Sequence") + return + } + case "AgentAggregation": + z.AgentAggregation, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentAggregation") + return + } + case "Service": + z.Service, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "ContainerID": + z.ContainerID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ContainerID") + return + } + case "Tags": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if cap(z.Tags) >= int(zb0003) { + z.Tags = (z.Tags)[:zb0003] + } else { + z.Tags = make([]string, zb0003) + } + for za0002 := range z.Tags { + z.Tags[za0002], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + } + case "GitCommitSha": + z.GitCommitSha, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "GitCommitSha") + return + } + case "ImageTag": + z.ImageTag, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ImageTag") + return + } + case "ProcessTagsHash": + z.ProcessTagsHash, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ProcessTagsHash") + return + } + case "ProcessTags": + z.ProcessTags, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ProcessTags") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ClientStatsPayload) Msgsize() (s int) { + s = 3 + 9 + msgp.StringPrefixSize + len(z.Hostname) + 4 + msgp.StringPrefixSize + len(z.Env) + 8 + msgp.StringPrefixSize + len(z.Version) + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Stats[za0001].Msgsize() + } + } + s += 5 + msgp.StringPrefixSize + len(z.Lang) + 14 + msgp.StringPrefixSize + len(z.TracerVersion) + 10 + msgp.StringPrefixSize + len(z.RuntimeID) + 9 + msgp.Uint64Size + 17 + msgp.StringPrefixSize + len(z.AgentAggregation) + 8 + msgp.StringPrefixSize + len(z.Service) + 12 + msgp.StringPrefixSize + len(z.ContainerID) + 5 + msgp.ArrayHeaderSize + for za0002 := range z.Tags { + s += msgp.StringPrefixSize + len(z.Tags[za0002]) + } + s += 13 + msgp.StringPrefixSize + len(z.GitCommitSha) + 9 + msgp.StringPrefixSize + len(z.ImageTag) + 16 + msgp.Uint64Size + 12 + msgp.StringPrefixSize + len(z.ProcessTags) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *StatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "AgentHostname": + z.AgentHostname, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AgentHostname") + return + } + case "AgentEnv": + z.AgentEnv, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AgentEnv") + return + } + case "Stats": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != 
nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = make([]*ClientStatsPayload, zb0002) + } + for za0001 := range z.Stats { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsPayload) + } + err = z.Stats[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "AgentVersion": + z.AgentVersion, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AgentVersion") + return + } + case "ClientComputed": + z.ClientComputed, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "ClientComputed") + return + } + case "SplitPayload": + z.SplitPayload, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "SplitPayload") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *StatsPayload) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(6) + var zb0001Mask uint8 /* 6 bits */ + _ = zb0001Mask + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "AgentHostname" + err = en.Append(0xad, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.AgentHostname) + if err != nil { + err = msgp.WrapError(err, "AgentHostname") + return + } + // write "AgentEnv" + err = en.Append(0xa8, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76) + if err != nil { + return + } + err = en.WriteString(z.AgentEnv) + if err != nil { + err = msgp.WrapError(err, "AgentEnv") + return + } + if (zb0001Mask & 0x4) == 0 { // if not omitted + // write "Stats" + err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Stats))) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Stats[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // write "AgentVersion" + err = en.Append(0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.AgentVersion) + if err != nil { + err = msgp.WrapError(err, "AgentVersion") + return + } + // write "ClientComputed" + err = en.Append(0xae, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64) + if err != nil { + return + } + err = en.WriteBool(z.ClientComputed) + if err != nil { + err = msgp.WrapError(err, "ClientComputed") + return + } + // write "SplitPayload" + err = en.Append(0xac, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64) + if err != nil { + return + } + err = en.WriteBool(z.SplitPayload) + if err != nil { + err = msgp.WrapError(err, "SplitPayload") + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *StatsPayload) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) 
+ // check for omitted fields + zb0001Len := uint32(6) + var zb0001Mask uint8 /* 6 bits */ + _ = zb0001Mask + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "AgentHostname" + o = append(o, 0xad, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.AgentHostname) + // string "AgentEnv" + o = append(o, 0xa8, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76) + o = msgp.AppendString(o, z.AgentEnv) + if (zb0001Mask & 0x4) == 0 { // if not omitted + // string "Stats" + o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Stats[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // string "AgentVersion" + o = append(o, 0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AgentVersion) + // string "ClientComputed" + o = append(o, 0xae, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64) + o = msgp.AppendBool(o, z.ClientComputed) + // string "SplitPayload" + o = append(o, 0xac, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64) + o = msgp.AppendBool(o, z.SplitPayload) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *StatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "AgentHostname": + z.AgentHostname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentHostname") + return + } + case "AgentEnv": + z.AgentEnv, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentEnv") + return + } + case "Stats": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = make([]*ClientStatsPayload, zb0002) + } + for za0001 := range z.Stats { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsPayload) + } + bts, err = z.Stats[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "AgentVersion": + z.AgentVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentVersion") + return + } + case "ClientComputed": + z.ClientComputed, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ClientComputed") + return + } + case "SplitPayload": + z.SplitPayload, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SplitPayload") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper 
bound estimate of the number of bytes occupied by the serialized message +func (z *StatsPayload) Msgsize() (s int) { + s = 1 + 14 + msgp.StringPrefixSize + len(z.AgentHostname) + 9 + msgp.StringPrefixSize + len(z.AgentEnv) + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Stats[za0001].Msgsize() + } + } + s += 13 + msgp.StringPrefixSize + len(z.AgentVersion) + 15 + msgp.BoolSize + 13 + msgp.BoolSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *TraceRootFlag) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 int32 + zb0001, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = TraceRootFlag(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z TraceRootFlag) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteInt32(int32(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z TraceRootFlag) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendInt32(o, int32(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TraceRootFlag) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 int32 + zb0001, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = TraceRootFlag(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z TraceRootFlag) Msgsize() (s int) { + s = msgp.Int32Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *Trilean) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 int32 + zb0001, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = Trilean(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z Trilean) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteInt32(int32(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z Trilean) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendInt32(o, int32(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Trilean) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 int32 + zb0001, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = Trilean(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z Trilean) Msgsize() (s int) { + s = msgp.Int32Size + return +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_vtproto.pb.go new file mode 100644 index 00000000..0e6fa088 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_vtproto.pb.go @@ -0,0 +1,2086 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.1-0.20240319094008-0393e58bdf10 +// source: datadog/trace/stats.proto + +package trace + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StatsPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsPayload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StatsPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SplitPayload { + i-- + if m.SplitPayload { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.ClientComputed { + i-- + if m.ClientComputed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.AgentVersion) > 0 { + i -= len(m.AgentVersion) + copy(dAtA[i:], m.AgentVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AgentVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.AgentEnv) > 0 { + i -= len(m.AgentEnv) + copy(dAtA[i:], m.AgentEnv) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AgentEnv))) + i-- + dAtA[i] = 0x12 + } + if len(m.AgentHostname) > 0 { + i -= len(m.AgentHostname) + copy(dAtA[i:], m.AgentHostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AgentHostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStatsPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatsPayload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ClientStatsPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ProcessTags) > 0 { + i -= len(m.ProcessTags) + copy(dAtA[i:], m.ProcessTags) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ProcessTags))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.ProcessTagsHash != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ProcessTagsHash)) + i-- + dAtA[i] = 0x78 + } + if len(m.ImageTag) > 0 { + i -= len(m.ImageTag) + copy(dAtA[i:], m.ImageTag) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ImageTag))) + i-- + dAtA[i] = 0x72 + } + if len(m.GitCommitSha) > 0 { + i -= len(m.GitCommitSha) + copy(dAtA[i:], m.GitCommitSha) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.GitCommitSha))) + i-- + dAtA[i] = 0x6a + } + if len(m.Tags) > 0 { + for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tags[iNdEx]) + copy(dAtA[i:], m.Tags[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tags[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } + if 
len(m.ContainerID) > 0 { + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x5a + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0x52 + } + if len(m.AgentAggregation) > 0 { + i -= len(m.AgentAggregation) + copy(dAtA[i:], m.AgentAggregation) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AgentAggregation))) + i-- + dAtA[i] = 0x4a + } + if m.Sequence != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x40 + } + if len(m.RuntimeID) > 0 { + i -= len(m.RuntimeID) + copy(dAtA[i:], m.RuntimeID) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeID))) + i-- + dAtA[i] = 0x3a + } + if len(m.TracerVersion) > 0 { + i -= len(m.TracerVersion) + copy(dAtA[i:], m.TracerVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TracerVersion))) + i-- + dAtA[i] = 0x32 + } + if len(m.Lang) > 0 { + i -= len(m.Lang) + copy(dAtA[i:], m.Lang) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Lang))) + i-- + dAtA[i] = 0x2a + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x1a + } + if len(m.Env) > 0 { + i -= len(m.Env) + copy(dAtA[i:], m.Env) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Env))) + i-- + dAtA[i] = 0x12 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStatsBucket) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatsBucket) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ClientStatsBucket) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AgentTimeShift != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AgentTimeShift)) + i-- + dAtA[i] = 0x20 + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.Duration != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ClientGroupedStats) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } 
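+	// MarshalToSizedBufferVT fills dAtA back to front; with an exactly sized buffer n equals size, so dAtA[:n] is the complete encoded message.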
+ return dAtA[:n], nil +} + +func (m *ClientGroupedStats) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ClientGroupedStats) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.GRPCStatusCode) > 0 { + i -= len(m.GRPCStatusCode) + copy(dAtA[i:], m.GRPCStatusCode) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.GRPCStatusCode))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.IsTraceRoot != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IsTraceRoot)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if len(m.PeerTags) > 0 { + for iNdEx := len(m.PeerTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PeerTags[iNdEx]) + copy(dAtA[i:], m.PeerTags[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PeerTags[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if len(m.SpanKind) > 0 { + i -= len(m.SpanKind) + copy(dAtA[i:], m.SpanKind) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SpanKind))) + i-- + dAtA[i] = 0x7a + } + if m.TopLevelHits != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TopLevelHits)) + i-- + dAtA[i] = 0x68 + } + if m.Synthetics { + i-- + if m.Synthetics { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if len(m.ErrorSummary) > 0 { + i -= len(m.ErrorSummary) + copy(dAtA[i:], m.ErrorSummary) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ErrorSummary))) + i-- + dAtA[i] = 0x5a + } + if len(m.OkSummary) > 0 { + i -= len(m.OkSummary) + copy(dAtA[i:], m.OkSummary) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OkSummary))) + i-- + dAtA[i] = 0x52 + } + if m.Duration != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x48 + } + if m.Errors != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Errors)) + i-- + dAtA[i] = 0x40 + } + if m.Hits != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Hits)) + i-- + dAtA[i] = 0x38 + } + if len(m.DBType) > 0 { + i -= len(m.DBType) + copy(dAtA[i:], m.DBType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DBType))) + i-- + dAtA[i] = 0x32 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + } + if m.HTTPStatusCode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HTTPStatusCode)) + i-- + dAtA[i] = 0x20 + } + if len(m.Resource) > 0 { + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatsPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AgentHostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AgentEnv) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = 
len(m.AgentVersion) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientComputed { + n += 2 + } + if m.SplitPayload { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClientStatsPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Env) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.Lang) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TracerVersion) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RuntimeID) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Sequence)) + } + l = len(m.AgentAggregation) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Service) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Tags) > 0 { + for _, s := range m.Tags { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.GitCommitSha) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ImageTag) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProcessTagsHash != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ProcessTagsHash)) + } + l = len(m.ProcessTags) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientStatsBucket) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Start)) + } + if m.Duration != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Duration)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AgentTimeShift != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.AgentTimeShift)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientGroupedStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Resource) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HTTPStatusCode != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HTTPStatusCode)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DBType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Hits != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Hits)) + } + if m.Errors != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Errors)) + } + if m.Duration != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Duration)) + } + l = len(m.OkSummary) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ErrorSummary) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Synthetics { + n += 2 + } + if m.TopLevelHits != 0 { + n 
+= 1 + protohelpers.SizeOfVarint(uint64(m.TopLevelHits)) + } + l = len(m.SpanKind) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.PeerTags) > 0 { + for _, s := range m.PeerTags { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IsTraceRoot != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.IsTraceRoot)) + } + l = len(m.GRPCStatusCode) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StatsPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatsPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatsPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentHostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentHostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentEnv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentEnv = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &ClientStatsPayload{}) + if err := m.Stats[len(m.Stats)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientComputed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClientComputed = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SplitPayload", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SplitPayload = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientStatsPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientStatsPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientStatsPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break 
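+					// A clear high bit marks the last byte of the base-128 varint.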
+ } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &ClientStatsBucket{}) + if err := m.Stats[len(m.Stats)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lang", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Lang = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TracerVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TracerVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentAggregation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentAggregation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitCommitSha", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GitCommitSha = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageTag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageTag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessTagsHash", wireType) + } + m.ProcessTagsHash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProcessTagsHash |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessTags = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
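+			// Unrecognized fields are preserved verbatim so payloads written by newer schema versions survive a decode/re-encode round trip.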
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientStatsBucket) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientStatsBucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientStatsBucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &ClientGroupedStats{}) + if err := m.Stats[len(m.Stats)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentTimeShift", wireType) + } + m.AgentTimeShift = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AgentTimeShift |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
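+			// skippy spans the whole unknown field, tag included, because iNdEx was reset to preIndex before calling Skip.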
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientGroupedStats) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientGroupedStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientGroupedStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPStatusCode", wireType) + } + m.HTTPStatusCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HTTPStatusCode |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DBType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DBType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hits", wireType) + } + m.Hits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hits |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) + } + m.Errors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Errors |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OkSummary", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OkSummary = append(m.OkSummary[:0], dAtA[iNdEx:postIndex]...) 
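+			// OkSummary is the encoded latency distribution of OK spans (a serialized DDSketch in the upstream proto); appending into the truncated slice reuses its backing array.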
+ if m.OkSummary == nil { + m.OkSummary = []byte{} + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorSummary", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorSummary = append(m.ErrorSummary[:0], dAtA[iNdEx:postIndex]...) + if m.ErrorSummary == nil { + m.ErrorSummary = []byte{} + } + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Synthetics", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Synthetics = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TopLevelHits", wireType) + } + m.TopLevelHits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TopLevelHits |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanKind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpanKind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerTags = append(m.PeerTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsTraceRoot", wireType) + } + m.IsTraceRoot = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IsTraceRoot |= Trilean(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GRPCStatusCode", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GRPCStatusCode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace.go new file mode 100644 index 00000000..94fd0eda --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace.go @@ -0,0 +1,52 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package trace + +//go:generate go run github.com/tinylib/msgp -file=span.pb.go -o span_gen.go -io=false +//go:generate go run github.com/tinylib/msgp -file=tracer_payload.pb.go -o tracer_payload_gen.go -io=false +//go:generate go run github.com/tinylib/msgp -io=false + +// Trace is a collection of spans with the same trace ID +type Trace []*Span + +// Traces is a list of traces. This model matters as this is what we unpack from msgp. +type Traces []Trace + +// RemoveChunk removes a chunk by its index. +func (p *TracerPayload) RemoveChunk(i int) { + if i < 0 || i >= len(p.Chunks) { + return + } + p.Chunks[i] = p.Chunks[len(p.Chunks)-1] + p.Chunks = p.Chunks[:len(p.Chunks)-1] +} + +// Cut cuts off a new tracer payload from the `p` with [0, i-1] chunks +// and keeps [i, n-1] chunks in the original payload `p`. +func (p *TracerPayload) Cut(i int) *TracerPayload { + if i < 0 { + i = 0 + } + if i > len(p.Chunks) { + i = len(p.Chunks) + } + newPayload := TracerPayload{ + ContainerID: p.GetContainerID(), + LanguageName: p.GetLanguageName(), + LanguageVersion: p.GetLanguageVersion(), + TracerVersion: p.GetTracerVersion(), + RuntimeID: p.GetRuntimeID(), + Env: p.GetEnv(), + Hostname: p.GetHostname(), + AppVersion: p.GetAppVersion(), + Tags: p.GetTags(), + } + + newPayload.Chunks = p.Chunks[:i] + p.Chunks = p.Chunks[i:] + + return &newPayload +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace_gen.go new file mode 100644 index 00000000..2a2865f3 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace_gen.go @@ -0,0 +1,158 @@ +package trace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z Trace) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendArrayHeader(o, uint32(len(z))) + for za0001 := range z { + if z[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, za0001) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Trace) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if cap((*z)) >= int(zb0002) { + (*z) = (*z)[:zb0002] + } else { + (*z) = make(Trace, zb0002) + } + for zb0001 := range *z { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + (*z)[zb0001] = nil + } else { + if (*z)[zb0001] == nil { + (*z)[zb0001] = new(Span) + } + bts, err = (*z)[zb0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z Trace) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + for zb0003 := range z { + if z[zb0003] == nil { + s += msgp.NilSize + } else { + s += z[zb0003].Msgsize() + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z Traces) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendArrayHeader(o, uint32(len(z))) + for za0001 := range z { + o = msgp.AppendArrayHeader(o, uint32(len(z[za0001]))) + for za0002 := range z[za0001] { + if z[za0001][za0002] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z[za0001][za0002].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, za0001, za0002) + return + } + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Traces) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if cap((*z)) >= int(zb0003) { + (*z) = (*z)[:zb0003] + } else { + (*z) = make(Traces, zb0003) + } + for zb0001 := range *z { + var zb0004 uint32 + zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + if cap((*z)[zb0001]) >= int(zb0004) { + (*z)[zb0001] = ((*z)[zb0001])[:zb0004] + } else { + (*z)[zb0001] = make(Trace, zb0004) + } + for zb0002 := range (*z)[zb0001] { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + (*z)[zb0001][zb0002] = nil + } else { + if (*z)[zb0001][zb0002] == nil { + (*z)[zb0001][zb0002] = new(Span) + } + bts, err = (*z)[zb0001][zb0002].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, zb0001, zb0002) + return + } + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z Traces) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + for zb0005 := range z { + s += msgp.ArrayHeaderSize + for zb0006 := range z[zb0005] { + if z[zb0005][zb0006] == nil { + s += msgp.NilSize + } else { + s += z[zb0005][zb0006].Msgsize() + } + } + } + return +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload.pb.go new file 
mode 100644 index 00000000..5ed5860f --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload.pb.go @@ -0,0 +1,336 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.6 +// protoc v5.29.3 +// source: datadog/trace/tracer_payload.proto + +package trace + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. +type TraceChunk struct { + state protoimpl.MessageState `protogen:"open.v1"` + // priority specifies sampling priority of the trace. + // @gotags: json:"priority" msg:"priority" + Priority int32 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority" msg:"priority"` + // origin specifies origin product ("lambda", "rum", etc.) of the trace. + // @gotags: json:"origin" msg:"origin" + Origin string `protobuf:"bytes,2,opt,name=origin,proto3" json:"origin" msg:"origin"` + // spans specifies list of containing spans. + // @gotags: json:"spans" msg:"spans" + Spans []*Span `protobuf:"bytes,3,rep,name=spans,proto3" json:"spans" msg:"spans"` + // tags specifies tags common in all `spans`. + // @gotags: json:"tags" msg:"tags" + Tags map[string]string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value" msg:"tags"` + // droppedTrace specifies whether the trace was dropped by samplers or not. + // @gotags: json:"dropped_trace" msg:"dropped_trace" + DroppedTrace bool `protobuf:"varint,5,opt,name=droppedTrace,proto3" json:"dropped_trace" msg:"dropped_trace"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TraceChunk) Reset() { + *x = TraceChunk{} + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TraceChunk) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceChunk) ProtoMessage() {} + +func (x *TraceChunk) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceChunk.ProtoReflect.Descriptor instead. +func (*TraceChunk) Descriptor() ([]byte, []int) { + return file_datadog_trace_tracer_payload_proto_rawDescGZIP(), []int{0} +} + +func (x *TraceChunk) GetPriority() int32 { + if x != nil { + return x.Priority + } + return 0 +} + +func (x *TraceChunk) GetOrigin() string { + if x != nil { + return x.Origin + } + return "" +} + +func (x *TraceChunk) GetSpans() []*Span { + if x != nil { + return x.Spans + } + return nil +} + +func (x *TraceChunk) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *TraceChunk) GetDroppedTrace() bool { + if x != nil { + return x.DroppedTrace + } + return false +} + +// TracerPayload represents a payload the trace agent receives from tracers. 
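+// Payloads nest as TracerPayload -> TraceChunk -> Span; tags set at the payload or chunk level apply to every span they contain.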
+type TracerPayload struct { + state protoimpl.MessageState `protogen:"open.v1"` + // containerID specifies the ID of the container where the tracer is running on. + // @gotags: json:"container_id" msg:"container_id" + ContainerID string `protobuf:"bytes,1,opt,name=containerID,proto3" json:"container_id" msg:"container_id"` + // languageName specifies language of the tracer. + // @gotags: json:"language_name" msg:"language_name" + LanguageName string `protobuf:"bytes,2,opt,name=languageName,proto3" json:"language_name" msg:"language_name"` + // languageVersion specifies language version of the tracer. + // @gotags: json:"language_version" msg:"language_version" + LanguageVersion string `protobuf:"bytes,3,opt,name=languageVersion,proto3" json:"language_version" msg:"language_version"` + // tracerVersion specifies version of the tracer. + // @gotags: json:"tracer_version" msg:"tracer_version" + TracerVersion string `protobuf:"bytes,4,opt,name=tracerVersion,proto3" json:"tracer_version" msg:"tracer_version"` + // runtimeID specifies V4 UUID representation of a tracer session. + // @gotags: json:"runtime_id" msg:"runtime_id" + RuntimeID string `protobuf:"bytes,5,opt,name=runtimeID,proto3" json:"runtime_id" msg:"runtime_id"` + // chunks specifies list of containing trace chunks. + // @gotags: json:"chunks" msg:"chunks" + Chunks []*TraceChunk `protobuf:"bytes,6,rep,name=chunks,proto3" json:"chunks" msg:"chunks"` + // tags specifies tags common in all `chunks`. + // @gotags: json:"tags" msg:"tags" + Tags map[string]string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value" msg:"tags"` + // env specifies `env` tag that set with the tracer. + // @gotags: json:"env" msg:"env" + Env string `protobuf:"bytes,8,opt,name=env,proto3" json:"env" msg:"env"` + // hostname specifies hostname of where the tracer is running. + // @gotags: json:"hostname" msg:"hostname" + Hostname string `protobuf:"bytes,9,opt,name=hostname,proto3" json:"hostname" msg:"hostname"` + // version specifies `version` tag that set with the tracer. + // @gotags: json:"app_version" msg:"app_version" + AppVersion string `protobuf:"bytes,10,opt,name=appVersion,proto3" json:"app_version" msg:"app_version"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TracerPayload) Reset() { + *x = TracerPayload{} + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TracerPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TracerPayload) ProtoMessage() {} + +func (x *TracerPayload) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TracerPayload.ProtoReflect.Descriptor instead. 
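+// The method is still emitted so existing callers of the legacy descriptor API keep compiling.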
+func (*TracerPayload) Descriptor() ([]byte, []int) { + return file_datadog_trace_tracer_payload_proto_rawDescGZIP(), []int{1} +} + +func (x *TracerPayload) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *TracerPayload) GetLanguageName() string { + if x != nil { + return x.LanguageName + } + return "" +} + +func (x *TracerPayload) GetLanguageVersion() string { + if x != nil { + return x.LanguageVersion + } + return "" +} + +func (x *TracerPayload) GetTracerVersion() string { + if x != nil { + return x.TracerVersion + } + return "" +} + +func (x *TracerPayload) GetRuntimeID() string { + if x != nil { + return x.RuntimeID + } + return "" +} + +func (x *TracerPayload) GetChunks() []*TraceChunk { + if x != nil { + return x.Chunks + } + return nil +} + +func (x *TracerPayload) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *TracerPayload) GetEnv() string { + if x != nil { + return x.Env + } + return "" +} + +func (x *TracerPayload) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *TracerPayload) GetAppVersion() string { + if x != nil { + return x.AppVersion + } + return "" +} + +var File_datadog_trace_tracer_payload_proto protoreflect.FileDescriptor + +const file_datadog_trace_tracer_payload_proto_rawDesc = "" + + "\n" + + "\"datadog/trace/tracer_payload.proto\x12\rdatadog.trace\x1a\x18datadog/trace/span.proto\"\x81\x02\n" + + "\n" + + "TraceChunk\x12\x1a\n" + + "\bpriority\x18\x01 \x01(\x05R\bpriority\x12\x16\n" + + "\x06origin\x18\x02 \x01(\tR\x06origin\x12)\n" + + "\x05spans\x18\x03 \x03(\v2\x13.datadog.trace.SpanR\x05spans\x127\n" + + "\x04tags\x18\x04 \x03(\v2#.datadog.trace.TraceChunk.TagsEntryR\x04tags\x12\"\n" + + "\fdroppedTrace\x18\x05 \x01(\bR\fdroppedTrace\x1a7\n" + + "\tTagsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xb9\x03\n" + + "\rTracerPayload\x12 \n" + + "\vcontainerID\x18\x01 \x01(\tR\vcontainerID\x12\"\n" + + "\flanguageName\x18\x02 \x01(\tR\flanguageName\x12(\n" + + "\x0flanguageVersion\x18\x03 \x01(\tR\x0flanguageVersion\x12$\n" + + "\rtracerVersion\x18\x04 \x01(\tR\rtracerVersion\x12\x1c\n" + + "\truntimeID\x18\x05 \x01(\tR\truntimeID\x121\n" + + "\x06chunks\x18\x06 \x03(\v2\x19.datadog.trace.TraceChunkR\x06chunks\x12:\n" + + "\x04tags\x18\a \x03(\v2&.datadog.trace.TracerPayload.TagsEntryR\x04tags\x12\x10\n" + + "\x03env\x18\b \x01(\tR\x03env\x12\x1a\n" + + "\bhostname\x18\t \x01(\tR\bhostname\x12\x1e\n" + + "\n" + + "appVersion\x18\n" + + " \x01(\tR\n" + + "appVersion\x1a7\n" + + "\tTagsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01B\x16Z\x14pkg/proto/pbgo/traceb\x06proto3" + +var ( + file_datadog_trace_tracer_payload_proto_rawDescOnce sync.Once + file_datadog_trace_tracer_payload_proto_rawDescData []byte +) + +func file_datadog_trace_tracer_payload_proto_rawDescGZIP() []byte { + file_datadog_trace_tracer_payload_proto_rawDescOnce.Do(func() { + file_datadog_trace_tracer_payload_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_datadog_trace_tracer_payload_proto_rawDesc), len(file_datadog_trace_tracer_payload_proto_rawDesc))) + }) + return file_datadog_trace_tracer_payload_proto_rawDescData +} + +var file_datadog_trace_tracer_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_datadog_trace_tracer_payload_proto_goTypes = []any{ + (*TraceChunk)(nil), // 
0: datadog.trace.TraceChunk + (*TracerPayload)(nil), // 1: datadog.trace.TracerPayload + nil, // 2: datadog.trace.TraceChunk.TagsEntry + nil, // 3: datadog.trace.TracerPayload.TagsEntry + (*Span)(nil), // 4: datadog.trace.Span +} +var file_datadog_trace_tracer_payload_proto_depIdxs = []int32{ + 4, // 0: datadog.trace.TraceChunk.spans:type_name -> datadog.trace.Span + 2, // 1: datadog.trace.TraceChunk.tags:type_name -> datadog.trace.TraceChunk.TagsEntry + 0, // 2: datadog.trace.TracerPayload.chunks:type_name -> datadog.trace.TraceChunk + 3, // 3: datadog.trace.TracerPayload.tags:type_name -> datadog.trace.TracerPayload.TagsEntry + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_datadog_trace_tracer_payload_proto_init() } +func file_datadog_trace_tracer_payload_proto_init() { + if File_datadog_trace_tracer_payload_proto != nil { + return + } + file_datadog_trace_span_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_datadog_trace_tracer_payload_proto_rawDesc), len(file_datadog_trace_tracer_payload_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_datadog_trace_tracer_payload_proto_goTypes, + DependencyIndexes: file_datadog_trace_tracer_payload_proto_depIdxs, + MessageInfos: file_datadog_trace_tracer_payload_proto_msgTypes, + }.Build() + File_datadog_trace_tracer_payload_proto = out.File + file_datadog_trace_tracer_payload_proto_goTypes = nil + file_datadog_trace_tracer_payload_proto_depIdxs = nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_gen.go new file mode 100644 index 00000000..cd2b3925 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_gen.go @@ -0,0 +1,384 @@ +package trace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
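[Editor's aside, not part of the vendored files: the message types defined above are plain Go structs, and the msgp codec that follows gives them a MessagePack wire form alongside protobuf. A minimal sketch of how downstream code might build a payload and round-trip a chunk through that codec — field names and getters are taken from the generated code above; the concrete values are illustrative.]

package main

import (
	"fmt"

	trace "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
)

func main() {
	// Build a chunk using only fields defined in the generated struct above.
	chunk := &trace.TraceChunk{
		Priority:     1,
		Origin:       "rum",
		Tags:         map[string]string{"team": "webhooks"}, // illustrative tag
		DroppedTrace: false,
	}
	payload := &trace.TracerPayload{
		ContainerID:  "abc123", // illustrative values
		LanguageName: "go",
		Env:          "prod",
		Chunks:       []*trace.TraceChunk{chunk},
	}

	// Round-trip the chunk through the msgp codec generated in
	// tracer_payload_gen.go below.
	raw, err := chunk.MarshalMsg(nil)
	if err != nil {
		panic(err)
	}
	var decoded trace.TraceChunk
	if _, err := decoded.UnmarshalMsg(raw); err != nil {
		panic(err)
	}
	fmt.Println(payload.GetContainerID(), decoded.GetOrigin(), len(payload.GetChunks()))
}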
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z *TraceChunk) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "priority" + o = append(o, 0x85, 0xa8, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79) + o = msgp.AppendInt32(o, z.Priority) + // string "origin" + o = append(o, 0xa6, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e) + o = msgp.AppendString(o, z.Origin) + // string "spans" + o = append(o, 0xa5, 0x73, 0x70, 0x61, 0x6e, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Spans))) + for za0001 := range z.Spans { + if z.Spans[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Spans[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Spans", za0001) + return + } + } + } + // string "tags" + o = append(o, 0xa4, 0x74, 0x61, 0x67, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) + for za0002, za0003 := range z.Tags { + o = msgp.AppendString(o, za0002) + o = msgp.AppendString(o, za0003) + } + // string "dropped_trace" + o = append(o, 0xad, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65) + o = msgp.AppendBool(o, z.DroppedTrace) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TraceChunk) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "priority": + z.Priority, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Priority") + return + } + case "origin": + z.Origin, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Origin") + return + } + case "spans": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Spans") + return + } + if cap(z.Spans) >= int(zb0002) { + z.Spans = (z.Spans)[:zb0002] + } else { + z.Spans = make([]*Span, zb0002) + } + for za0001 := range z.Spans { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Spans[za0001] = nil + } else { + if z.Spans[za0001] == nil { + z.Spans[za0001] = new(Span) + } + bts, err = z.Spans[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Spans", za0001) + return + } + } + } + case "tags": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if z.Tags == nil { + z.Tags = make(map[string]string, zb0003) + } else if len(z.Tags) > 0 { + for key := range z.Tags { + delete(z.Tags, key) + } + } + for zb0003 > 0 { + var za0002 string + var za0003 string + zb0003-- + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + z.Tags[za0002] = za0003 + } + case "dropped_trace": + z.DroppedTrace, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DroppedTrace") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of 
bytes occupied by the serialized message +func (z *TraceChunk) Msgsize() (s int) { + s = 1 + 9 + msgp.Int32Size + 7 + msgp.StringPrefixSize + len(z.Origin) + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Spans { + if z.Spans[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Spans[za0001].Msgsize() + } + } + s += 5 + msgp.MapHeaderSize + if z.Tags != nil { + for za0002, za0003 := range z.Tags { + _ = za0003 + s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003) + } + } + s += 14 + msgp.BoolSize + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *TracerPayload) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 10 + // string "container_id" + o = append(o, 0x8a, 0xac, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64) + o = msgp.AppendString(o, z.ContainerID) + // string "language_name" + o = append(o, 0xad, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.LanguageName) + // string "language_version" + o = append(o, 0xb0, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.LanguageVersion) + // string "tracer_version" + o = append(o, 0xae, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.TracerVersion) + // string "runtime_id" + o = append(o, 0xaa, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x64) + o = msgp.AppendString(o, z.RuntimeID) + // string "chunks" + o = append(o, 0xa6, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Chunks))) + for za0001 := range z.Chunks { + if z.Chunks[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Chunks[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Chunks", za0001) + return + } + } + } + // string "tags" + o = append(o, 0xa4, 0x74, 0x61, 0x67, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) + for za0002, za0003 := range z.Tags { + o = msgp.AppendString(o, za0002) + o = msgp.AppendString(o, za0003) + } + // string "env" + o = append(o, 0xa3, 0x65, 0x6e, 0x76) + o = msgp.AppendString(o, z.Env) + // string "hostname" + o = append(o, 0xa8, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Hostname) + // string "app_version" + o = append(o, 0xab, 0x61, 0x70, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AppVersion) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TracerPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "container_id": + z.ContainerID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ContainerID") + return + } + case "language_name": + z.LanguageName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LanguageName") + return + } + case "language_version": + z.LanguageVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LanguageVersion") + return + } + case "tracer_version": + z.TracerVersion, bts, err = 
msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + case "runtime_id": + z.RuntimeID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RuntimeID") + return + } + case "chunks": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Chunks") + return + } + if cap(z.Chunks) >= int(zb0002) { + z.Chunks = (z.Chunks)[:zb0002] + } else { + z.Chunks = make([]*TraceChunk, zb0002) + } + for za0001 := range z.Chunks { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Chunks[za0001] = nil + } else { + if z.Chunks[za0001] == nil { + z.Chunks[za0001] = new(TraceChunk) + } + bts, err = z.Chunks[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Chunks", za0001) + return + } + } + } + case "tags": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if z.Tags == nil { + z.Tags = make(map[string]string, zb0003) + } else if len(z.Tags) > 0 { + for key := range z.Tags { + delete(z.Tags, key) + } + } + for zb0003 > 0 { + var za0002 string + var za0003 string + zb0003-- + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + z.Tags[za0002] = za0003 + } + case "env": + z.Env, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + case "hostname": + z.Hostname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hostname") + return + } + case "app_version": + z.AppVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AppVersion") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *TracerPayload) Msgsize() (s int) { + s = 1 + 13 + msgp.StringPrefixSize + len(z.ContainerID) + 14 + msgp.StringPrefixSize + len(z.LanguageName) + 17 + msgp.StringPrefixSize + len(z.LanguageVersion) + 15 + msgp.StringPrefixSize + len(z.TracerVersion) + 11 + msgp.StringPrefixSize + len(z.RuntimeID) + 7 + msgp.ArrayHeaderSize + for za0001 := range z.Chunks { + if z.Chunks[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Chunks[za0001].Msgsize() + } + } + s += 5 + msgp.MapHeaderSize + if z.Tags != nil { + for za0002, za0003 := range z.Tags { + _ = za0003 + s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003) + } + } + s += 4 + msgp.StringPrefixSize + len(z.Env) + 9 + msgp.StringPrefixSize + len(z.Hostname) + 12 + msgp.StringPrefixSize + len(z.AppVersion) + return +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_utils.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_utils.go new file mode 100644 index 00000000..9f7fabba --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_utils.go @@ -0,0 +1,35 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package trace + +// traceChunkCopiedFields records the fields that are copied in ShallowCopy. +// This should match exactly the fields set in (*TraceChunk).ShallowCopy. +// This is used by tests to enforce the correctness of ShallowCopy. +var traceChunkCopiedFields = map[string]struct{}{ + "Priority": {}, + "Origin": {}, + "Spans": {}, + "Tags": {}, + "DroppedTrace": {}, +} + +// ShallowCopy returns a shallow copy of the copy-able portion of a TraceChunk. These are the +// public fields which will have a Get* method for them. The completeness of this +// method is enforced by the init function above. Instead of using pkg/proto/utils.ProtoCopier, +// which incurs heavy reflection cost for every copy at runtime, we use reflection once at +// startup to ensure our method is complete. +func (t *TraceChunk) ShallowCopy() *TraceChunk { + if t == nil { + return nil + } + return &TraceChunk{ + Priority: t.Priority, + Origin: t.Origin, + Spans: t.Spans, + Tags: t.Tags, + DroppedTrace: t.DroppedTrace, + } +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_vtproto.pb.go new file mode 100644 index 00000000..b63a2fd3 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_vtproto.pb.go @@ -0,0 +1,1067 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.1-0.20240319094008-0393e58bdf10 +// source: datadog/trace/tracer_payload.proto + +package trace + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TraceChunk) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceChunk) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TraceChunk) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DroppedTrace { + i-- + if m.DroppedTrace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Spans) > 0 { + for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Spans[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Origin) > 0 { + i -= len(m.Origin) + copy(dAtA[i:], m.Origin) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Origin))) + i-- + dAtA[i] = 0x12 + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TracerPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TracerPayload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TracerPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AppVersion) > 0 { + i -= len(m.AppVersion) + copy(dAtA[i:], m.AppVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AppVersion))) + i-- + dAtA[i] = 0x52 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x4a + } + if len(m.Env) > 0 { + i -= len(m.Env) + copy(dAtA[i:], m.Env) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Env))) + i-- + dAtA[i] = 0x42 + } + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Chunks[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.RuntimeID) > 0 { + i -= len(m.RuntimeID) + copy(dAtA[i:], m.RuntimeID) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeID))) + i-- + dAtA[i] = 0x2a + } + if len(m.TracerVersion) > 0 { + i -= len(m.TracerVersion) + copy(dAtA[i:], m.TracerVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TracerVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.LanguageVersion) > 0 { + i -= len(m.LanguageVersion) + copy(dAtA[i:], m.LanguageVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LanguageVersion))) + i-- + dAtA[i] = 0x1a + } + if len(m.LanguageName) > 0 { + i -= len(m.LanguageName) + copy(dAtA[i:], m.LanguageName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LanguageName))) + i-- + dAtA[i] = 0x12 + } + if len(m.ContainerID) > 0 { + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TraceChunk) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + l = len(m.Origin) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Spans) > 0 { + for _, e := range m.Spans { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.DroppedTrace { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *TracerPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LanguageName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LanguageVersion) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TracerVersion) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RuntimeID) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.Env) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AppVersion) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TraceChunk) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TraceChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TraceChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Origin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Origin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Spans = append(m.Spans, &Span{}) + if err := m.Spans[len(m.Spans)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := 
iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedTrace", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DroppedTrace = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TracerPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TracerPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TracerPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LanguageName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LanguageName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LanguageVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LanguageVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TracerVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.TracerVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, &TraceChunk{}) + if err := m.Chunks[len(m.Chunks)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go index f6e29072..2f2aeb3b 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go @@ -6,34 +6,51 @@ package state var validProducts = map[string]struct{}{ - ProductUpdaterCatalogDD: {}, - ProductUpdaterAgent: {}, - ProductUpdaterTask: {}, - ProductAgentConfig: {}, - ProductAgentFailover: {}, - ProductAgentTask: {}, - ProductAgentIntegrations: {}, - ProductAPMSampling: {}, - ProductCWSDD: {}, - ProductCWSCustom: {}, - ProductCWSProfiles: {}, - ProductASM: {}, - ProductASMFeatures: {}, - ProductASMDD: {}, - ProductASMData: {}, - ProductAPMTracing: {}, - ProductLiveDebugging: {}, - ProductTesting1: {}, - ProductTesting2: {}, + ProductInstallerConfig: {}, + ProductUpdaterCatalogDD: {}, + ProductUpdaterAgent: {}, + ProductUpdaterTask: {}, + ProductActionPlatformRunnerKeys: {}, + ProductAgentConfig: {}, + ProductAgentFailover: {}, + ProductAgentTask: {}, + ProductAgentIntegrations: {}, + ProductAPMSampling: {}, + ProductCWSDD: {}, + ProductCWSCustom: {}, + ProductCWSProfiles: {}, + ProductCSMSideScanning: {}, + ProductASM: {}, + ProductASMFeatures: {}, + ProductASMDD: {}, + ProductASMData: {}, + ProductAPMTracing: {}, + ProductSDSRules: {}, + ProductSDSAgentConfig: {}, + ProductLiveDebugging: {}, + ProductContainerAutoscalingSettings: {}, + ProductContainerAutoscalingValues: {}, + ProductTesting1: {}, + ProductTesting2: {}, + ProductOrchestratorK8sCRDs: {}, + ProductHaAgent: {}, + ProductNDMDeviceProfilesCustom: {}, + ProductMetricControl: {}, + ProductDataStreamsLiveMessages: {}, + ProductLiveDebuggingSymbolDB: {}, } const ( + // ProductInstallerConfig is the product used to receive the installer configuration + ProductInstallerConfig = "INSTALLER_CONFIG" // ProductUpdaterCatalogDD is the product used to receive the package catalog from datadog ProductUpdaterCatalogDD = "UPDATER_CATALOG_DD" // ProductUpdaterAgent is the product used to receive defaults versions to install ProductUpdaterAgent = "UPDATER_AGENT" // ProductUpdaterTask is the product used to receive tasks to execute ProductUpdaterTask = "UPDATER_TASK" + // ProductActionPlatformRunnerKeys is to receive signing keys for the action platform "private action runner" + ProductActionPlatformRunnerKeys = "AP_RUNNER_KEYS" // ProductAgentConfig is to receive agent configurations, like the log level ProductAgentConfig = "AGENT_CONFIG" // ProductAgentFailover is to receive the multi-region failover configuration @@ -50,6 +67,8 @@ const ( ProductCWSCustom = "CWS_CUSTOM" // ProductCWSProfiles is the cloud workload security profile product ProductCWSProfiles = "CWS_SECURITY_PROFILES" + // ProductCSMSideScanning is the side scanning product + ProductCSMSideScanning = "CSM_SIDE_SCANNING" // ProductASM is the ASM product used by customers to issue rules configurations ProductASM = "ASM" // ProductASMFeatures is the ASM product used form ASM activation through remote config @@ -60,10 +79,31 @@ const ( ProductASMData = "ASM_DATA" // ProductAPMTracing is the apm tracing product ProductAPMTracing = "APM_TRACING" + // ProductSDSRules is the SDS definitions product + ProductSDSRules = "SDS_RULES_DD" + // ProductSDSAgentConfig is the user SDS configurations product. 
+ ProductSDSAgentConfig = "SDS_AGENT_CONFIG" // ProductLiveDebugging is the dynamic instrumentation product ProductLiveDebugging = "LIVE_DEBUGGING" + // ProductLiveDebuggingSymbolDB is used by the live debugging product for + // selecting processes to upload symbols to the symbol database. + ProductLiveDebuggingSymbolDB = "LIVE_DEBUGGING_SYMBOL_DB" + // ProductContainerAutoscalingSettings receives definition of container autoscaling + ProductContainerAutoscalingSettings = "CONTAINER_AUTOSCALING_SETTINGS" + // ProductContainerAutoscalingValues receives values for container autoscaling + ProductContainerAutoscalingValues = "CONTAINER_AUTOSCALING_VALUES" // ProductTesting1 is a product used for testing remote config ProductTesting1 = "TESTING1" // ProductTesting2 is a product used for testing remote config ProductTesting2 = "TESTING2" + // ProductOrchestratorK8sCRDs receives values for k8s crds + ProductOrchestratorK8sCRDs = "ORCHESTRATOR_K8S_CRDS" + // ProductHaAgent is the HA Agent product + ProductHaAgent = "HA_AGENT" + // ProductNDMDeviceProfilesCustom receives user-created SNMP profiles for network device monitoring + ProductNDMDeviceProfilesCustom = "NDM_DEVICE_PROFILES_CUSTOM" + // ProductMetricControl receives configuration for the metrics control. + ProductMetricControl = "METRIC_CONTROL" + // ProductDataStreamsLiveMessages is used for capturing messages from Kafka + ProductDataStreamsLiveMessages = "DSM_LIVE_MESSAGES" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/repository.go b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/repository.go index 031d20f2..0b9ed190 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/repository.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/repository.go @@ -14,6 +14,7 @@ import ( "fmt" "log" "strings" + "sync" "github.com/DataDog/go-tuf/data" ) @@ -66,7 +67,7 @@ type Update struct { // isEmpty returns whether or not all the fields of `Update` are empty func (u *Update) isEmpty() bool { - return len(u.TUFRoots) == 0 && len(u.TUFTargets) == 0 && (u.TargetFiles == nil || len(u.TargetFiles) == 0) && len(u.ClientConfigs) == 0 + return len(u.TUFRoots) == 0 && len(u.TUFTargets) == 0 && len(u.TargetFiles) == 0 && len(u.ClientConfigs) == 0 } // Repository is a remote config client used in a downstream process to retrieve @@ -82,7 +83,7 @@ type Repository struct { latestRootVersion int64 // Config file storage - metadata map[string]Metadata + metadata sync.Map // map[string]Metadata configs map[string]map[string]interface{} } @@ -106,7 +107,7 @@ func NewRepository(embeddedRoot []byte) (*Repository, error) { return &Repository{ latestTargets: data.NewTargets(), tufRootsClient: tufRootsClient, - metadata: make(map[string]Metadata), + metadata: sync.Map{}, configs: configs, tufVerificationEnabled: true, }, nil @@ -125,7 +126,7 @@ func NewUnverifiedRepository() (*Repository, error) { return &Repository{ latestTargets: data.NewTargets(), - metadata: make(map[string]Metadata), + metadata: sync.Map{}, configs: configs, tufVerificationEnabled: false, latestRootVersion: 1, // The backend expects us to start with a root version of 1. 
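[Editor's aside: the repository.go hunks above change Repository's metadata store from a plain map[string]Metadata to a sync.Map, and the hunks that follow rewrite every access to the Load/Store/Range-with-type-assertion pattern so concurrent readers no longer race with Update(). A standalone sketch of that pattern — the Metadata type and the config path string here are illustrative stand-ins, not the vendored code itself:]

package main

import (
	"fmt"
	"sync"
)

// Metadata stands in for the repository's per-config metadata record.
type Metadata struct {
	Version int64
}

func main() {
	var store sync.Map // holds map[string]Metadata, as in Repository.metadata

	// Store replaces plain map assignment.
	store.Store("datadog/2/APM_SAMPLING/cfg/1", Metadata{Version: 3})

	// Load plus a type assertion replaces the two-value map read.
	if val, ok := store.Load("datadog/2/APM_SAMPLING/cfg/1"); ok {
		if m, ok := val.(Metadata); ok {
			fmt.Println("version:", m.Version)
		}
	}

	// Range replaces for-range iteration; returning true keeps iterating.
	store.Range(func(path, value any) bool {
		m, ok := value.(Metadata)
		if !ok {
			return true // skip malformed entries but continue the walk
		}
		fmt.Println(path.(string), m.Version)
		return true
	})
}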
@@ -205,9 +206,12 @@ func (r *Repository) Update(update Update) ([]string, error) { } // 3.b and 3.c: Check if this configuration is either new or has been modified - storedMetadata, exists := r.metadata[path] - if exists && hashesEqual(targetFileMetadata.Hashes, storedMetadata.Hashes) { - continue + storedMetadata, exists := r.metadata.Load(path) + if exists { + m, ok := storedMetadata.(Metadata) + if ok && hashesEqual(targetFileMetadata.Hashes, m.Hashes) { + continue + } } // 3.d: Ensure that the raw configuration file is present in the @@ -246,7 +250,7 @@ func (r *Repository) Update(update Update) ([]string, error) { // TUF: Store the updated roots now that everything has validated if r.tufVerificationEnabled { r.tufRootsClient = tmpRootClient - } else if update.TUFRoots != nil && len(update.TUFRoots) > 0 { + } else if len(update.TUFRoots) > 0 { v, err := extractRootVersion(update.TUFRoots[len(update.TUFRoots)-1]) if err != nil { return nil, err @@ -283,9 +287,11 @@ func (r *Repository) Update(update Update) ([]string, error) { // Note: it is the responsibility of the caller to ensure that no new Update() call was made between // the first Update() call and the call to UpdateApplyStatus() so as to keep the repository state accurate. func (r *Repository) UpdateApplyStatus(cfgPath string, status ApplyStatus) { - if m, ok := r.metadata[cfgPath]; ok { - m.ApplyStatus = status - r.metadata[cfgPath] = m + if val, ok := r.metadata.Load(cfgPath); ok { + if m, ok := val.(Metadata); ok { + m.ApplyStatus = status + r.metadata.Store(cfgPath, m) + } } } @@ -311,12 +317,12 @@ func (r *Repository) applyUpdateResult(_ Update, result updateResult) { } } for path, metadata := range result.metadata { - r.metadata[path] = metadata + r.metadata.Store(path, metadata) } // 5.b Clean up the cache of any removed configs for _, path := range result.removed { - delete(r.metadata, path) + r.metadata.Delete(path) for _, configs := range r.configs { delete(configs, path) } @@ -329,10 +335,17 @@ func (r *Repository) CurrentState() (RepositoryState, error) { var configs []ConfigState var cached []CachedFile - for path, metadata := range r.metadata { - configs = append(configs, configStateFromMetadata(metadata)) - cached = append(cached, cachedFileFromMetadata(path, metadata)) - } + r.metadata.Range(func(path, value any) bool { + metadata, ok := value.(Metadata) + if ok { + configs = append(configs, configStateFromMetadata(metadata)) + cached = append(cached, cachedFileFromMetadata(path.(string), metadata)) + } else { + // Log the error but continue processing the rest of the configs + log.Printf("Failed to convert metadata for %s", path) + } + return true + }) var latestRootVersion int64 if r.tufVerificationEnabled { diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/trace/LICENSE new file mode 100644 index 00000000..b370545b --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/LICENSE @@ -0,0 +1,200 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-present Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
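The vendored files below add Datadog's trace-agent config and sampling packages. As a minimal usage sketch for the connection-recycling ResetClient defined in client.go just below (illustrative only: the target URL and the 30s/10s durations are assumptions, not values taken from this change):

    package main

    import (
        "net/http"
        "time"

        "github.com/DataDog/datadog-agent/pkg/trace/config"
    )

    func main() {
        // The factory builds a fresh client; ResetClient swaps a new one in
        // every 30s (see checkReset below). A zero interval disables resets.
        c := config.NewResetClient(30*time.Second, func() *http.Client {
            return &http.Client{Timeout: 10 * time.Second}
        })
        req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
        if err != nil {
            panic(err)
        }
        resp, err := c.Do(req) // Do is safe for concurrent use
        if err != nil {
            panic(err)
        }
        resp.Body.Close()
    }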
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/client.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/client.go new file mode 100644 index 00000000..b46de4fa --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/client.go @@ -0,0 +1,70 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package config contains the configuration for the trace-agent. +package config + +import ( + "net/http" + "sync" + "time" +) + +// TODO(gbbr): Perhaps this is not the best place for this structure. + +// ResetClient wraps (http.Client).Do and resets the underlying connections at the +// configured interval +type ResetClient struct { + httpClientFactory func() *http.Client + resetInterval time.Duration + + mu sync.RWMutex + httpClient *http.Client + lastReset time.Time +} + +// NewResetClient returns an initialized Client resetting connections at the passed resetInterval ("0" +// means that no reset is performed). +// The underlying http.Client used will be created using the passed http client factory. +func NewResetClient(resetInterval time.Duration, httpClientFactory func() *http.Client) *ResetClient { + return &ResetClient{ + httpClientFactory: httpClientFactory, + resetInterval: resetInterval, + httpClient: httpClientFactory(), + lastReset: time.Now(), + } +} + +// Do wraps (http.Client).Do. Thread safe. +func (c *ResetClient) Do(req *http.Request) (*http.Response, error) { + c.checkReset() + + c.mu.RLock() + httpClient := c.httpClient + c.mu.RUnlock() + + return httpClient.Do(req) +} + +// checkReset checks whether a client reset should be performed, and performs it +// if so +func (c *ResetClient) checkReset() { + if c.resetInterval == 0 { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + if time.Since(c.lastReset) < c.resetInterval { + return + } + + c.lastReset = time.Now() + // Close idle connections on underlying client. Safe to do while other goroutines use the client. + // This is a best effort: if other goroutine(s) are currently using the client, + // the related open connection(s) will remain open until the client is GC'ed + c.httpClient.CloseIdleConnections() + c.httpClient = c.httpClientFactory() +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/config.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/config.go new file mode 100644 index 00000000..ec02f066 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/config.go @@ -0,0 +1,728 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package config + +import ( + "crypto/tls" + "errors" + "net" + "net/http" + "net/url" + "os" + "regexp" + "time" + + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes" + + "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" + "github.com/DataDog/datadog-agent/pkg/obfuscate" + "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" + "github.com/DataDog/datadog-agent/pkg/trace/log" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" +) + +// ServiceName specifies the service name used in the operating system. 
+const ServiceName = "datadog-trace-agent" + +// ErrMissingAPIKey is returned when the config could not be validated due to missing API key. +var ErrMissingAPIKey = errors.New("you must specify an API Key, either via a configuration file or the DD_API_KEY env var") + +// Endpoint specifies an endpoint that the trace agent will write data (traces, stats & services) to. +type Endpoint struct { + APIKey string `json:"-"` // never marshal this + Host string + + // NoProxy will be set to true when the proxy setting for the trace API endpoint + // needs to be ignored (e.g. it is part of the "no_proxy" list in the yaml settings). + NoProxy bool + + // IsMRF determines whether this is a Multi-Region Failover endpoint. + IsMRF bool `mapstructure:"-" json:"-"` +} + +// TelemetryEndpointPrefix specifies the prefix of the telemetry endpoint URL. +const TelemetryEndpointPrefix = "https://instrumentation-telemetry-intake." + +// OTLP holds the configuration for the OpenTelemetry receiver. +type OTLP struct { + // BindHost specifies the host to bind the receiver to. + BindHost string `mapstructure:"-"` + + // GRPCPort specifies the port to use for the gRPC receiver. + // If unset (or 0), the receiver will be off. + GRPCPort int `mapstructure:"grpc_port"` + + // SpanNameRemappings is the map of datadog span names and preferred name to map to. This can be used to + // automatically map Datadog Span Operation Names to an updated value. All entries should be key/value pairs. + SpanNameRemappings map[string]string `mapstructure:"span_name_remappings"` + + // SpanNameAsResourceName specifies whether the OpenTelemetry span's name should be + // used as the Datadog span's operation name. By default (when this is false), the + // operation name is deduced from a combination between the instrumentation scope + // name and the span kind. + // + // For context, the OpenTelemetry 'Span Name' is equivalent to the Datadog 'resource name'. + // The Datadog Span's Operation Name equivalent in OpenTelemetry does not exist, but the span's + // kind comes close. + SpanNameAsResourceName bool `mapstructure:"span_name_as_resource_name"` + + // MaxRequestBytes specifies the maximum number of bytes that will be read + // from an incoming HTTP request. + MaxRequestBytes int64 `mapstructure:"-"` + + // ProbabilisticSampling specifies the percentage of traces to ingest. Exceptions are made for errors + // and rare traces (outliers) if "RareSamplerEnabled" is true. Invalid values are equivalent to 100. + // If spans have the "sampling.priority" attribute set, probabilistic sampling is skipped and the user's + // decision is followed. + ProbabilisticSampling float64 + + // AttributesTranslator specifies an OTLP to Datadog attributes translator. + AttributesTranslator *attributes.Translator `mapstructure:"-"` + + // IgnoreMissingDatadogFields specifies whether we should recompute DD span fields if the corresponding "datadog." + // namespaced span attributes are missing. If it is false (default), we will use the incoming "datadog." namespaced + // OTLP span attributes to construct the DD span, and if they are missing, we will recompute them from the other + // OTLP semantic convention attributes. If it is true, we will only populate a field if its associated "datadog." + // OTLP span attribute exists, otherwise we will leave it empty.
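+ // (Illustrative reading of the above: when true, a field whose "datadog."-namespaced attribute is absent is simply left empty instead of being derived from the OTLP semantic-convention attributes.)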
+ IgnoreMissingDatadogFields bool `mapstructure:"ignore_missing_datadog_fields"` + + // GrpcMaxRecvMsgSizeMib specifies the max receive message size (in Mib) in OTLP receiver gRPC server in the trace agent binary. + // This config only applies to Agent OTLP ingestion. It does not apply to OSS Datadog exporter/connector or DDOT. + GrpcMaxRecvMsgSizeMib int `mapstructure:"-"` +} + +// ObfuscationConfig holds the configuration for obfuscating sensitive data +// for various span types. +type ObfuscationConfig struct { + // ES holds the obfuscation configuration for ElasticSearch bodies. + ES obfuscate.JSONConfig `mapstructure:"elasticsearch"` + + // OpenSearch holds the obfuscation configuration for OpenSearch bodies. + OpenSearch obfuscate.JSONConfig `mapstructure:"opensearch"` + + // Mongo holds the obfuscation configuration for MongoDB queries. + Mongo obfuscate.JSONConfig `mapstructure:"mongodb"` + + // SQLExecPlan holds the obfuscation configuration for SQL Exec Plans. This is strictly for safety related obfuscation, + // not normalization. Normalization of exec plans is configured in SQLExecPlanNormalize. + SQLExecPlan obfuscate.JSONConfig `mapstructure:"sql_exec_plan"` + + // SQLExecPlanNormalize holds the normalization configuration for SQL Exec Plans. + SQLExecPlanNormalize obfuscate.JSONConfig `mapstructure:"sql_exec_plan_normalize"` + + // HTTP holds the obfuscation settings for HTTP URLs. + HTTP obfuscate.HTTPConfig `mapstructure:"http"` + + // RemoveStackTraces specifies whether stack traces should be removed. + // More specifically "error.stack" tag values will be cleared. + RemoveStackTraces bool `mapstructure:"remove_stack_traces"` + + // Redis holds the configuration for obfuscating the "redis.raw_command" tag + // for spans of type "redis". + Redis obfuscate.RedisConfig `mapstructure:"redis"` + + // Valkey holds the configuration for obfuscating the "valkey.raw_command" tag + // for spans of type "valkey". + Valkey obfuscate.ValkeyConfig `mapstructure:"valkey"` + + // Memcached holds the configuration for obfuscating the "memcached.command" tag + // for spans of type "memcached". + Memcached obfuscate.MemcachedConfig `mapstructure:"memcached"` + + // CreditCards holds the configuration for obfuscating credit cards. + CreditCards obfuscate.CreditCardsConfig `mapstructure:"credit_cards"` + + // Cache holds the configuration for caching obfuscation results. + Cache obfuscate.CacheConfig `mapstructure:"cache"` +} + +func obfuscationMode(conf *AgentConfig, sqllexerEnabled bool) obfuscate.ObfuscationMode { + if conf.SQLObfuscationMode != "" { + if conf.SQLObfuscationMode == string(obfuscate.ObfuscateOnly) || conf.SQLObfuscationMode == string(obfuscate.ObfuscateAndNormalize) { + return obfuscate.ObfuscationMode(conf.SQLObfuscationMode) + } + log.Warnf("Invalid SQL obfuscator mode %s, falling back to default", conf.SQLObfuscationMode) + return "" + } + if sqllexerEnabled { + return obfuscate.ObfuscateOnly + } + return "" +} + +// Export returns an obfuscate.Config matching o. 
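+// For example, per obfuscationMode above: with SQLObfuscationMode unset, the SQL mode falls back to obfuscate.ObfuscateOnly when the "sqllexer" feature flag is set, and to the empty default otherwise.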
+func (o *ObfuscationConfig) Export(conf *AgentConfig) obfuscate.Config { + return obfuscate.Config{ + SQL: obfuscate.SQLConfig{ + TableNames: conf.HasFeature("table_names"), + ReplaceDigits: conf.HasFeature("quantize_sql_tables") || conf.HasFeature("replace_sql_digits"), + KeepSQLAlias: conf.HasFeature("keep_sql_alias"), + DollarQuotedFunc: conf.HasFeature("dollar_quoted_func"), + ObfuscationMode: obfuscationMode(conf, conf.HasFeature("sqllexer")), + }, + ES: o.ES, + OpenSearch: o.OpenSearch, + Mongo: o.Mongo, + SQLExecPlan: o.SQLExecPlan, + SQLExecPlanNormalize: o.SQLExecPlanNormalize, + HTTP: o.HTTP, + Redis: o.Redis, + Valkey: o.Valkey, + Memcached: o.Memcached, + CreditCard: o.CreditCards, + Logger: new(debugLogger), + Cache: o.Cache, + } +} + +type debugLogger struct{} + +func (debugLogger) Debugf(format string, params ...interface{}) { + log.Debugf(format, params...) +} + +// Enablable can represent any option that has an "enabled" boolean sub-field. +type Enablable struct { + Enabled bool `mapstructure:"enabled"` +} + +// TelemetryConfig holds Instrumentation telemetry Endpoints information +type TelemetryConfig struct { + Enabled bool `mapstructure:"enabled"` + Endpoints []*Endpoint +} + +// ReplaceRule specifies a replace rule. +type ReplaceRule struct { + // Name specifies the name of the tag that the replace rule addresses. However, + // some exceptions apply such as: + // • "resource.name" will target the resource + // • "*" will target all tags and the resource + Name string `mapstructure:"name"` + + // Pattern specifies the regexp pattern to be used when replacing. It must compile. + Pattern string `mapstructure:"pattern"` + + // Re holds the compiled Pattern and is only used internally. + Re *regexp.Regexp `mapstructure:"-"` + + // Repl specifies the replacement string to be used when Pattern matches. + Repl string `mapstructure:"repl"` +} + +// WriterConfig specifies configuration for an API writer. +type WriterConfig struct { + // ConnectionLimit specifies the maximum number of concurrent outgoing + // connections allowed for the sender. + ConnectionLimit int `mapstructure:"connection_limit"` + + // QueueSize specifies the maximum number of payloads allowed to be queued + // in the sender. + QueueSize int `mapstructure:"queue_size"` + + // FlushPeriodSeconds specifies the frequency at which the writer's buffer + // will be flushed to the sender, in seconds. Fractions are permitted. + FlushPeriodSeconds float64 `mapstructure:"flush_period_seconds"` +} + +// FargateOrchestratorName is a Fargate orchestrator name. +type FargateOrchestratorName string + +const ( + // OrchestratorECS represents AWS ECS + OrchestratorECS FargateOrchestratorName = "ECS" + // OrchestratorEKS represents AWS EKS + OrchestratorEKS FargateOrchestratorName = "EKS" + // OrchestratorUnknown is used when we cannot retrieve the orchestrator + OrchestratorUnknown FargateOrchestratorName = "Unknown" +) + +// ProfilingProxyConfig ... +type ProfilingProxyConfig struct { + // DDURL ... + DDURL string + // AdditionalEndpoints ... + AdditionalEndpoints map[string][]string +} + +// EVPProxy contains the settings for the EVPProxy proxy. +type EVPProxy struct { + // Enabled reports whether EVPProxy is enabled (true by default). + Enabled bool + // DDURL is the Datadog site to forward payloads to (defaults to the Site setting if not set). + DDURL string + // APIKey is the main API Key (defaults to the main API key).
+ APIKey string `json:"-"` // Never marshal this field + // ApplicationKey to be used for requests with the X-Datadog-NeedsAppKey set (defaults to the top-level Application Key). + ApplicationKey string `json:"-"` // Never marshal this field + // AdditionalEndpoints is a map of additional Datadog sites to API keys. + AdditionalEndpoints map[string][]string + // MaxPayloadSize indicates the size at which payloads will be rejected, in bytes. + MaxPayloadSize int64 + // ReceiverTimeout indicates the maximum time an EVPProxy request can take. Value in seconds. + ReceiverTimeout int +} + +// OpenLineageProxy contains the settings for the OpenLineageProxy proxy. +type OpenLineageProxy struct { + // Enabled reports whether OpenLineageProxy is enabled (true by default). + Enabled bool + // DDURL is the Datadog site to forward payloads to (defaults to the Site setting if not set). + DDURL string + // APIKey is the main API Key (defaults to the main API key). + APIKey string `json:"-"` // Never marshal this field + // AdditionalEndpoints is a map of additional Datadog sites to API keys. + AdditionalEndpoints map[string][]string + // APIVersion indicates what version the OpenLineageProxy uses for the DO-intake API. + APIVersion int +} + +// InstallSignatureConfig contains the information on how the agent was installed +// and a unique identifier that distinguishes this agent from others. +type InstallSignatureConfig struct { + Found bool `json:"-"` + InstallID string `json:"install_id"` + InstallType string `json:"install_type"` + InstallTime int64 `json:"install_time"` +} + +// DebuggerProxyConfig ... +type DebuggerProxyConfig struct { + // DDURL ... + DDURL string + // APIKey ... + APIKey string `json:"-"` // Never marshal this field + // AdditionalEndpoints is a map of additional Datadog sites to API keys. + AdditionalEndpoints map[string][]string `json:"-"` // Never marshal this field +} + +// SymDBProxyConfig ... +type SymDBProxyConfig struct { + // DDURL ... + DDURL string + // APIKey ... + APIKey string `json:"-"` // Never marshal this field + // AdditionalEndpoints is a map of additional Datadog endpoints to API keys. + AdditionalEndpoints map[string][]string `json:"-"` // Never marshal this field +} + +// AgentConfig handles the interpretation of the configuration (with default +// behaviors) in one place. It is also a simple structure to share across all +// the Agent components, with 100% safe and reliable values. +// It is exposed with expvar, so make sure to exclude any sensitive field +// from JSON encoding. Use New() to create an instance. +type AgentConfig struct { + Features map[string]struct{} + + Enabled bool + AgentVersion string + GitCommit string + Site string // the intake site to use (e.g. "datadoghq.com") + + // FargateOrchestrator specifies the name of the Fargate orchestrator. e.g. "ECS", "EKS", "Unknown" + FargateOrchestrator FargateOrchestratorName + + // Global + Hostname string + DefaultEnv string // the traces will default to this environment + ConfigPath string // the source of this config, if any + + // Endpoints specifies the set of hosts and API keys where traces and stats + // will be uploaded to. The first endpoint is the main configuration endpoint; + // any following ones are read from the 'additional_endpoints' parts of the + // configuration file, if present.
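+ // (Illustrative: an 'additional_endpoints' entry maps an intake URL to one or more API keys, each pair becoming an extra Endpoint after the main one.)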
+ Endpoints []*Endpoint + + // Concentrator + BucketInterval time.Duration // the size of our pre-aggregation per bucket + ExtraAggregators []string // DEPRECATED + PeerTagsAggregation bool // enables/disables stats aggregation for peer entity tags, used by Concentrator and ClientStatsAggregator + ComputeStatsBySpanKind bool // enables/disables the computing of stats based on a span's `span.kind` field + PeerTags []string // additional tags to use for peer entity stats aggregation + + // Sampler configuration + ExtraSampleRate float64 + TargetTPS float64 + ErrorTPS float64 + MaxEPS float64 + MaxRemoteTPS float64 + + // Rare Sampler configuration + RareSamplerEnabled bool + RareSamplerTPS int + RareSamplerCooldownPeriod time.Duration + RareSamplerCardinality int + + // Probabilistic Sampler configuration + ProbabilisticSamplerEnabled bool + ProbabilisticSamplerHashSeed uint32 + ProbabilisticSamplerSamplingPercentage float32 + + // Error Tracking Standalone + ErrorTrackingStandalone bool + + // Receiver + ReceiverEnabled bool // specifies whether Receiver listeners are enabled. Unless OTLPReceiver is used, this should always be true. + ReceiverHost string + ReceiverPort int + ReceiverSocket string // if not empty, UDS will be enabled on unix:// + ConnectionLimit int // for rate-limiting, how many unique connections to allow in a lease period (30s) + ReceiverTimeout int + MaxRequestBytes int64 // specifies the maximum allowed request size for incoming trace payloads + TraceBuffer int // specifies the number of traces to buffer before blocking. + Decoders int // specifies the number of traces that can be concurrently decoded. + MaxConnections int // specifies the maximum number of concurrent incoming connections allowed. + DecoderTimeout int // specifies the maximum time in milliseconds that the decoders will wait for a turn to accept a payload before returning 429 + + WindowsPipeName string + PipeBufferSize int + PipeSecurityDescriptor string + + GUIPort string // the port of the Datadog Agent GUI (for control access) + + // Writers + SynchronousFlushing bool // Mode where traces are only submitted when FlushAsync is called, used for Serverless Extension + StatsWriter *WriterConfig + TraceWriter *WriterConfig + ConnectionResetInterval time.Duration // frequency at which outgoing connections are reset. 0 means no reset is performed + // MaxSenderRetries is the maximum number of retries that a sender will perform + // before giving up. Note that the sender may not perform all MaxSenderRetries if + // the agent is under load and the outgoing payload queue is full. In that + // case, the sender will drop failed payloads when it is unable to enqueue + // them for another retry. + MaxSenderRetries int + // HTTP client used in writer connections. If nil, default client values will be used. + HTTPClientFunc func() *http.Client `json:"-"` + // HTTP Transport used in writer connections. If nil, default transport values will be used. 
+ HTTPTransportFunc func() *http.Transport `json:"-"` + + // internal telemetry + StatsdEnabled bool + StatsdHost string + StatsdPort int + StatsdPipeName string // for Windows Pipes + StatsdSocket string // for UDS Sockets + + // logging + LogFilePath string + + // watchdog + MaxMemory float64 // MaxMemory is the threshold (bytes allocated) above which the program panics and exits, to be restarted + MaxCPU float64 // MaxCPU is the max UserAvg CPU the program should consume + WatchdogInterval time.Duration // WatchdogInterval is the delay between 2 watchdog checks + + // http/s proxying + ProxyURL *url.URL + SkipSSLValidation bool + + // filtering + Ignore map[string][]string + + // ReplaceTags is used to filter out sensitive information from tag values. + // It maps tag keys to a set of replacements. Only supported in A6. + ReplaceTags []*ReplaceRule + + // GlobalTags lists metadata that will be added to all spans + GlobalTags map[string]string + + // transaction analytics + AnalyzedRateByServiceLegacy map[string]float64 + AnalyzedSpansByService map[string]map[string]float64 + + // infrastructure agent binary + DDAgentBin string + + // Obfuscation holds the sensitive data obfuscator's configuration. + Obfuscation *ObfuscationConfig + + // SQLObfuscationMode holds obfuscator mode. + SQLObfuscationMode string + + // MaxResourceLen is the maximum length the resource can have + MaxResourceLen int + + // RequireTags specifies a list of tags which must be present on the root span in order for a trace to be accepted. + RequireTags []*Tag + + // RejectTags specifies a list of tags which must be absent on the root span in order for a trace to be accepted. + RejectTags []*Tag + + // RequireTagsRegex specifies a list of regexp for tags which must be present on the root span in order for a trace to be accepted. + RequireTagsRegex []*TagRegex + + // RejectTagsRegex specifies a list of regexp for tags which must be absent on the root span in order for a trace to be accepted. + RejectTagsRegex []*TagRegex + + // OTLPReceiver holds the configuration for OpenTelemetry receiver. + OTLPReceiver *OTLP + + // ProfilingProxy specifies settings for the profiling proxy. + ProfilingProxy ProfilingProxyConfig + + // Telemetry settings + TelemetryConfig *TelemetryConfig + + // EVPProxy contains the settings for the EVPProxy proxy. + EVPProxy EVPProxy + + // OpenLineageProxy contains the settings for the OpenLineageProxy proxy. + OpenLineageProxy OpenLineageProxy + + // DebuggerProxy contains the settings for the Live Debugger proxy. + DebuggerProxy DebuggerProxyConfig + + // DebuggerDiagnosticsProxy contains the settings for the Live Debugger diagnostics proxy. + DebuggerDiagnosticsProxy DebuggerProxyConfig + + // SymDBProxy contains the settings for the Symbol Database proxy. + SymDBProxy SymDBProxyConfig + + // Proxy specifies a function to return a proxy for a given Request. + // See (net/http.Transport).Proxy for more details. + Proxy func(*http.Request) (*url.URL, error) `json:"-"` + + // MaxCatalogEntries specifies the maximum number of services to be added to the priority sampler's + // catalog. If not set (0) it will default to 5000. + MaxCatalogEntries int + + // RemoteConfigClient retrieves sampling updates from the remote config backend + RemoteConfigClient RemoteClient `json:"-"` + + // ContainerTags ... + ContainerTags func(cid string) ([]string, error) `json:"-"` + + // ContainerIDFromOriginInfo ...
+ ContainerIDFromOriginInfo func(originInfo origindetection.OriginInfo) (string, error) `json:"-"` + + // ContainerProcRoot is the root dir for `proc` info + ContainerProcRoot string + + // DebugServerPort defines the port used by the debug server + DebugServerPort int + + // Install Signature + InstallSignature InstallSignatureConfig + + // Lambda function name + LambdaFunctionName string + + // Azure container apps tags, in the form of a comma-separated list of + // key-value pairs, starting with a comma + AzureContainerAppTags string + + // GetAgentAuthToken retrieves an auth token to communicate with other agent processes + // Function will be nil if in an environment without an auth token + GetAgentAuthToken func() string `json:"-"` + + // IsMRFEnabled determines whether Multi-Region Failover is enabled. It is based on the core config's + // `multi_region_failover.enabled` and `multi_region_failover.failover_apm` settings. + IsMRFEnabled func() bool `json:"-"` +} + +// RemoteClient is used to receive APM sampling updates from a remote source. +// This is an interface around the client provided by pkg/config/remote to allow for easier testing. +type RemoteClient interface { + Close() + Start() + Subscribe(string, func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) + UpdateApplyStatus(cfgPath string, status state.ApplyStatus) +} + +// Tag represents a key/value pair. +type Tag struct { + K, V string +} + +// TagRegex represents a key/value regex pattern pair. +type TagRegex struct { + K string + V *regexp.Regexp +} + +// New returns a configuration with the default values. +func New() *AgentConfig { + return &AgentConfig{ + Enabled: true, + DefaultEnv: "none", + Endpoints: []*Endpoint{{Host: "https://trace.agent.datadoghq.com"}}, + FargateOrchestrator: OrchestratorUnknown, + Site: "datadoghq.com", + MaxCatalogEntries: 5000, + + BucketInterval: time.Duration(10) * time.Second, + + ExtraSampleRate: 1.0, + TargetTPS: 10, + ErrorTPS: 10, + MaxEPS: 200, + MaxRemoteTPS: 100, + + RareSamplerEnabled: false, + RareSamplerTPS: 5, + RareSamplerCooldownPeriod: 5 * time.Minute, + RareSamplerCardinality: 200, + + ErrorTrackingStandalone: false, + + ReceiverEnabled: true, + ReceiverHost: "localhost", + ReceiverPort: 8126, + MaxRequestBytes: 25 * 1024 * 1024, // 25MB + PipeBufferSize: 1_000_000, + PipeSecurityDescriptor: "D:AI(A;;GA;;;WD)", + GUIPort: "5002", + + StatsWriter: new(WriterConfig), + TraceWriter: new(WriterConfig), + ConnectionResetInterval: 0, // disabled + MaxSenderRetries: 4, + + StatsdHost: "localhost", + StatsdPort: 8125, + StatsdEnabled: true, + + LambdaFunctionName: os.Getenv("AWS_LAMBDA_FUNCTION_NAME"), + + MaxMemory: 5e8, // 500 Mb, should rarely go above 50 Mb + MaxCPU: 0.5, // 50%, well behaving agents keep below 5% + WatchdogInterval: 10 * time.Second, + + Ignore: make(map[string][]string), + AnalyzedRateByServiceLegacy: make(map[string]float64), + AnalyzedSpansByService: make(map[string]map[string]float64), + Obfuscation: &ObfuscationConfig{}, + SQLObfuscationMode: "", + MaxResourceLen: 5000, + + GlobalTags: computeGlobalTags(), + + Proxy: http.ProxyFromEnvironment, + OTLPReceiver: &OTLP{}, + ContainerTags: noopContainerTagsFunc, + ContainerIDFromOriginInfo: NoopContainerIDFromOriginInfoFunc, + TelemetryConfig: &TelemetryConfig{ + Endpoints: []*Endpoint{{Host: TelemetryEndpointPrefix + "datadoghq.com"}}, + }, + EVPProxy: EVPProxy{ + Enabled: true, + MaxPayloadSize: 5 * 1024 * 1024, + }, + OpenLineageProxy: OpenLineageProxy{ + Enabled:
true, + APIVersion: 2, + }, + + Features: make(map[string]struct{}), + PeerTagsAggregation: true, + ComputeStatsBySpanKind: true, + } +} + +func computeGlobalTags() map[string]string { + if inAzureAppServices() { + return traceutil.GetAppServicesTags() + } + return make(map[string]string) +} + +// ErrContainerTagsFuncNotDefined is returned when the containerTags function is not defined. +var ErrContainerTagsFuncNotDefined = errors.New("containerTags function not defined") + +func noopContainerTagsFunc(_ string) ([]string, error) { + return nil, ErrContainerTagsFuncNotDefined +} + +// ErrContainerIDFromOriginInfoFuncNotDefined is returned when the ContainerIDFromOriginInfo function is not defined. +var ErrContainerIDFromOriginInfoFuncNotDefined = errors.New("ContainerIDFromOriginInfo function not defined") + +// NoopContainerIDFromOriginInfoFunc is used when the ContainerIDFromOriginInfo function is not defined. +func NoopContainerIDFromOriginInfoFunc(_ origindetection.OriginInfo) (string, error) { + return "", ErrContainerIDFromOriginInfoFuncNotDefined +} + +// APIKey returns the first (main) endpoint's API key. +func (c *AgentConfig) APIKey() string { + if len(c.Endpoints) == 0 { + return "" + } + return c.Endpoints[0].APIKey +} + +// UpdateAPIKey updates the API Key associated with the main endpoint. +func (c *AgentConfig) UpdateAPIKey(val string) { + if len(c.Endpoints) == 0 { + return + } + c.Endpoints[0].APIKey = val +} + +// NewHTTPClient returns a new http.Client to be used for outgoing connections to the +// Datadog API. +func (c *AgentConfig) NewHTTPClient() *ResetClient { + // If a custom HTTPClientFunc has been set, use it. Otherwise use default client values + if c.HTTPClientFunc != nil { + return NewResetClient(c.ConnectionResetInterval, c.HTTPClientFunc) + } + return NewResetClient(c.ConnectionResetInterval, func() *http.Client { + return &http.Client{ + Timeout: 10 * time.Second, + Transport: c.NewHTTPTransport(), + } + }) +} + +// NewHTTPTransport returns a new http.Transport to be used for outgoing connections to +// the Datadog API. +func (c *AgentConfig) NewHTTPTransport() *http.Transport { + if c.HTTPTransportFunc != nil { + return c.HTTPTransportFunc() + } + transport := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: c.SkipSSLValidation}, + // below field values are from http.DefaultTransport (go1.12) + Proxy: c.Proxy, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 30 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } + return transport +} + +// HasFeature returns true if the agent has the given feature flag. +func (c *AgentConfig) HasFeature(feat string) bool { + _, ok := c.Features[feat] + return ok +} + +// AllFeatures returns a slice of all the feature flags the agent has. +func (c *AgentConfig) AllFeatures() []string { + feats := []string{} + for feat := range c.Features { + feats = append(feats, feat) + } + return feats +} + +// ConfiguredPeerTags returns the set of peer tags that should be used +// for aggregation based on the various config values and the base set of tags.
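+// For example (illustrative): with PeerTagsAggregation enabled and PeerTags set to []string{"my.peer.tag"}, the result is the sorted, de-duplicated union of basePeerTags and "my.peer.tag"; with aggregation disabled, the result is nil.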
+func (c *AgentConfig) ConfiguredPeerTags() []string { + if !c.PeerTagsAggregation { + return nil + } + return preparePeerTags(append(basePeerTags, c.PeerTags...)) +} + +func inAzureAppServices() bool { + _, existsLinux := os.LookupEnv("WEBSITE_STACK") + _, existsWin := os.LookupEnv("WEBSITE_APPSERVICEAPPLOGS_TRACE_ENABLED") + return existsLinux || existsWin +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/peer_tags.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/peer_tags.go new file mode 100644 index 00000000..6b2a58c9 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/peer_tags.go @@ -0,0 +1,55 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package config + +import ( + _ "embed" //nolint:revive + "sort" + "strings" + + "github.com/DataDog/datadog-agent/pkg/util/log" + "gopkg.in/ini.v1" +) + +//go:embed peer_tags.ini +var peerTagFile []byte + +// basePeerTags is the base set of peer tag precursors (tags from which peer tags +// are derived) we aggregate on when peer tag aggregation is enabled. +var basePeerTags = func() []string { + var precursors []string = []string{"_dd.base_service"} + + cfg, err := ini.Load(peerTagFile) + if err != nil { + log.Error("Error loading file for peer tags: ", err) + return precursors + } + peerTags := cfg.Section("dd.apm.peer.tags").Keys() + + for _, t := range peerTags { + ps := strings.Split(t.Value(), ",") + precursors = append(precursors, ps...) + } + sort.Strings(precursors) + + return precursors +}() + +func preparePeerTags(tags []string) []string { + if len(tags) == 0 { + return nil + } + var deduped []string + seen := make(map[string]struct{}) + for _, t := range tags { + if _, ok := seen[t]; !ok { + seen[t] = struct{}{} + deduped = append(deduped, t) + } + } + sort.Strings(deduped) + return deduped +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/peer_tags.ini b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/peer_tags.ini new file mode 100644 index 00000000..24ffaafe --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/peer_tags.ini @@ -0,0 +1,18 @@ +# Generated - DO NOT EDIT +# Source: https://github.com/DataDog/semantic-core/ +[dd.apm.peer.tags] +peer.aws.dynamodb.table = "tablename" +peer.aws.kinesis.stream = "streamname" +peer.aws.s3.bucket = "bucketname,aws.s3.bucket" +peer.aws.sqs.queue = "queuename" +peer.cassandra.contact.points = "db.cassandra.contact.points" +peer.couchbase.seed.nodes = "db.couchbase.seed.nodes" +peer.db.name = "db.name,mongodb.db,db.instance,cassandra.keyspace,db.namespace" +peer.db.system = "db.system,active_record.db.vendor,db.type,sequel.db.vendor" +peer.hostname = "peer.hostname,hostname,net.peer.name,db.hostname,network.destination.name,grpc.host,http.host,server.address,http.server_name,out.host,dns.hostname,network.destination.ip" +peer.kafka.bootstrap.servers = "messaging.kafka.bootstrap.servers" +peer.messaging.destination = "topicname,messaging.destination,messaging.destination.name,messaging.rabbitmq.exchange,amqp.destination,amqp.queue,amqp.exchange,msmq.queue.path,aws.queue.name" +peer.messaging.system = "messaging.system" +peer.rpc.service = "rpc.service" +peer.rpc.system = "rpc.system" +peer.service = "peer.service" diff --git 
a/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/buflogger.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/buflogger.go new file mode 100644 index 00000000..90672147 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/buflogger.go @@ -0,0 +1,97 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build test + +package log + +import ( + "bytes" + "fmt" + "sync" +) + +var _ Logger = (*buflogger)(nil) + +// NewBufferLogger creates a new Logger which outputs everything to the given buffer. +// It is synchronised for concurrent use; as such, it is not optimal for use outside +// testing environments. +func NewBufferLogger(out *bytes.Buffer) Logger { + return &buflogger{buf: out} +} + +type buflogger struct { + mu sync.Mutex + buf *bytes.Buffer +} + +func (b *buflogger) logWithLevel(lvl string, msg string) { + b.mu.Lock() + defer b.mu.Unlock() + b.buf.WriteString(fmt.Sprintf("[%s] %s", lvl, msg)) +} + +// Trace implements Logger. +func (b *buflogger) Trace(v ...interface{}) { b.logWithLevel("TRACE", fmt.Sprint(v...)) } + +// Tracef implements Logger. +func (b *buflogger) Tracef(format string, params ...interface{}) { + b.logWithLevel("TRACE", fmt.Sprintf(format, params...)) +} + +// Debug implements Logger. +func (b *buflogger) Debug(v ...interface{}) { b.logWithLevel("DEBUG", fmt.Sprint(v...)) } + +// Debugf implements Logger. +func (b *buflogger) Debugf(format string, params ...interface{}) { + b.logWithLevel("DEBUG", fmt.Sprintf(format, params...)) +} + +// Info implements Logger. +func (b *buflogger) Info(v ...interface{}) { b.logWithLevel("INFO", fmt.Sprint(v...)) } + +// Infof implements Logger. +func (b *buflogger) Infof(format string, params ...interface{}) { + b.logWithLevel("INFO", fmt.Sprintf(format, params...)) +} + +// Warn implements Logger. +func (b *buflogger) Warn(v ...interface{}) error { + b.logWithLevel("WARN", fmt.Sprint(v...)) + return nil +} + +// Warnf implements Logger. +func (b *buflogger) Warnf(format string, params ...interface{}) error { + b.logWithLevel("WARN", fmt.Sprintf(format, params...)) + return nil +} + +// Error implements Logger. +func (b *buflogger) Error(v ...interface{}) error { + b.logWithLevel("ERROR", fmt.Sprint(v...)) + return nil +} + +// Errorf implements Logger. +func (b *buflogger) Errorf(format string, params ...interface{}) error { + b.logWithLevel("ERROR", fmt.Sprintf(format, params...)) + return nil +} + +// Critical implements Logger. +func (b *buflogger) Critical(v ...interface{}) error { + b.logWithLevel("CRITICAL", fmt.Sprint(v...)) + return nil +} + +// Criticalf implements Logger. +func (b *buflogger) Criticalf(format string, params ...interface{}) error { + b.logWithLevel("CRITICAL", fmt.Sprintf(format, params...)) + return nil +} + +// Flush implements Logger. +func (b *buflogger) Flush() {} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/logger.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/logger.go new file mode 100644 index 00000000..552eeaa0 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/logger.go @@ -0,0 +1,196 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
+// Copyright 2016-present Datadog, Inc. + +// Package log implements the trace-agent logger. +package log + +import ( + "sync" +) + +var ( + mu sync.RWMutex + logger Logger = NoopLogger +) + +// SetLogger sets l as the default Logger and returns the old logger. +func SetLogger(l Logger) Logger { + mu.Lock() + oldlogger := logger + logger = l + mu.Unlock() + return oldlogger +} + +// IsSet returns whether the logger has been set up. +func IsSet() bool { + mu.Lock() + defer mu.Unlock() + return logger != NoopLogger +} + +// Logger implements the core logger interface. +type Logger interface { + Trace(v ...interface{}) + Tracef(format string, params ...interface{}) + Debug(v ...interface{}) + Debugf(format string, params ...interface{}) + Info(v ...interface{}) + Infof(format string, params ...interface{}) + Warn(v ...interface{}) error + Warnf(format string, params ...interface{}) error + Error(v ...interface{}) error + Errorf(format string, params ...interface{}) error + Critical(v ...interface{}) error + Criticalf(format string, params ...interface{}) error + Flush() +} + +// Trace formats message using the default formats for its operands +// and writes to log with level = Trace +func Trace(v ...interface{}) { + mu.RLock() + logger.Trace(v...) + mu.RUnlock() +} + +// Tracef formats message according to format specifier +// and writes to log with level = Trace. +func Tracef(format string, params ...interface{}) { + mu.RLock() + logger.Tracef(format, params...) + mu.RUnlock() +} + +// Debug formats message using the default formats for its operands +// and writes to log with level = Debug +func Debug(v ...interface{}) { + mu.RLock() + logger.Debug(v...) + mu.RUnlock() +} + +// Debugf formats message according to format specifier +// and writes to log with level = Debug. +func Debugf(format string, params ...interface{}) { + mu.RLock() + logger.Debugf(format, params...) + mu.RUnlock() +} + +// Info formats message using the default formats for its operands +// and writes to log with level = Info +func Info(v ...interface{}) { + mu.RLock() + logger.Info(v...) + mu.RUnlock() +} + +// Infof formats message according to format specifier +// and writes to log with level = Info. +func Infof(format string, params ...interface{}) { + mu.RLock() + logger.Infof(format, params...) + mu.RUnlock() +} + +// Warn formats message using the default formats for its operands +// and writes to log with level = Warn +func Warn(v ...interface{}) { + mu.RLock() + logger.Warn(v...) //nolint:errcheck + mu.RUnlock() +} + +// Warnf formats message according to format specifier +// and writes to log with level = Warn. +func Warnf(format string, params ...interface{}) { + mu.RLock() + logger.Warnf(format, params...) //nolint:errcheck + mu.RUnlock() +} + +// Error formats message using the default formats for its operands +// and writes to log with level = Error +func Error(v ...interface{}) { + mu.RLock() + logger.Error(v...) //nolint:errcheck + mu.RUnlock() +} + +// Errorf formats message according to format specifier +// and writes to log with level = Error. +func Errorf(format string, params ...interface{}) { + mu.RLock() + logger.Errorf(format, params...) //nolint:errcheck + mu.RUnlock() +} + +// Critical formats message using the default formats for its operands +// and writes to log with level = Critical +func Critical(v ...interface{}) { + mu.RLock() + logger.Critical(v...) 
//nolint:errcheck + mu.RUnlock() +} + +// Criticalf formats message according to format specifier +// and writes to log with level = Critical. +func Criticalf(format string, params ...interface{}) { + mu.RLock() + logger.Criticalf(format, params...) //nolint:errcheck + mu.RUnlock() +} + +// Flush flushes all the messages in the logger. +func Flush() { + mu.RLock() + logger.Flush() + mu.RUnlock() +} + +// NoopLogger is a logger which has no effect upon calling. +var NoopLogger = noopLogger{} + +type noopLogger struct{} + +// Trace implements Logger. +func (noopLogger) Trace(_ ...interface{}) {} + +// Tracef implements Logger. +func (noopLogger) Tracef(_ string, _ ...interface{}) {} + +// Debug implements Logger. +func (noopLogger) Debug(_ ...interface{}) {} + +// Debugf implements Logger. +func (noopLogger) Debugf(_ string, _ ...interface{}) {} + +// Info implements Logger. +func (noopLogger) Info(_ ...interface{}) {} + +// Infof implements Logger. +func (noopLogger) Infof(_ string, _ ...interface{}) {} + +// Warn implements Logger. +func (noopLogger) Warn(_ ...interface{}) error { return nil } + +// Warnf implements Logger. +func (noopLogger) Warnf(_ string, _ ...interface{}) error { return nil } + +// Error implements Logger. +func (noopLogger) Error(_ ...interface{}) error { return nil } + +// Errorf implements Logger. +func (noopLogger) Errorf(_ string, _ ...interface{}) error { return nil } + +// Critical implements Logger. +func (noopLogger) Critical(_ ...interface{}) error { return nil } + +// Criticalf implements Logger. +func (noopLogger) Criticalf(_ string, _ ...interface{}) error { return nil } + +// Flush implements Logger. +func (noopLogger) Flush() {} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/throttled.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/throttled.go new file mode 100644 index 00000000..3b81cee4 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/throttled.go @@ -0,0 +1,63 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package log + +import ( + "time" + + "go.uber.org/atomic" +) + +// NewThrottled returns a new throttled logger. The returned logger will allow up to n calls in +// a time period of length d. +func NewThrottled(n int, d time.Duration) *ThrottledLogger { + return &ThrottledLogger{ + n: uint64(n), + c: atomic.NewUint64(0), + d: d, + } +} + +// ThrottledLogger limits the number of log calls during a time window. To create a new logger +// use NewThrottled. +type ThrottledLogger struct { + n uint64 // number of log calls allowed during interval d + c *atomic.Uint64 // number of log calls performed during an interval d + d time.Duration +} + +type loggerFunc func(format string, params ...interface{}) + +func (tl *ThrottledLogger) log(logFunc loggerFunc, format string, params ...interface{}) { + c := tl.c.Inc() - 1 + if c == 0 { + // first call, trigger the reset + time.AfterFunc(tl.d, func() { tl.c.Store(0) }) + } + if c >= tl.n { + if c == tl.n { + logFunc("Too many similar messages, pausing up to %s...", tl.d) + } + return + } + logFunc(format, params...) +} + +// Error logs the message at the error level. +func (tl *ThrottledLogger) Error(format string, params ...interface{}) { + tl.log(Errorf, format, params...) +} + +// Warn logs the message at the warning level. 
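+// For example (illustrative): a logger built with NewThrottled(5, 10*time.Second) emits at most 5 messages per 10-second window; the next call logs a single "Too many similar messages" notice and further calls are dropped until the window resets.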
+func (tl *ThrottledLogger) Warn(format string, params ...interface{}) { + tl.log(Warnf, format, params...) +} + +// Write implements io.Writer. +func (tl *ThrottledLogger) Write(p []byte) (n int, err error) { + tl.Error(string(p)) + return len(p), nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/catalog.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/catalog.go new file mode 100644 index 00000000..27ff9a42 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/catalog.go @@ -0,0 +1,93 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package sampler + +import ( + "container/list" + "sync" + + "github.com/DataDog/datadog-agent/pkg/trace/log" +) + +// defaultServiceRateKey specifies the key for the default rate to be used by any service that +// doesn't have a rate specified. +const defaultServiceRateKey = "service:,env:" + +// maxCatalogEntries specifies the maximum number of entries allowed in the catalog. +const maxCatalogEntries = 5000 + +// serviceKeyCatalog reverse-maps service signatures to their generated hashes for +// easy lookup. +type serviceKeyCatalog struct { + mu sync.Mutex + items map[ServiceSignature]*list.Element + ll *list.List + maxEntries int +} + +type catalogEntry struct { + key ServiceSignature + sig Signature +} + +// newServiceLookup returns a new serviceKeyCatalog with maxEntries maximum number of entries. +// If maxEntries is 0, a default of 5000 (maxCatalogEntries) will be used. +func newServiceLookup(maxEntries int) *serviceKeyCatalog { + entries := maxCatalogEntries + if maxEntries > 0 { + entries = maxEntries + } + return &serviceKeyCatalog{ + items: make(map[ServiceSignature]*list.Element), + ll: list.New(), + maxEntries: entries, + } +} + +func (cat *serviceKeyCatalog) register(svcSig ServiceSignature) Signature { + cat.mu.Lock() + defer cat.mu.Unlock() + if el, ok := cat.items[svcSig]; ok { + // signature already exists, move to front and return already-computed hash + cat.ll.MoveToFront(el) + return el.Value.(catalogEntry).sig + } + // new signature, compute new hash + hash := svcSig.Hash() + el := cat.ll.PushFront(catalogEntry{key: svcSig, sig: hash}) + cat.items[svcSig] = el + if cat.ll.Len() > cat.maxEntries { + // list went beyond maximum allowed entries, remove the back of the list + del := cat.ll.Remove(cat.ll.Back()).(catalogEntry) + delete(cat.items, del.key) + log.Warnf("More than %d services in service-rates catalog. Dropping %v.", cat.maxEntries, del.key) + } + return hash +} + +// ratesByService returns a map of service signatures mapping to the rates identified using +// the signatures.
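+// Signatures with no entry in rates are evicted from the catalog as a side effect, and rates whose env matches the agent env are mirrored onto an env-less signature (see rateWithEmptyEnv in env.go).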
+func (cat *serviceKeyCatalog) ratesByService(agentEnv string, rates map[Signature]float64, defaultRate float64) map[ServiceSignature]float64 { + rbs := make(map[ServiceSignature]float64, len(rates)+1) + cat.mu.Lock() + defer cat.mu.Unlock() + for key, el := range cat.items { + sig := el.Value.(catalogEntry).sig + if rate, ok := rates[sig]; ok { + rbs[key] = rate + } else { + cat.ll.Remove(el) + delete(cat.items, key) + continue + } + + if rateWithEmptyEnv(key.Env, agentEnv) { + rbs[ServiceSignature{Name: key.Name}] = rbs[key] + } + } + rbs[ServiceSignature{}] = defaultRate + return rbs +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/coresampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/coresampler.go new file mode 100644 index 00000000..4b625ade --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/coresampler.go @@ -0,0 +1,270 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package sampler + +import ( + "sort" + "sync" + "time" + + "go.uber.org/atomic" + + "github.com/DataDog/datadog-go/v5/statsd" +) + +const ( + bucketDuration = 5 * time.Second + numBuckets = 6 + maxRateIncrease = 1.2 +) + +// Sampler is the main component of the sampling logic. +// Seen traces are counted per signature in a circular buffer +// of numBuckets. +// The sampler distributes a targetTPS uniformly across all +// signatures. The bucket with the maximum count over the period +// of the buffer is used to compute the sampling rates. +type Sampler struct { + // seen counts seen signatures by Signature in a circular buffer of numBuckets of bucketDuration. + // In the case of the PrioritySampler, chunks dropped in the Client are also taken into account.
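+ // With bucketDuration=5s and numBuckets=6 this amounts to a 30-second sliding window.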
+ seen map[Signature][numBuckets]float32 + // allSigsSeen counts all signatures in a circular buffer of numBuckets of bucketDuration + allSigsSeen [numBuckets]float32 + // lastBucketID is the index of the last bucket on which traces were counted + lastBucketID int64 + // rates maps each signature to its sampling rate + rates map[Signature]float64 + // lowestRate is the lowest rate of all signatures + lowestRate float64 + + // muSeen is a lock protecting the seen map and allSigsSeen counts + muSeen sync.RWMutex + // muRates is a lock protecting rates map + muRates sync.RWMutex + + // Maximum limit to the total number of traces per second to sample + targetTPS *atomic.Float64 + // extraRate is an extra raw sampling rate to apply on top of the sampler rate + extraRate float64 +} + +// newSampler returns an initialized Sampler +func newSampler(extraRate float64, targetTPS float64) *Sampler { + s := &Sampler{ + seen: make(map[Signature][numBuckets]float32), + extraRate: extraRate, + targetTPS: atomic.NewFloat64(targetTPS), + } + return s +} + +// updateTargetTPS updates the targetTPS and all rates +func (s *Sampler) updateTargetTPS(targetTPS float64) { + previousTargetTPS := s.targetTPS.Load() + s.targetTPS.Store(targetTPS) + + if previousTargetTPS == 0 { + return + } + ratio := targetTPS / previousTargetTPS + + s.muRates.Lock() + for sig, rate := range s.rates { + newRate := min(rate*ratio, 1) + s.rates[sig] = newRate + } + s.muRates.Unlock() +} + +// countWeightedSig counts a trace sampled by the sampler and updates rates +// if buckets are rotated +func (s *Sampler) countWeightedSig(now time.Time, signature Signature, n float32) bool { + bucketID := now.Unix() / int64(bucketDuration.Seconds()) + s.muSeen.Lock() + prevBucketID := s.lastBucketID + s.lastBucketID = bucketID + + // pass through each bucket, zero expired ones and adjust sampling rates + updateRates := prevBucketID != bucketID + if updateRates { + s.updateRates(prevBucketID, bucketID) + } + + buckets, ok := s.seen[signature] + if !ok { + buckets = [numBuckets]float32{} + } + s.allSigsSeen[bucketID%numBuckets] += n + buckets[bucketID%numBuckets] += n + s.seen[signature] = buckets + + s.muSeen.Unlock() + return updateRates +} + +// updateRates distributes TPS on each signature and applies it to the moving +// max of seen buckets. +// Rate increases are bounded at 20% per evaluation; it takes 13 evaluations (1.2**13 ≈ 10.6) +// to increase a sampling rate 10-fold, in about 1 min. +// A caller of updateRates must hold a lock on s.muSeen (e.g. as used by countWeightedSig).
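+// For example (illustrative): with targetTPS=10 and three signatures seen at 1, 4, and 95 TPS, computeTPSPerSig lets the low-volume signatures keep their full volume and grants each remaining signature up to 5 TPS, so the computed rates are 1.0, 1.0, and 5/95 ≈ 0.05 (before the 20% increase cap applies).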
+func (s *Sampler) updateRates(previousBucket, newBucket int64) { + if len(s.seen) == 0 { + return + } + rates := make(map[Signature]float64, len(s.seen)) + + seenTPSs := make([]float64, 0, len(s.seen)) + sigs := make([]Signature, 0, len(s.seen)) + for sig, buckets := range s.seen { + maxBucket, buckets := zeroAndGetMax(buckets, previousBucket, newBucket) + s.seen[sig] = buckets + seenTPSs = append(seenTPSs, float64(maxBucket)/bucketDuration.Seconds()) + sigs = append(sigs, sig) + } + _, allSigsSeen := zeroAndGetMax(s.allSigsSeen, previousBucket, newBucket) + s.allSigsSeen = allSigsSeen + + tpsPerSig := computeTPSPerSig(s.targetTPS.Load(), seenTPSs) + + s.muRates.Lock() + defer s.muRates.Unlock() + s.lowestRate = 1 + for i, sig := range sigs { + seenTPS := seenTPSs[i] + rate := 1.0 + if tpsPerSig < seenTPS && seenTPS > 0 { + rate = tpsPerSig / seenTPS + } + // cap the rate increase at 20% + if prevRate, ok := s.rates[sig]; ok && prevRate != 0 { + if rate/prevRate > maxRateIncrease { + rate = prevRate * maxRateIncrease + } + } + if rate > 1.0 { + rate = 1.0 + } + // no traffic on this signature, clean it up from the sampler + if rate == 1.0 && seenTPS == 0 { + delete(s.seen, sig) + continue + } + if rate < s.lowestRate { + s.lowestRate = rate + } + rates[sig] = rate + } + s.rates = rates +} + +// computeTPSPerSig distributes TPS looking at the seenTPS of all signatures. +// By default it spreads the TPS uniformly across all signatures. If a signature +// is low volume and does not use all of its TPS, the remainder is spread uniformly +// across all other signatures. +func computeTPSPerSig(targetTPS float64, seen []float64) float64 { + sorted := make([]float64, len(seen)) + copy(sorted, seen) + sort.Float64s(sorted) + + sigTarget := targetTPS / float64(len(sorted)) + + for i, c := range sorted { + if c >= sigTarget || i == len(sorted)-1 { + break + } + targetTPS -= c + sigTarget = targetTPS / float64((len(sorted) - i - 1)) + } + return sigTarget +} + +// zeroAndGetMax zeroes expired buckets and returns the max count +func zeroAndGetMax(buckets [numBuckets]float32, previousBucket, newBucket int64) (float32, [numBuckets]float32) { + maxBucket := float32(0) + for i := previousBucket + 1; i <= previousBucket+numBuckets; i++ { + index := i % numBuckets + + // if a complete rotation happened between previousBucket and newBucket + // all buckets will be zeroed + if i < newBucket { + buckets[index] = 0 + continue + } + + value := buckets[index] + if value > maxBucket { + maxBucket = value + } + + // zero only after taking into account the previous value of the bucket + // overridden by this rotation. This allows all buckets to be taken into account + if i == newBucket { + buckets[index] = 0 + } + } + return maxBucket, buckets +} + +// getSignatureSampleRate returns the sampling rate to apply to a signature +func (s *Sampler) getSignatureSampleRate(sig Signature) float64 { + s.muRates.RLock() + rate, ok := s.rates[sig] + s.muRates.RUnlock() + if !ok { + return s.defaultRate() + } + return rate * s.extraRate +} + +// getAllSignatureSampleRates returns the sampling rate to apply to each signature +func (s *Sampler) getAllSignatureSampleRates() (map[Signature]float64, float64) { + s.muRates.RLock() + rates := make(map[Signature]float64, len(s.rates)) + for sig, val := range s.rates { + rates[sig] = val * s.extraRate + } + s.muRates.RUnlock() + return rates, s.defaultRate() +} + +// defaultRate returns the rate to apply to unknown signatures.
It's computed by considering +// the moving max of all signatures seen by the sampler, and the lowest rate stored. +// Callers of defaultRate must hold a RLock on s.muRates +func (s *Sampler) defaultRate() float64 { + targetTPS := s.targetTPS.Load() + if targetTPS == 0 { + return 0 + } + + var maxSeen float32 + s.muSeen.RLock() + defer s.muSeen.RUnlock() + for _, c := range s.allSigsSeen { + if c > maxSeen { + maxSeen = c + } + } + seenTPS := float64(maxSeen) / bucketDuration.Seconds() + + rate := 1.0 + if targetTPS < seenTPS && seenTPS > 0 { + rate = targetTPS / seenTPS + } + if s.lowestRate < rate && s.lowestRate != 0 { + return s.lowestRate + } + return rate +} + +func (s *Sampler) size() int64 { + s.muSeen.RLock() + defer s.muSeen.RUnlock() + return int64(len(s.seen)) +} + +func (s *Sampler) report(statsd statsd.ClientInterface, name Name) { + _ = statsd.Gauge(MetricSamplerSize, float64(s.size()), []string{"sampler:" + name.String()}, 1) +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/dynamic_config.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/dynamic_config.go new file mode 100644 index 00000000..792a2108 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/dynamic_config.go @@ -0,0 +1,115 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package sampler + +import ( + "math" + "strconv" + "sync" + "time" + + "go.uber.org/atomic" +) + +// DynamicConfig contains configuration items which may change +// dynamically over time. +type DynamicConfig struct { + // RateByService contains the rate for each service/env tuple, + // used in priority sampling by client libs. + RateByService RateByService +} + +// NewDynamicConfig creates a new dynamic config object which maps service signatures +// to their corresponding sampling rates. Each service will have a default assigned +// matching the service rate of the specified env. +func NewDynamicConfig() *DynamicConfig { + return &DynamicConfig{RateByService: RateByService{}} +} + +// State specifies the current state of DynamicConfig +type State struct { + Rates map[string]float64 + Version string +} + +// rc specifies a pair of rate and color. +// color is used for detecting changes. +type rc struct { + r float64 + c int8 +} + +// RateByService stores the sampling rate per service. It is thread-safe, so +// one can read/write on it concurrently, using getters and setters. +type RateByService struct { + mu sync.RWMutex // guards rates + // currentColor is either 0 or 1. It changes every time `SetAll()` is called. + // When `SetAll()` is called, we paint affected keys with `currentColor`. + // If a key's color doesn't match `currentColor`, that key no longer exists. + currentColor int8 + rates map[string]*rc + version string +} + +// SetAll sets the sampling rate for all services. If a service/env is not +// in the map, then the entry is removed.
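The comment above describes a two-color mark-and-sweep over the rates map; below is a minimal standalone sketch of the same idea, using hypothetical names (entry, rateStore) rather than the vendored types:

```go
package main

import "fmt"

type entry struct {
	rate  float64
	color int8
}

type rateStore struct {
	color int8
	rates map[string]*entry
}

// setAll paints every incoming key with the new color, then deletes any key
// still wearing the old color, i.e. keys absent from this update.
func (s *rateStore) setAll(updates map[string]float64) {
	s.color = 1 - s.color
	for k, r := range updates {
		if e, ok := s.rates[k]; ok {
			e.rate, e.color = r, s.color
			continue
		}
		s.rates[k] = &entry{rate: r, color: s.color}
	}
	for k, e := range s.rates {
		if e.color != s.color {
			delete(s.rates, k) // stale key: not part of this update
		}
	}
}

func main() {
	s := &rateStore{rates: map[string]*entry{}}
	s.setAll(map[string]float64{"a": 0.5, "b": 1})
	s.setAll(map[string]float64{"a": 0.25}) // "b" is swept away
	fmt.Println(len(s.rates))               // 1
}
```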
+func (rbs *RateByService) SetAll(rates map[ServiceSignature]float64) { + rbs.mu.Lock() + defer rbs.mu.Unlock() + + rbs.currentColor = 1 - rbs.currentColor + changed := false + if rbs.rates == nil { + rbs.rates = make(map[string]*rc, len(rates)) + } + for s, r := range rates { + ks := s.String() + r = math.Min(math.Max(r, 0), 1) + if oldV, ok := rbs.rates[ks]; !ok || oldV.r != r { + changed = true + rbs.rates[ks] = &rc{ + r: r, + } + } + rbs.rates[ks].c = rbs.currentColor + } + for k, v := range rbs.rates { + if v.c != rbs.currentColor { + changed = true + delete(rbs.rates, k) + } + } + if changed { + rbs.version = newVersion() + } +} + +// GetNewState returns the current state if the given version is different from the local version. +func (rbs *RateByService) GetNewState(version string) State { + rbs.mu.RLock() + defer rbs.mu.RUnlock() + + if version != "" && version == rbs.version { + return State{ + Version: version, + } + } + ret := State{ + Rates: make(map[string]float64, len(rbs.rates)), + Version: rbs.version, + } + for k, v := range rbs.rates { + ret.Rates[k] = v.r + } + + return ret +} + +var localVersion atomic.Int64 + +func newVersion() string { + return strconv.FormatInt(time.Now().Unix(), 16) + "-" + strconv.FormatInt(localVersion.Inc(), 16) +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/env.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/env.go new file mode 100644 index 00000000..0978a606 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/env.go @@ -0,0 +1,23 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022-present Datadog, Inc. + +package sampler + +// tracers with an env value of "" or agentEnv share +// the same sampler. This is required because remote configuration is unaware +// of the differing agentEnv and tracerEnv values +func toSamplerEnv(tracerEnv, agentEnv string) string { + env := tracerEnv + if env == "" { + env = agentEnv + } + return env +} + +// tracers with an empty env are given the same rate +// as tracers with agentEnv +func rateWithEmptyEnv(samplerEnv, agentEnv string) bool { + return samplerEnv == agentEnv +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/metrics.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/metrics.go new file mode 100644 index 00000000..a9523347 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/metrics.go @@ -0,0 +1,204 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package sampler + +import ( + "sync" + "time" + + "github.com/DataDog/datadog-agent/pkg/trace/watchdog" + "github.com/DataDog/datadog-go/v5/statsd" +) + +const ( + // MetricSamplerSeen is the metric name for the number of traces seen by the sampler. + MetricSamplerSeen = "datadog.trace_agent.sampler.seen" + // MetricSamplerKept is the metric name for the number of traces kept by the sampler. + MetricSamplerKept = "datadog.trace_agent.sampler.kept" + // MetricSamplerSize is the current number of unique trace signatures tracked for stats calculation. + MetricSamplerSize = "datadog.trace_agent.sampler.size" +) + +// Name represents the name of the sampler.
+type Name uint8 + +const ( + // NameUnknown is the default value. It should not be used. + NameUnknown Name = iota + // NamePriority is the name of the priority sampler. + NamePriority + // NameNoPriority is the name of the no priority sampler. + NameNoPriority + // NameError is the name of the error sampler. + NameError + // NameRare is the name of the rare sampler. + NameRare + // NameProbabilistic is the name of the probabilistic sampler. + NameProbabilistic +) + +// String returns the string representation of the Name. +func (n Name) String() string { + switch n { + case NamePriority: + return "priority" + case NameNoPriority: + return "no_priority" + case NameError: + return "error" + case NameRare: + return "rare" + case NameProbabilistic: + return "probabilistic" + default: + return "unknown" + } +} + +func (n Name) shouldAddEnvTag() bool { + return n == NamePriority || n == NameNoPriority || n == NameRare || n == NameError +} + +// Metrics is a structure to record metrics for the different samplers. +type Metrics struct { + statsd statsd.ClientInterface + valueMutex sync.Mutex + value map[MetricsKey]metricsValue + additionalReporters []AdditionalMetricsReporter + startMutex sync.Mutex + ticker *time.Ticker + started bool +} + +type metricsValue struct { + seen int64 + kept int64 +} + +// NewMetrics creates a new Metrics. +func NewMetrics(statsd statsd.ClientInterface) *Metrics { + return &Metrics{ + statsd: statsd, + value: make(map[MetricsKey]metricsValue), + } +} + +// AdditionalMetricsReporter reports additional sampler metrics. +// Metrics reported through this interface are reported at each Metrics tick. +type AdditionalMetricsReporter interface { + report(statsd statsd.ClientInterface) +} + +// Add sampler metrics reporter. +func (m *Metrics) Add(mr ...AdditionalMetricsReporter) { + m.additionalReporters = append(m.additionalReporters, mr...) +} + +// MetricsKey represents the key for the metrics. +type MetricsKey struct { + targetService string + targetEnv string + samplingPriority SamplingPriority + sampler Name +} + +// NewMetricsKey creates a new MetricsKey. +func NewMetricsKey(service, env string, sampler Name, samplingPriority SamplingPriority) MetricsKey { + mk := MetricsKey{ + targetService: service, + targetEnv: env, + sampler: sampler, + } + if sampler == NamePriority { + mk.samplingPriority = samplingPriority + } + return mk +} + +func (k MetricsKey) tags() []string { + tags := make([]string, 0, 4) // Pre-allocate number of fields for efficiency + tags = append(tags, "sampler:"+k.sampler.String()) + if k.sampler == NamePriority { + tags = append(tags, "sampling_priority:"+k.samplingPriority.tagValue()) + } + if k.targetService != "" { + tags = append(tags, "target_service:"+k.targetService) + } + if k.targetEnv != "" && k.sampler.shouldAddEnvTag() { + tags = append(tags, "target_env:"+k.targetEnv) + } + return tags +} + +// RecordMetricsKey records if metricsKey has been seen before and whether it was kept or not. +func (m *Metrics) RecordMetricsKey(sampled bool, metricsKey MetricsKey) { + m.valueMutex.Lock() + defer m.valueMutex.Unlock() + v, ok := m.value[metricsKey] + if !ok { + mv := metricsValue{seen: 1} + if sampled { + mv.kept = 1 + } + m.value[metricsKey] = mv + return + } + v.seen++ + if sampled { + v.kept++ + } + m.value[metricsKey] = v +} + +// Start the metrics reporting loop. 
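Metrics, above, accumulates seen/kept counts per key under a mutex and drains them on a ticker so statsd calls stay off the hot path; the core accumulate-then-swap pattern, reduced to a sketch with hypothetical types:

```go
package main

import (
	"fmt"
	"sync"
)

type counts struct{ seen, kept int64 }

type tally struct {
	mu sync.Mutex
	m  map[string]counts
}

// record buckets one decision under its key; cheap enough for the hot path.
func (t *tally) record(key string, kept bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	c := t.m[key]
	c.seen++
	if kept {
		c.kept++
	}
	t.m[key] = c
}

// drain swaps the map out so reporting happens outside the lock's hot path.
func (t *tally) drain() map[string]counts {
	t.mu.Lock()
	defer t.mu.Unlock()
	out := t.m
	t.m = make(map[string]counts)
	return out
}

func main() {
	t := &tally{m: make(map[string]counts)}
	t.record("sampler:priority", true)
	t.record("sampler:priority", false)
	fmt.Println(t.drain()["sampler:priority"]) // {2 1}
}
```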
+func (m *Metrics) Start() { + m.startMutex.Lock() + defer m.startMutex.Unlock() + if m.started { + return + } + m.started = true + m.ticker = time.NewTicker(10 * time.Second) + go func() { + defer watchdog.LogOnPanic(m.statsd) + for range m.ticker.C { + m.Report() + } + }() +} + +// Stop the metrics reporting loop. +func (m *Metrics) Stop() { + m.startMutex.Lock() + if !m.started { + m.startMutex.Unlock() + return + } + m.started = false + m.ticker.Stop() + m.startMutex.Unlock() + m.Report() +} + +// Report reports the metrics and additional sampler metrics. +func (m *Metrics) Report() { + m.valueMutex.Lock() + for key, value := range m.value { + tags := key.tags() + if value.seen > 0 { + _ = m.statsd.Count(MetricSamplerSeen, value.seen, tags, 1) + } + if value.kept > 0 { + _ = m.statsd.Count(MetricSamplerKept, value.kept, tags, 1) + } + } + m.value = make(map[MetricsKey]metricsValue) // reset counters + m.valueMutex.Unlock() + + for _, mr := range m.additionalReporters { + mr.report(m.statsd) + } +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/prioritysampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/prioritysampler.go new file mode 100644 index 00000000..2bbb0f33 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/prioritysampler.go @@ -0,0 +1,158 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package sampler contains all the logic of the agent-side trace sampling +// +// The current implementation is based on the scoring of the "signature" of each trace +// Based on the score, we get a sample rate to apply to the given trace +// +// The current score implementation is super-simple: it is a counter with polynomial decay per signature. +// We increment it for each incoming trace then we periodically divide the score by two every X seconds. +// Right after the division, the score is an approximation of the number of received signatures over X seconds. +// It is different from the scoring in the Agent. +// +// Since the sampling can happen at different levels (client, agent, server) or depending on different rules, +// we have to track the sample rate applied at previous steps. This way, sampling twice at 50% can result in an +// effective 25% sampling. The rate is stored as a metric in the trace root. +package sampler + +import ( + "time" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/config" + "github.com/DataDog/datadog-go/v5/statsd" +) + +const ( + deprecatedRateKey = "_sampling_priority_rate_v1" + agentRateKey = "_dd.agent_psr" + ruleRateKey = "_dd.rule_psr" +) + +// PrioritySampler computes priority rates per tracerEnv/service tuple to apply in a feedback loop with trace-agent clients. +// Computed rates are sent in http responses to trace-agent. The rates are continuously adjusted as a function +// of the received traffic to match a targetTPS (target traces per second). +type PrioritySampler struct { + agentEnv string + // sampler targetTPS is defined locally on the agent + // This sampler tries to get the received number of sampled trace chunks/s to match its targetTPS. + sampler *Sampler + + // rateByService contains the sampling rates in % to communicate with trace-agent clients.
+ // This struct is shared with the agent API which sends the rates in http responses to spans post requests + rateByService *RateByService + catalog *serviceKeyCatalog +} + +// NewPrioritySampler returns an initialized PrioritySampler +func NewPrioritySampler(conf *config.AgentConfig, dynConf *DynamicConfig) *PrioritySampler { + s := &PrioritySampler{ + agentEnv: conf.DefaultEnv, + sampler: newSampler(conf.ExtraSampleRate, conf.TargetTPS), + rateByService: &dynConf.RateByService, + catalog: newServiceLookup(conf.MaxCatalogEntries), + } + return s +} + +var _ AdditionalMetricsReporter = (*PrioritySampler)(nil) + +func (s *PrioritySampler) report(statsd statsd.ClientInterface) { + s.sampler.report(statsd, NamePriority) +} + +// UpdateTargetTPS updates the target tps +func (s *PrioritySampler) UpdateTargetTPS(targetTPS float64) { + s.sampler.updateTargetTPS(targetTPS) +} + +// GetTargetTPS returns the target tps +func (s *PrioritySampler) GetTargetTPS() float64 { + return s.sampler.targetTPS.Load() +} + +// update sampling rates +func (s *PrioritySampler) updateRates() { + s.rateByService.SetAll(s.ratesByService()) +} + +// Sample counts an incoming trace and returns the trace sampling decision +func (s *PrioritySampler) Sample(now time.Time, trace *pb.TraceChunk, root *pb.Span, tracerEnv string, clientDroppedP0sWeight float64) bool { + // Extra safety, just in case one trace is empty + if len(trace.Spans) == 0 { + return false + } + + samplingPriority, _ := GetSamplingPriority(trace) + // Regardless of rates, sampling here is based on the metadata set + // by the client library, which, in turn, is based on agent hints; + // the rule of thumb is: respect client choice. + sampled := samplingPriority.IsKeep() + + serviceSignature := ServiceSignature{Name: root.Service, Env: toSamplerEnv(tracerEnv, s.agentEnv)} + + // Short-circuit and return without counting the trace in the sampling rate logic + // if its value has not been set automatically by the client lib. + // The feedback loop should be scoped to the values it can act upon. + if samplingPriority < 0 { + return sampled + } + if samplingPriority > 1 { + return sampled + } + + signature := s.catalog.register(serviceSignature) + + // Update sampler state by counting this trace + s.countSignature(now, root, signature, clientDroppedP0sWeight) + + if sampled { + s.applyRate(root, signature) + } + return sampled +} + +func (s *PrioritySampler) applyRate(root *pb.Span, signature Signature) float64 { + if root.ParentID != 0 { + return 1.0 + } + // recent tracers annotate roots with applied priority rate + // agentRateKey is set when the agent computed rate is applied + if rate, ok := getMetric(root, agentRateKey); ok { + return rate + } + // ruleRateKey is set when a tracer rule rate is applied + if rate, ok := getMetric(root, ruleRateKey); ok { + return rate + } + // slow path used by older tracer versions + // dd-trace-go used to set the rate in deprecatedRateKey + if rate, ok := getMetric(root, deprecatedRateKey); ok { + return rate + } + rate := s.sampler.getSignatureSampleRate(signature) + + setMetric(root, deprecatedRateKey, rate) + + return rate +} + +// countSignature counts all chunks received with local chunk root signature.
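Sample, above, feeds the feedback loop only for automatically set priorities; explicit user decisions are honored but excluded from rate computation. The gate in isolation (illustrative helper, not the vendored API):

```go
package main

import "fmt"

// feedsLoop reports whether a chunk's priority should be counted by the
// rate-adjustment loop: only automatic decisions (0 or 1) qualify, while
// explicit user drops (-1) and keeps (2) bypass it.
func feedsLoop(priority int8) bool {
	return priority == 0 || priority == 1
}

func main() {
	for _, p := range []int8{-1, 0, 1, 2} {
		fmt.Printf("priority %2d -> counted: %v\n", p, feedsLoop(p))
	}
}
```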
+func (s *PrioritySampler) countSignature(now time.Time, root *pb.Span, signature Signature, clientDroppedP0Weight float64) { + rootWeight := weightRoot(root) + newRates := s.sampler.countWeightedSig(now, signature, rootWeight+float32(clientDroppedP0Weight)) + + if newRates { + s.updateRates() + } +} + +// ratesByService returns all rates by service; this information is useful for +// agents to pick the right service rate. +func (s *PrioritySampler) ratesByService() map[ServiceSignature]float64 { + rates, defaultRate := s.sampler.getAllSignatureSampleRates() + return s.catalog.ratesByService(s.agentEnv, rates, defaultRate) +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/probabilistic.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/probabilistic.go new file mode 100644 index 00000000..28102c3e --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/probabilistic.go @@ -0,0 +1,110 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022-present Datadog, Inc. + +package sampler + +import ( + "encoding/binary" + "encoding/hex" + "hash/fnv" + "strconv" + + "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/config" + "github.com/DataDog/datadog-agent/pkg/trace/log" +) + +const ( + // These constants exist to match the behavior of the OTEL probabilistic sampler. + // See: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/6229c6ad1c49e9cc4b41a8aab8cb5a94a7b82ea5/processor/probabilisticsamplerprocessor/tracesprocessor.go#L38-L42 + numProbabilisticBuckets = 0x4000 + bitMaskHashBuckets = numProbabilisticBuckets - 1 + percentageScaleFactor = numProbabilisticBuckets / 100.0 + + // probRateKey indicates the percentage sampling rate configured for the probabilistic sampler + probRateKey = "_dd.prob_sr" +) + +// ProbabilisticSampler is a sampler that overrides all other samplers; +// it deterministically samples incoming traces by a hash of their trace ID +type ProbabilisticSampler struct { + enabled bool + hashSeed []byte + scaledSamplingPercentage uint32 + samplingPercentage float64 + // fullTraceIDMode looks at the full 128-bit trace ID to make the sampling decision + // This can be useful when trying to run this probabilistic sampler alongside the + // OTEL probabilistic sampler processor which always looks at the full 128-bit trace ID. + // This is disabled by default to ensure compatibility in distributed systems where legacy applications may + // drop the top 64 bits of the trace ID.
+ fullTraceIDMode bool +} + +// NewProbabilisticSampler returns a new ProbabilisticSampler that deterministically samples +// a given percentage of incoming spans based on their trace ID +func NewProbabilisticSampler(conf *config.AgentConfig) *ProbabilisticSampler { + hashSeedBytes := make([]byte, 4) + binary.LittleEndian.PutUint32(hashSeedBytes, conf.ProbabilisticSamplerHashSeed) + _, fullTraceIDMode := conf.Features["probabilistic_sampler_full_trace_id"] + return &ProbabilisticSampler{ + enabled: conf.ProbabilisticSamplerEnabled, + hashSeed: hashSeedBytes, + scaledSamplingPercentage: uint32(conf.ProbabilisticSamplerSamplingPercentage * percentageScaleFactor), + samplingPercentage: float64(conf.ProbabilisticSamplerSamplingPercentage) / 100., + fullTraceIDMode: fullTraceIDMode, + } +} + +// Sample a trace given the chunk's root span, returns true if the trace should be kept +func (ps *ProbabilisticSampler) Sample(root *trace.Span) bool { + if !ps.enabled { + return false + } + + tid := make([]byte, 16) + var err error + if !ps.fullTraceIDMode { + binary.BigEndian.PutUint64(tid, root.TraceID) + } else { + tid, err = get128BitTraceID(root) + } + if err != nil { + log.Errorf("Unable to probabilistically sample, failed to determine 128-bit trace ID from incoming span: %v", err) + return false + } + + hasher := fnv.New32a() + _, _ = hasher.Write(ps.hashSeed) + _, _ = hasher.Write(tid) + hash := hasher.Sum32() + keep := hash&bitMaskHashBuckets < ps.scaledSamplingPercentage + if keep { + setMetric(root, probRateKey, ps.samplingPercentage) + } + return keep +} + +func get128BitTraceID(span *trace.Span) ([]byte, error) { + // If it's an otel span the whole trace ID is in otel.trace + if tid, ok := span.Meta["otel.trace_id"]; ok { + bs, err := hex.DecodeString(tid) + if err != nil { + return nil, err + } + return bs, nil + } + tid := make([]byte, 16) + binary.BigEndian.PutUint64(tid[8:], span.TraceID) + // Get hex encoded upper bits for datadog spans + // If no value is found we can use the default `0` value as that's what will have been propagated + if upper, ok := span.Meta["_dd.p.tid"]; ok { + u, err := strconv.ParseUint(upper, 16, 64) + if err != nil { + return nil, err + } + binary.BigEndian.PutUint64(tid[:8], u) + } + return tid, nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/rare_sampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/rare_sampler.go new file mode 100644 index 00000000..aad09f58 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/rare_sampler.go @@ -0,0 +1,227 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package sampler + +import ( + "sync" + "time" + + "go.uber.org/atomic" + "golang.org/x/time/rate" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/config" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" + "github.com/DataDog/datadog-go/v5/statsd" +) + +const ( + // ttlRenewalPeriod specifies the frequency at which we will upload cached entries. + ttlRenewalPeriod = 1 * time.Minute + // rareSamplerBurst sizes the token store used by the rate limiter. + rareSamplerBurst = 50 + rareKey = "_dd.rare" + + // MetricsRareHits is the metric name for the number of traces kept by the rare sampler. 
+ MetricsRareHits = "datadog.trace_agent.sampler.rare.hits" + // MetricsRareMisses is the metric name for the number of traces missed by the rare sampler. + MetricsRareMisses = "datadog.trace_agent.sampler.rare.misses" + // MetricsRareShrinks is the metric name for the number of times the rare sampler has shrunk. + MetricsRareShrinks = "datadog.trace_agent.sampler.rare.shrinks" +) + +// RareSampler samples traces that are not caught by the Priority sampler. +// It ensures that we sample traces for each combination of +// (env, service, name, resource, error type, http status) seen on a top level or measured span +// for which we did not see any span with a priority > 0 (sampled by Priority). +// The resulting sampled traces will likely be incomplete and will be flagged with +// the rareKey metric set to 1. +type RareSampler struct { + enabled *atomic.Bool + hits *atomic.Int64 + misses *atomic.Int64 + shrinks *atomic.Int64 + mu sync.RWMutex + + limiter *rate.Limiter + ttl time.Duration + cardinality int + seen map[Signature]*seenSpans +} + +// NewRareSampler returns a RareSampler that ensures that we sample combinations +// of env, service, name, resource, http-status, error type for each top level or measured span +func NewRareSampler(conf *config.AgentConfig) *RareSampler { + e := &RareSampler{ + enabled: atomic.NewBool(conf.RareSamplerEnabled), + hits: atomic.NewInt64(0), + misses: atomic.NewInt64(0), + shrinks: atomic.NewInt64(0), + limiter: rate.NewLimiter(rate.Limit(conf.RareSamplerTPS), rareSamplerBurst), + ttl: conf.RareSamplerCooldownPeriod, + cardinality: conf.RareSamplerCardinality, + seen: make(map[Signature]*seenSpans), + } + return e +} + +// Sample a trace; returns true if the trace was sampled (should be kept) +func (e *RareSampler) Sample(now time.Time, t *pb.TraceChunk, env string) bool { + + if !e.enabled.Load() { + return false + } + return e.handleTrace(now, env, t) +} + +// SetEnabled marks the sampler as enabled or disabled +func (e *RareSampler) SetEnabled(enabled bool) { + e.enabled.Store(enabled) +} + +// IsEnabled returns whether the sampler is enabled +func (e *RareSampler) IsEnabled() bool { + return e.enabled.Load() +} + +func (e *RareSampler) handlePriorityTrace(now time.Time, env string, t *pb.TraceChunk, ttl time.Duration) { + expire := now.Add(ttl) + for _, s := range t.Spans { + if !traceutil.HasTopLevel(s) && !traceutil.IsMeasured(s) { + continue + } + e.addSpan(expire, env, s) + } +} + +func (e *RareSampler) handleTrace(now time.Time, env string, t *pb.TraceChunk) bool { + var sampled bool + for _, s := range t.Spans { + if !traceutil.HasTopLevel(s) && !traceutil.IsMeasured(s) { + continue + } + if sampled = e.sampleSpan(now, env, s); sampled { + break + } + } + + if sampled { + e.handlePriorityTrace(now, env, t, e.ttl) + } + return sampled +} + +// addSpan adds a span to the seenSpans with an expire time. +func (e *RareSampler) addSpan(expire time.Time, env string, s *pb.Span) { + shardSig := ServiceSignature{env, s.Service}.Hash() + ss := e.loadSeenSpans(shardSig) + ss.add(expire, s) +} + +// sampleSpan samples a span if it's not in the seenSpan set. If the span is sampled +// it's added to the seenSpans set.
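sampleSpan, below, spends a token from the golang.org/x/time/rate limiter configured above; a minimal demonstration of that limiter's burst-then-refill behavior (the 5 TPS figure is illustrative, the burst of 50 mirrors rareSamplerBurst):

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// 5 tokens/s with a burst bucket of 50.
	lim := rate.NewLimiter(rate.Limit(5), 50)
	allowed := 0
	for i := 0; i < 100; i++ {
		if lim.Allow() { // non-blocking: spends a token if one is available
			allowed++
		}
	}
	fmt.Println(allowed) // ~50: the burst drains, then tokens refill at 5/s
}
```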
+func (e *RareSampler) sampleSpan(now time.Time, env string, s *pb.Span) bool { + var sampled bool + shardSig := ServiceSignature{env, s.Service}.Hash() + ss := e.loadSeenSpans(shardSig) + sig := ss.sign(s) + expire, ok := ss.getExpire(sig) + if now.After(expire) || !ok { + sampled = e.limiter.Allow() + if sampled { + ss.add(now.Add(e.ttl), s) + e.hits.Inc() + traceutil.SetMetric(s, rareKey, 1) + } else { + e.misses.Inc() + } + } + return sampled +} + +func (e *RareSampler) loadSeenSpans(shardSig Signature) *seenSpans { + e.mu.RLock() + s, ok := e.seen[shardSig] + e.mu.RUnlock() + if ok { + return s + } + s = &seenSpans{ + expires: make(map[spanHash]time.Time), + totalSamplerShrinks: e.shrinks, + cardinality: e.cardinality, + } + e.mu.Lock() + e.seen[shardSig] = s + e.mu.Unlock() + return s +} + +func (e *RareSampler) report(statsd statsd.ClientInterface) { + _ = statsd.Count(MetricsRareHits, e.hits.Swap(0), nil, 1) + _ = statsd.Count(MetricsRareMisses, e.misses.Swap(0), nil, 1) + _ = statsd.Gauge(MetricsRareShrinks, float64(e.shrinks.Load()), nil, 1) +} + +// seenSpans keeps record of a set of spans. +type seenSpans struct { + mu sync.RWMutex + // expires contains expire time of each span seen. + expires map[spanHash]time.Time + // shrunk characterizes seenSpans when it's limited in size by the cardinality limit. + shrunk bool + // totalSamplerShrinks is the reference to the total number of shrinks reported by RareSampler. + totalSamplerShrinks *atomic.Int64 + // cardinality limits the number of spans considered per combination of (env, service). + cardinality int +} + +func (ss *seenSpans) add(expire time.Time, s *pb.Span) { + sig := ss.sign(s) + storedExpire, ok := ss.getExpire(sig) + if ok && expire.Sub(storedExpire) < ttlRenewalPeriod { + return + } + // slow path + ss.mu.Lock() + ss.expires[sig] = expire + + // if cardinality limit reached, shrink + size := len(ss.expires) + if size > ss.cardinality { + ss.shrink() + } + ss.mu.Unlock() +} + +// shrink limits the cardinality of signatures considered and the memory usage. +// This ensures that a service with a high cardinality of resources does not consume +// all sampling tokens. The cardinality limit matches a backend limit. +// This function is not thread safe and should be called while holding ss.mu +func (ss *seenSpans) shrink() { + newExpires := make(map[spanHash]time.Time, ss.cardinality) + for h, expire := range ss.expires { + newExpires[h%spanHash(ss.cardinality)] = expire + } + ss.expires = newExpires + ss.shrunk = true + ss.totalSamplerShrinks.Inc() +} + +func (ss *seenSpans) getExpire(h spanHash) (time.Time, bool) { + ss.mu.RLock() + expire, ok := ss.expires[h] + ss.mu.RUnlock() + return expire, ok +} + +func (ss *seenSpans) sign(s *pb.Span) spanHash { + h := computeSpanHash(s, "", true) + if ss.shrunk { + h = h % spanHash(ss.cardinality) + } + return h +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/sampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/sampler.go new file mode 100644 index 00000000..aec01021 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/sampler.go @@ -0,0 +1,254 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc.
+ +// Package sampler contains all the logic of the agent-side trace sampling +package sampler + +import ( + "math" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" +) + +const ( + // KeySamplingRateGlobal is a metric key holding the global sampling rate. + KeySamplingRateGlobal = "_sample_rate" + + // KeySamplingRateClient is a metric key holding the client-set sampling rate for APM events. + KeySamplingRateClient = "_dd1.sr.rcusr" + + // KeySamplingRatePreSampler is a metric key holding the API rate limiter's rate for APM events. + KeySamplingRatePreSampler = "_dd1.sr.rapre" + + // KeySamplingRateEventExtraction is the key of the metric storing the event extraction rate on an APM event. + KeySamplingRateEventExtraction = "_dd1.sr.eausr" + + // KeySamplingRateMaxEPSSampler is the key of the metric storing the max eps sampler rate on an APM event. + KeySamplingRateMaxEPSSampler = "_dd1.sr.eamax" + + // KeyErrorType is the key of the error type in the meta map + KeyErrorType = "error.type" + + // KeyAnalyzedSpans is the metric key which specifies if a span is analyzed. + KeyAnalyzedSpans = "_dd.analyzed" + + // KeyHTTPStatusCode is the key of the http status code in the meta map + KeyHTTPStatusCode = "http.status_code" + + // KeySpanSamplingMechanism is the metric key holding a span sampling rule that a span was kept on. + KeySpanSamplingMechanism = "_dd.span_sampling.mechanism" +) + +// SamplingPriority is the type encoding a priority sampling decision. +type SamplingPriority int8 + +const ( + // PriorityNone is the value for SamplingPriority when no priority sampling decision could be found. + PriorityNone SamplingPriority = math.MinInt8 + + // PriorityUserDrop is the value set by a user to explicitly drop a trace. + PriorityUserDrop SamplingPriority = -1 + + // PriorityAutoDrop is the value set by a tracer to suggest dropping a trace. + PriorityAutoDrop SamplingPriority = 0 + + // PriorityAutoKeep is the value set by a tracer to suggest keeping a trace. + PriorityAutoKeep SamplingPriority = 1 + + // PriorityUserKeep is the value set by a user to explicitly keep a trace. + PriorityUserKeep SamplingPriority = 2 + + // 2^64 - 1 + maxTraceID = ^uint64(0) + maxTraceIDFloat = float64(maxTraceID) + // Good number for Knuth hashing (large, prime, fit in int64 for languages without uint64) + samplerHasher = uint64(1111111111111111111) +) + +// IsKeep returns whether the priority is "keep". +func (s SamplingPriority) IsKeep() bool { + return s == PriorityAutoKeep || s == PriorityUserKeep +} + +func (s SamplingPriority) tagValue() string { + switch s { + case PriorityUserDrop: + return "manual_drop" + case PriorityAutoDrop: + return "auto_drop" + case PriorityAutoKeep: + return "auto_keep" + case PriorityUserKeep: + return "manual_keep" + default: + return "none" + } +} + +// SampleByRate returns whether to keep a trace, based on its ID and a sampling rate. +// This assumes that trace IDs are nearly uniformly distributed. +func SampleByRate(traceID uint64, rate float64) bool { + if rate < 1 { + return traceID*samplerHasher < uint64(rate*maxTraceIDFloat) + } + return true +} + +// GetSamplingPriority returns the value of the sampling priority metric set on this span and a boolean indicating if +// such a metric was actually found or not. 
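SampleByRate, defined above, is a pure function of the trace ID, so every component makes the same keep/drop decision for a given trace. A quick standalone check (the formula is copied here so the snippet runs on its own) that the multiplicative Knuth-style hash also keeps roughly `rate` of uniformly random IDs:

```go
package main

import (
	"fmt"
	"math/rand"
)

// sampleByRate is a standalone copy of the formula above, for experimentation.
func sampleByRate(traceID uint64, rate float64) bool {
	const samplerHasher = uint64(1111111111111111111)
	if rate < 1 {
		// multiplication wraps around, scattering IDs uniformly over uint64
		return traceID*samplerHasher < uint64(rate*float64(^uint64(0)))
	}
	return true
}

func main() {
	kept := 0
	for i := 0; i < 1_000_000; i++ {
		if sampleByRate(rand.Uint64(), 0.25) {
			kept++
		}
	}
	fmt.Println(kept) // ≈ 250000: deterministic per ID, uniform overall
}
```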
+func GetSamplingPriority(t *pb.TraceChunk) (SamplingPriority, bool) { + if t.Priority == int32(PriorityNone) { + return 0, false + } + return SamplingPriority(t.Priority), true +} + +// GetGlobalRate gets the cumulative sample rate of the trace to which this span belongs. +func GetGlobalRate(s *pb.Span) float64 { + return getMetricDefault(s, KeySamplingRateGlobal, 1.0) +} + +// GetClientRate gets the rate at which the trace this span belongs to was sampled by the tracer. +// NOTE: This defaults to 1 if no rate is stored. +func GetClientRate(s *pb.Span) float64 { + return getMetricDefault(s, KeySamplingRateClient, 1.0) +} + +// SetClientRate sets the rate at which the trace this span belongs to was sampled by the tracer. +func SetClientRate(s *pb.Span, rate float64) { + if rate < 1 { + setMetric(s, KeySamplingRateClient, rate) + } else { + // We assume missing value is 1 to save bandwidth (check getter). + delete(s.Metrics, KeySamplingRateClient) + } +} + +// GetPreSampleRate returns the rate at which the trace this span belongs to was sampled by the agent's presampler. +// NOTE: This defaults to 1 if no rate is stored. +func GetPreSampleRate(s *pb.Span) float64 { + return getMetricDefault(s, KeySamplingRatePreSampler, 1.0) +} + +// SetPreSampleRate sets the rate at which the trace this span belongs to was sampled by the agent's presampler. +func SetPreSampleRate(s *pb.Span, rate float64) { + if rate < 1 { + setMetric(s, KeySamplingRatePreSampler, rate) + } else { + // We assume missing value is 1 to save bandwidth (check getter). + delete(s.Metrics, KeySamplingRatePreSampler) + } +} + +// GetEventExtractionRate gets the rate at which the trace from which we extracted this event was sampled at the tracer. +// This defaults to 1 if no rate is stored. +func GetEventExtractionRate(s *pb.Span) float64 { + return getMetricDefault(s, KeySamplingRateEventExtraction, 1.0) +} + +// SetEventExtractionRate sets the rate at which the trace from which we extracted this event was sampled at the tracer. +func SetEventExtractionRate(s *pb.Span, rate float64) { + if rate < 1 { + setMetric(s, KeySamplingRateEventExtraction, rate) + } else { + // reduce bandwidth, default is assumed 1.0 in backend + delete(s.Metrics, KeySamplingRateEventExtraction) + } +} + +// GetMaxEPSRate gets the rate at which this event was sampled by the max eps event sampler. +func GetMaxEPSRate(s *pb.Span) float64 { + return getMetricDefault(s, KeySamplingRateMaxEPSSampler, 1.0) +} + +// SetMaxEPSRate sets the rate at which this event was sampled by the max eps event sampler.
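All of these getters and setters share one convention: a rate of 1 is encoded by absence, trading a map entry for payload bytes. The pattern in isolation, with a hypothetical metric key:

```go
package main

import "fmt"

// setRate stores a rate only when it carries information; 1.0 is the
// implicit default, so storing it would waste payload bytes.
func setRate(metrics map[string]float64, key string, rate float64) {
	if rate < 1 {
		metrics[key] = rate
	} else {
		delete(metrics, key)
	}
}

// getRate mirrors the setter: a missing key reads back as the default 1.0.
func getRate(metrics map[string]float64, key string) float64 {
	if v, ok := metrics[key]; ok {
		return v
	}
	return 1.0
}

func main() {
	m := map[string]float64{}
	setRate(m, "_example.sr", 0.5)
	fmt.Println(getRate(m, "_example.sr")) // 0.5
	setRate(m, "_example.sr", 1)
	fmt.Println(getRate(m, "_example.sr"), len(m)) // 1 0
}
```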
+func SetMaxEPSRate(s *pb.Span, rate float64) { + if rate < 1 { + setMetric(s, KeySamplingRateMaxEPSSampler, rate) + } else { + // reduce bandwidth, default is assumed 1.0 in backend + delete(s.Metrics, KeySamplingRateMaxEPSSampler) + } +} + +// SetAnalyzedSpan marks a span analyzed +func SetAnalyzedSpan(s *pb.Span) { + setMetric(s, KeyAnalyzedSpans, 1) +} + +// IsAnalyzedSpan checks if a span is analyzed +func IsAnalyzedSpan(s *pb.Span) bool { + v, _ := getMetric(s, KeyAnalyzedSpans) + return v == 1 +} + +func weightRoot(s *pb.Span) float32 { + if s == nil { + return 1 + } + clientRate, ok := s.Metrics[KeySamplingRateGlobal] + if !ok || clientRate <= 0.0 || clientRate > 1.0 { + clientRate = 1 + } + preSamplerRate, ok := s.Metrics[KeySamplingRatePreSampler] + if !ok || preSamplerRate <= 0.0 || preSamplerRate > 1.0 { + preSamplerRate = 1 + } + return float32(1.0 / (preSamplerRate * clientRate)) +} + +func getMetric(s *pb.Span, k string) (float64, bool) { + if s.Metrics == nil { + return 0, false + } + val, ok := s.Metrics[k] + return val, ok +} + +// getMetricDefault gets a value in the span Metrics map or default if no value is stored there. +func getMetricDefault(s *pb.Span, k string, def float64) float64 { + if val, ok := getMetric(s, k); ok { + return val + } + return def +} + +// setMetric sets a value in the span Metrics map. +func setMetric(s *pb.Span, key string, val float64) { + if s.Metrics == nil { + s.Metrics = make(map[string]float64) + } + s.Metrics[key] = val +} + +// SingleSpanSampling does single span sampling on the trace, returning true if the trace was modified +func SingleSpanSampling(pt *traceutil.ProcessedTrace) bool { + ssSpans := getSingleSpanSampledSpans(pt) + if len(ssSpans) > 0 { + // Span sampling has kept some spans -> update the chunk + pt.TraceChunk.Spans = ssSpans + pt.TraceChunk.Priority = int32(PriorityUserKeep) + pt.TraceChunk.DroppedTrace = false + return true + } + return false +} + +// GetSingleSpanSampledSpans searches chunk for spans that have a span sampling tag set and returns them. +func getSingleSpanSampledSpans(pt *traceutil.ProcessedTrace) []*pb.Span { + var sampledSpans []*pb.Span + for _, span := range pt.TraceChunk.Spans { + if _, ok := traceutil.GetMetric(span, KeySpanSamplingMechanism); ok { + // Keep only those spans that have a span sampling tag. + sampledSpans = append(sampledSpans, span) + } + } + if sampledSpans == nil { + // No span sampling tags → no span sampling. + return nil + } + return sampledSpans +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/scoresampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/scoresampler.go new file mode 100644 index 00000000..73d78871 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/scoresampler.go @@ -0,0 +1,133 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package sampler + +import ( + "sync" + "time" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/config" + "github.com/DataDog/datadog-go/v5/statsd" +) + +const ( + errorsRateKey = "_dd.errors_sr" + noPriorityRateKey = "_dd.no_p_sr" + // shrinkCardinality is the max Signature cardinality before shrinking + shrinkCardinality = 200 +) + +// ErrorsSampler is dedicated to catching traces containing spans with errors. +type ErrorsSampler struct{ ScoreSampler } + +// NoPrioritySampler is dedicated to catching traces with no priority set. +type NoPrioritySampler struct{ ScoreSampler } + +// ScoreSampler samples pieces of traces by computing a signature based on spans (service, name, rsc, http.status, error.type), +// scoring it, and applying a rate. +// The rates are applied on the TraceID to maximize the number of chunks with errors caught for the same traceID. +// For a given traceID: P(chunk1 kept and chunk2 kept) = min(P(chunk1 kept), P(chunk2 kept)) +type ScoreSampler struct { + *Sampler + samplingRateKey string + disabled bool + mu sync.Mutex + shrinkAllowList map[Signature]float64 +} + +// NewNoPrioritySampler returns an initialized Sampler dedicated to traces with +// no priority set. +func NewNoPrioritySampler(conf *config.AgentConfig) *NoPrioritySampler { + s := newSampler(conf.ExtraSampleRate, conf.TargetTPS) + return &NoPrioritySampler{ScoreSampler{Sampler: s, samplingRateKey: noPriorityRateKey}} +} + +var _ AdditionalMetricsReporter = (*NoPrioritySampler)(nil) + +func (s *NoPrioritySampler) report(statsd statsd.ClientInterface) { + s.Sampler.report(statsd, NameNoPriority) +} + +// NewErrorsSampler returns an initialized Sampler dedicated to errors. It behaves +// just like the normal ScoreEngine except for its GetType method (useful +// for reporting). +func NewErrorsSampler(conf *config.AgentConfig) *ErrorsSampler { + s := newSampler(conf.ExtraSampleRate, conf.ErrorTPS) + return &ErrorsSampler{ScoreSampler{Sampler: s, samplingRateKey: errorsRateKey, disabled: conf.ErrorTPS == 0}} +} + +var _ AdditionalMetricsReporter = (*ErrorsSampler)(nil) + +func (s *ErrorsSampler) report(statsd statsd.ClientInterface) { + s.Sampler.report(statsd, NameError) +} + +// Sample counts an incoming trace and tells if it is a sample which has to be kept +func (s *ScoreSampler) Sample(now time.Time, trace pb.Trace, root *pb.Span, env string) bool { + if s.disabled { + return false + } + + // Extra safety, just in case one trace is empty + if len(trace) == 0 { + return false + } + signature := computeSignatureWithRootAndEnv(trace, root, env) + signature = s.shrink(signature) + // Update sampler state by counting this trace + s.countWeightedSig(now, signature, weightRoot(root)) + + rate := s.getSignatureSampleRate(signature) + + sampled := s.applySampleRate(root, rate) + return sampled +} + +// UpdateTargetTPS updates the target tps +func (s *ScoreSampler) UpdateTargetTPS(targetTPS float64) { + s.Sampler.updateTargetTPS(targetTPS) +} + +// GetTargetTPS returns the target tps +func (s *ScoreSampler) GetTargetTPS() float64 { + return s.Sampler.targetTPS.Load() +} + +func (s *ScoreSampler) applySampleRate(root *pb.Span, rate float64) bool { + initialRate := GetGlobalRate(root) + newRate := initialRate * rate + traceID := root.TraceID + sampled := SampleByRate(traceID, newRate) + if sampled { + setMetric(root, s.samplingRateKey, rate) + } + return sampled +} + +// shrink limits the number of signatures stored in the sampler.
+// After a cardinality above shrinkCardinality/2 is reached +// signatures are spread uniformly on a fixed set of values. +// This ensures that ScoreSamplers are memory capped. +// When the shrink is triggered, previously active signatures +// stay unaffected. +// New signatures may share the same TPS computation. +func (s *ScoreSampler) shrink(sig Signature) Signature { + s.mu.Lock() + defer s.mu.Unlock() + if s.size() < shrinkCardinality/2 { + s.shrinkAllowList = nil + return sig + } + if s.shrinkAllowList == nil { + rates, _ := s.getAllSignatureSampleRates() + s.shrinkAllowList = rates + } + if _, ok := s.shrinkAllowList[sig]; ok { + return sig + } + return sig % (shrinkCardinality / 2) +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/signature.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/signature.go new file mode 100644 index 00000000..bca9a5f5 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/signature.go @@ -0,0 +1,122 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package sampler + +import ( + "sort" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" +) + +// Signature is a hash representation of trace or a service, used to identify +// similar signatures. +type Signature uint64 + +// spanHash is the type of the hashes used during the computation of a signature +// Use FNV for hashing since it is super-cheap and we have no cryptographic needs +type spanHash uint32 +type spanHashSlice []spanHash + +func (p spanHashSlice) Len() int { return len(p) } +func (p spanHashSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p spanHashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func sortHashes(hashes []spanHash) { sort.Sort(spanHashSlice(hashes)) } + +// computeSignatureWithRootAndEnv generates the signature of a trace knowing its root +// Signature based on the hash of (env, service, name, resource, is_error) for the root, plus the set of +// (env, service, name, is_error) of each span. +func computeSignatureWithRootAndEnv(trace pb.Trace, root *pb.Span, env string) Signature { + rootHash := computeSpanHash(root, env, true) + spanHashes := make([]spanHash, 0, len(trace)) + + for i := range trace { + spanHashes = append(spanHashes, computeSpanHash(trace[i], env, false)) + } + // Now sort, dedupe then merge all the hashes to build the signature + sortHashes(spanHashes) + + last := spanHashes[0] + traceHash := last ^ rootHash + for i := 1; i < len(spanHashes); i++ { + if spanHashes[i] != last { + last = spanHashes[i] + traceHash = spanHashes[i] ^ traceHash + } + } + + return Signature(traceHash) +} + +// ServiceSignature represents a unique way to identify a service. +type ServiceSignature struct{ Name, Env string } + +// Hash generates the signature of a trace with minimal information such as +// service and env, this is typically used by distributed sampling based on +// priority, and used as a key to store the desired rate for a given +// service,env tuple. 
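computeSignatureWithRootAndEnv, above, makes the trace signature independent of span order and duplication by sorting, deduplicating, and XOR-folding the per-span hashes. The same trick applied to plain strings (FNV-1a, as in this package; helper names are ours):

```go
package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

func hash32(s string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(s))
	return h.Sum32()
}

// foldHashes XOR-merges sorted, deduplicated hashes, so neither the order of
// spans nor repeats of the same span shape change the resulting signature.
func foldHashes(parts []string) uint32 {
	hs := make([]uint32, 0, len(parts))
	for _, p := range parts {
		hs = append(hs, hash32(p))
	}
	sort.Slice(hs, func(i, j int) bool { return hs[i] < hs[j] })
	var sig, last uint32
	for i, h := range hs {
		if i == 0 || h != last {
			sig ^= h
			last = h
		}
	}
	return sig
}

func main() {
	a := foldHashes([]string{"web,GET", "db,SELECT", "db,SELECT"})
	b := foldHashes([]string{"db,SELECT", "web,GET"})
	fmt.Println(a == b) // true: same shape, same signature
}
```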
+func (s ServiceSignature) Hash() Signature { + h := new32a() + h.Write([]byte(s.Name)) + h.WriteChar(',') + h.Write([]byte(s.Env)) + return Signature(h.Sum32()) +} + +func (s ServiceSignature) String() string { + return "service:" + s.Name + ",env:" + s.Env +} + +func computeSpanHash(span *pb.Span, env string, withResource bool) spanHash { + h := new32a() + h.Write([]byte(env)) + h.Write([]byte(span.Service)) + h.Write([]byte(span.Name)) + h.WriteChar(byte(span.Error)) + if withResource { + h.Write([]byte(span.Resource)) + } + code, ok := traceutil.GetMeta(span, KeyHTTPStatusCode) + if ok { + h.Write([]byte(code)) + } + typ, ok := traceutil.GetMeta(span, KeyErrorType) + if ok { + h.Write([]byte(typ)) + } + return spanHash(h.Sum32()) +} + +// sum32a is an adaptation of https://golang.org/pkg/hash/fnv/#New32a, but simplified +// for our use case to remove interfaces which caused unnecessary allocations. +type sum32a uint32 + +const ( + offset32 = 2166136261 + prime32 = 16777619 +) + +func new32a() sum32a { + return offset32 +} + +func (s *sum32a) Write(data []byte) { + hash := *s + for _, c := range data { + hash ^= sum32a(c) + hash *= prime32 + } + *s = hash +} + +func (s *sum32a) WriteChar(c byte) { + hash := *s + hash ^= sum32a(c) + hash *= prime32 + *s = hash +} + +func (s *sum32a) Sum32() uint32 { return uint32(*s) } diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/aggregation.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/aggregation.go new file mode 100644 index 00000000..ba67bacd --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/aggregation.go @@ -0,0 +1,197 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package stats contains the logic to process APM stats. +package stats + +import ( + "hash/fnv" + "sort" + "strconv" + "strings" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/log" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" + "google.golang.org/genproto/googleapis/rpc/code" +) + +const ( + tagSynthetics = "synthetics" + tagSpanKind = "span.kind" + tagBaseService = "_dd.base_service" +) + +// Aggregation contains all the dimension on which we aggregate statistics. +type Aggregation struct { + BucketsAggregationKey + PayloadAggregationKey +} + +// BucketsAggregationKey specifies the key by which a bucket is aggregated. +type BucketsAggregationKey struct { + Service string + Name string + Resource string + Type string + SpanKind string + StatusCode uint32 + Synthetics bool + PeerTagsHash uint64 + IsTraceRoot pb.Trilean + GRPCStatusCode string +} + +// PayloadAggregationKey specifies the key by which a payload is aggregated. +type PayloadAggregationKey struct { + Env string + Hostname string + Version string + ContainerID string + GitCommitSha string + ImageTag string + ProcessTagsHash uint64 +} + +func getStatusCode(meta map[string]string, metrics map[string]float64) uint32 { + code, ok := metrics[traceutil.TagStatusCode] + if ok { + // only 7.39.0+, for lesser versions, always use Meta + return uint32(code) + } + strC := meta[traceutil.TagStatusCode] + if strC == "" { + return 0 + } + c, err := strconv.ParseUint(strC, 10, 32) + if err != nil { + log.Debugf("Invalid status code %s. 
Using 0.", strC) + return 0 + } + return uint32(c) +} + +// NewAggregationFromSpan creates a new aggregation from the provided span and env +func NewAggregationFromSpan(s *StatSpan, origin string, aggKey PayloadAggregationKey) Aggregation { + synthetics := strings.HasPrefix(origin, tagSynthetics) + var isTraceRoot pb.Trilean + if s.parentID == 0 { + isTraceRoot = pb.Trilean_TRUE + } else { + isTraceRoot = pb.Trilean_FALSE + } + agg := Aggregation{ + PayloadAggregationKey: aggKey, + BucketsAggregationKey: BucketsAggregationKey{ + Resource: s.resource, + Service: s.service, + Name: s.name, + SpanKind: s.spanKind, + Type: s.typ, + StatusCode: s.statusCode, + Synthetics: synthetics, + IsTraceRoot: isTraceRoot, + GRPCStatusCode: s.grpcStatusCode, + PeerTagsHash: tagsFnvHash(s.matchingPeerTags), + }, + } + return agg +} + +func processTagsHash(processTags string) uint64 { + if processTags == "" { + return 0 + } + return tagsFnvHash(strings.Split(processTags, ",")) +} + +func tagsFnvHash(tags []string) uint64 { + if len(tags) == 0 { + return 0 + } + if !sort.StringsAreSorted(tags) { + sort.Strings(tags) + } + h := fnv.New64a() + for i, t := range tags { + if i > 0 { + h.Write([]byte{0}) + } + h.Write([]byte(t)) + } + return h.Sum64() +} + +// NewAggregationFromGroup gets the Aggregation key of grouped stats. +func NewAggregationFromGroup(g *pb.ClientGroupedStats) Aggregation { + return Aggregation{ + BucketsAggregationKey: BucketsAggregationKey{ + Resource: g.Resource, + Service: g.Service, + Name: g.Name, + SpanKind: g.SpanKind, + StatusCode: g.HTTPStatusCode, + Synthetics: g.Synthetics, + PeerTagsHash: tagsFnvHash(g.PeerTags), + IsTraceRoot: g.IsTraceRoot, + GRPCStatusCode: g.GRPCStatusCode, + }, + } +} + +/* +Google's gRPC code API checks for "CANCELLED". Sometimes we receive "Canceled" from upstream, +sometimes "CANCELLED", which is why both spellings appear in the map. +For multi-word codes, we sometimes receive them from upstream as one word, such as DeadlineExceeded. +Google's API checks for strings with an underscore and in all caps, and would only recognize codes +formatted like "ALREADY_EXISTS" or "DEADLINE_EXCEEDED" +*/ +var grpcStatusMap = map[string]string{ + "CANCELLED": "1", + "CANCELED": "1", + "INVALIDARGUMENT": "3", + "DEADLINEEXCEEDED": "4", + "NOTFOUND": "5", + "ALREADYEXISTS": "6", + "PERMISSIONDENIED": "7", + "RESOURCEEXHAUSTED": "8", + "FAILEDPRECONDITION": "9", + "OUTOFRANGE": "11", + "DATALOSS": "15", +} + +func getGRPCStatusCode(meta map[string]string, metrics map[string]float64) string { + // List of possible keys to check in order + statusCodeFields := []string{"rpc.grpc.status_code", "grpc.code", "rpc.grpc.status.code", "grpc.status.code"} + + for _, key := range statusCodeFields { + if strC, exists := meta[key]; exists && strC != "" { + c, err := strconv.ParseUint(strC, 10, 32) + if err == nil { + return strconv.FormatUint(c, 10) + } + strC = strings.TrimPrefix(strC, "StatusCode.") // Some tracers send status code values prefixed by "StatusCode."
+ strCUpper := strings.ToUpper(strC) + if statusCode, exists := grpcStatusMap[strCUpper]; exists { + return statusCode + } + + // If not an integer or a canceled/multi-word code, check for a valid gRPC status string + if codeNum, found := code.Code_value[strCUpper]; found { + return strconv.Itoa(int(codeNum)) + } + + return "" + } + } + + for _, key := range statusCodeFields { // Check if gRPC status code is stored in metrics + if code, ok := metrics[key]; ok { + return strconv.FormatUint(uint64(code), 10) + } + } + + return "" +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/client_stats_aggregator.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/client_stats_aggregator.go new file mode 100644 index 00000000..a94037a4 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/client_stats_aggregator.go @@ -0,0 +1,449 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package stats + +import ( + "time" + + "github.com/DataDog/datadog-agent/pkg/trace/version" + "github.com/DataDog/sketches-go/ddsketch" + "github.com/DataDog/sketches-go/ddsketch/mapping" + "github.com/DataDog/sketches-go/ddsketch/pb/sketchpb" + "github.com/DataDog/sketches-go/ddsketch/store" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/config" + "github.com/DataDog/datadog-agent/pkg/trace/log" + "github.com/DataDog/datadog-agent/pkg/trace/watchdog" + + "github.com/DataDog/datadog-go/v5/statsd" + + "google.golang.org/protobuf/proto" +) + +const ( + bucketDuration = 2 * time.Second + clientBucketDuration = 10 * time.Second + oldestBucketStart = 20 * time.Second +) + +var ( + ddsketchMapping, _ = mapping.NewLogarithmicMapping(relativeAccuracy) +) + +// ClientStatsAggregator aggregates client stats payloads on buckets of bucketDuration. +// If a single payload is received on a bucket, this Aggregator is a passthrough. +// If two or more payloads collide, their counts will be aggregated into one bucket. +// Multiple payloads will be sent: +// - Original payloads with their distributions will be sent with counts zeroed. +// - A single payload with the bucket aggregated counts will be sent. +// This and the aggregator timestamp alignment ensure that all counts will have at most one point per second per agent +// for a specific granularity, while distributions are not tied to the agent.
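The "timestamp alignment" referred to above is alignAggTs, defined further down in this file: flushes land on odd seconds (2s buckets shifted by 1s), so they can never coincide with the concentrator's 10s-aligned points. A quick standalone check of that property (the rule is reproduced here from its definition below):

```go
package main

import (
	"fmt"
	"time"
)

// alignAggTs reproduces the rule from this file: truncate to the 2s bucket,
// then shift by 1s, so the result is always an odd Unix second.
func alignAggTs(t time.Time) time.Time {
	return t.Truncate(2 * time.Second).Add(time.Second)
}

func main() {
	t := time.Unix(1700000004, 123456789) // an even second, mid-bucket
	aligned := alignAggTs(t)
	fmt.Println(aligned.Unix()%2 == 1)  // true: odd second
	fmt.Println(aligned.Unix()%10 == 0) // false: never on a concentrator tick
}
```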
+type ClientStatsAggregator struct { + In chan *pb.ClientStatsPayload + writer Writer + buckets map[int64]*bucket // buckets used to aggregate client stats + conf *config.AgentConfig + + flushTicker *time.Ticker + oldestTs time.Time + agentEnv string + agentHostname string + agentVersion string + + exit chan struct{} + done chan struct{} + + statsd statsd.ClientInterface +} + +// NewClientStatsAggregator initializes a new aggregator ready to be started +func NewClientStatsAggregator(conf *config.AgentConfig, writer Writer, statsd statsd.ClientInterface) *ClientStatsAggregator { + c := &ClientStatsAggregator{ + flushTicker: time.NewTicker(time.Second), + In: make(chan *pb.ClientStatsPayload, 10), + buckets: make(map[int64]*bucket, 20), + conf: conf, + writer: writer, + agentEnv: conf.DefaultEnv, + agentHostname: conf.Hostname, + agentVersion: conf.AgentVersion, + oldestTs: alignAggTs(time.Now().Add(bucketDuration - oldestBucketStart)), + exit: make(chan struct{}), + done: make(chan struct{}), + statsd: statsd, + } + return c +} + +// Start starts the aggregator. +func (a *ClientStatsAggregator) Start() { + go func() { + defer watchdog.LogOnPanic(a.statsd) + for { + select { + case t := <-a.flushTicker.C: + a.flushOnTime(t) + case input := <-a.In: + a.add(time.Now(), input) + case <-a.exit: + a.flushAll() + close(a.done) + return + } + } + }() +} + +// Stop stops the aggregator. Calling Stop twice will panic. +func (a *ClientStatsAggregator) Stop() { + close(a.exit) + a.flushTicker.Stop() + <-a.done +} + +// flushOnTime flushes all buckets up to flushTs, except the last one. +func (a *ClientStatsAggregator) flushOnTime(now time.Time) { + flushTs := alignAggTs(now.Add(bucketDuration - oldestBucketStart)) + for t := a.oldestTs; t.Before(flushTs); t = t.Add(bucketDuration) { + if b, ok := a.buckets[t.Unix()]; ok { + a.flush(b.aggregationToPayloads()) + delete(a.buckets, t.Unix()) + } + } + a.oldestTs = flushTs +} + +func (a *ClientStatsAggregator) flushAll() { + for _, b := range a.buckets { + a.flush(b.aggregationToPayloads()) + } +} + +// getAggregationBucketTime returns unix time at which we aggregate the bucket. +// We timeshift payloads older than a.oldestTs to a.oldestTs. +// Payloads in the future are timeshifted to the latest bucket. +func (a *ClientStatsAggregator) getAggregationBucketTime(now, bs time.Time) time.Time { + if bs.Before(a.oldestTs) { + return a.oldestTs + } + if bs.After(now) { + return alignAggTs(now) + } + return alignAggTs(bs) +} + +// add takes a new ClientStatsPayload and aggregates its stats in the internal buckets. 
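getAggregationBucketTime, above, timeshifts client buckets into the window the aggregator still tracks. The clamp in isolation (hypothetical helper, ignoring the additional alignAggTs alignment):

```go
package main

import (
	"fmt"
	"time"
)

// clampBucket mirrors the timeshift rule above: stats older than the oldest
// live bucket land in it, stats from the future land in the newest bucket.
func clampBucket(now, bucketStart, oldest time.Time) time.Time {
	if bucketStart.Before(oldest) {
		return oldest
	}
	if bucketStart.After(now) {
		return now
	}
	return bucketStart
}

func main() {
	now := time.Now()
	oldest := now.Add(-18 * time.Second)
	fmt.Println(clampBucket(now, now.Add(-time.Minute), oldest).Equal(oldest)) // true
	fmt.Println(clampBucket(now, now.Add(time.Minute), oldest).Equal(now))    // true
}
```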
+func (a *ClientStatsAggregator) add(now time.Time, p *pb.ClientStatsPayload) { + // populate container tags data on the payload + a.setVersionDataFromContainerTags(p) + p.ProcessTagsHash = processTagsHash(p.ProcessTags) + // compute the PayloadAggregationKey, common for all buckets within the payload + payloadAggKey := newPayloadAggregationKey(p.Env, p.Hostname, p.Version, p.ContainerID, p.GitCommitSha, p.ImageTag, p.ProcessTagsHash) + + for _, clientBucket := range p.Stats { + clientBucketStart := time.Unix(0, int64(clientBucket.Start)) + ts := a.getAggregationBucketTime(now, clientBucketStart) + b, ok := a.buckets[ts.Unix()] + if !ok { + b = &bucket{ + ts: ts, + agg: make(map[PayloadAggregationKey]map[BucketsAggregationKey]*aggregatedStats), + processTags: make(map[uint64]string), + } + a.buckets[ts.Unix()] = b + } + b.processTags[p.ProcessTagsHash] = p.ProcessTags + b.aggregateStatsBucket(clientBucket, payloadAggKey) + } +} + +func (a *ClientStatsAggregator) flush(p []*pb.ClientStatsPayload) { + if len(p) == 0 { + return + } + + a.writer.Write(&pb.StatsPayload{ + Stats: p, + AgentEnv: a.agentEnv, + AgentHostname: a.agentHostname, + AgentVersion: a.agentVersion, + ClientComputed: true, + }) +} + +func (a *ClientStatsAggregator) setVersionDataFromContainerTags(p *pb.ClientStatsPayload) { + // No need to go any further if we already have the information in the payload. + if p.ImageTag != "" && p.GitCommitSha != "" { + return + } + if p.ContainerID != "" { + cTags, err := a.conf.ContainerTags(p.ContainerID) + if err != nil { + log.Error("Client stats aggregator is unable to resolve container ID (%s) to container tags: %v", p.ContainerID, err) + } else { + gitCommitSha, imageTag := version.GetVersionDataFromContainerTags(cTags) + // Only override if the payload's original values were empty strings. + if p.ImageTag == "" { + p.ImageTag = imageTag + } + if p.GitCommitSha == "" { + p.GitCommitSha = gitCommitSha + } + } + } +} + +// alignAggTs aligns time to the aggregator timestamps. +// Timestamps from the aggregator are never aligned with concentrator timestamps. +// This ensures that all counts sent by a same agent host are never on the same second. +// aggregator timestamps: 2ks+1s (1s, 3s, 5s, 7s, 9s, 11s) +// concentrator timestamps: 10ks (0s, 10s, 20s ..) +func alignAggTs(t time.Time) time.Time { + return t.Truncate(bucketDuration).Add(time.Second) +} + +type bucket struct { + // ts is the timestamp attached to the payload + ts time.Time + // agg contains the aggregated Hits/Errors/Duration counts + agg map[PayloadAggregationKey]map[BucketsAggregationKey]*aggregatedStats + processTags map[uint64]string +} + +// aggregateStatsBucket takes a ClientStatsBucket and a PayloadAggregationKey, and aggregates all counts +// and distributions from the ClientGroupedStats inside the bucket. 
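alignAggTs above pins aggregator buckets to odd seconds (2ks+1s), which by construction can never land on the concentrator's 10-second marks, so counts from the two paths always carry distinct timestamps. A small sketch of the alignment, with the constant copied from this file and illustrative inputs:

package main

import (
	"fmt"
	"time"
)

const bucketDuration = 2 * time.Second // same value as above

// alignAggTs is reproduced from the file above.
func alignAggTs(t time.Time) time.Time {
	return t.Truncate(bucketDuration).Add(time.Second)
}

func main() {
	base := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	for i := 0; i < 4; i++ {
		in := base.Add(time.Duration(i) * 700 * time.Millisecond)
		fmt.Printf("%s -> %s\n",
			in.Format("15:04:05.000"),
			alignAggTs(in).Format("15:04:05")) // always an odd second
	}
}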
+func (b *bucket) aggregateStatsBucket(sb *pb.ClientStatsBucket, payloadAggKey PayloadAggregationKey) { + payloadAgg, ok := b.agg[payloadAggKey] + if !ok { + payloadAgg = make(map[BucketsAggregationKey]*aggregatedStats, len(sb.Stats)) + b.agg[payloadAggKey] = payloadAgg + } + for _, gs := range sb.Stats { + if gs == nil { + continue + } + aggKey := newBucketAggregationKey(gs) + agg, ok := payloadAgg[aggKey] + if !ok { + agg = &aggregatedStats{ + hits: gs.Hits, + topLevelHits: gs.TopLevelHits, + errors: gs.Errors, + duration: gs.Duration, + peerTags: gs.PeerTags, + okDistributionRaw: gs.OkSummary, // store encoded version only + errDistributionRaw: gs.ErrorSummary, // store encoded version only + } + payloadAgg[aggKey] = agg + continue + } + + // aggregate counts + agg.hits += gs.Hits + agg.topLevelHits += gs.TopLevelHits + agg.errors += gs.Errors + agg.duration += gs.Duration + + // Decode, if needed, the raw ddsketches from the first payload that reached the bucket + if agg.okDistributionRaw != nil { + sketch, err := decodeSketch(agg.okDistributionRaw) + if err != nil { + log.Error("Unable to decode OK distribution ddsketch: %v", err) + } else { + agg.okDistribution = normalizeSketch(sketch) + } + agg.okDistributionRaw = nil + } + if agg.errDistributionRaw != nil { + sketch, err := decodeSketch(agg.errDistributionRaw) + if err != nil { + log.Error("Unable to decode Error distribution ddsketch: %v", err) + } else { + agg.errDistribution = normalizeSketch(sketch) + } + agg.errDistributionRaw = nil + } + + // aggregate distributions + if sketch, err := mergeSketch(agg.okDistribution, gs.OkSummary); err == nil { + agg.okDistribution = sketch + } else { + log.Error("Unable to merge OK distribution ddsketch: %v", err) + } + + if sketch, err := mergeSketch(agg.errDistribution, gs.ErrorSummary); err == nil { + agg.errDistribution = sketch + } else { + log.Error("Unable to merge Error distribution ddsketch: %v", err) + } + } +} + +// aggregationToPayloads converts the contents of the bucket into ClientStatsPayloads +func (b *bucket) aggregationToPayloads() []*pb.ClientStatsPayload { + res := make([]*pb.ClientStatsPayload, 0, len(b.agg)) + for payloadKey, aggrStats := range b.agg { + groupedStats := make([]*pb.ClientGroupedStats, 0, len(aggrStats)) + for aggrKey, stats := range aggrStats { + gs, err := exporGroupedStats(aggrKey, stats) + if err != nil { + log.Errorf("Dropping stats bucket due to encoding error: %v.", err) + continue + } + groupedStats = append(groupedStats, gs) + } + clientBuckets := []*pb.ClientStatsBucket{ + { + Start: uint64(b.ts.UnixNano()), + Duration: uint64(clientBucketDuration.Nanoseconds()), + Stats: groupedStats, + }} + res = append(res, &pb.ClientStatsPayload{ + Hostname: payloadKey.Hostname, + Env: payloadKey.Env, + Version: payloadKey.Version, + ImageTag: payloadKey.ImageTag, + GitCommitSha: payloadKey.GitCommitSha, + ContainerID: payloadKey.ContainerID, + Stats: clientBuckets, + ProcessTagsHash: payloadKey.ProcessTagsHash, + ProcessTags: b.processTags[payloadKey.ProcessTagsHash], + }) + } + return res +} + +func exporGroupedStats(aggrKey BucketsAggregationKey, stats *aggregatedStats) (*pb.ClientGroupedStats, error) { + // if the raw sketches are still present (only one payload received), we use them directly. + // Otherwise the aggregated DDSketches are serialized. 
+ okSummary := stats.okDistributionRaw + errSummary := stats.errDistributionRaw + + var err error + if stats.okDistribution != nil { + msg := stats.okDistribution.ToProto() + okSummary, err = proto.Marshal(msg) + if err != nil { + return &pb.ClientGroupedStats{}, err + } + } + if stats.errDistribution != nil { + msg := stats.errDistribution.ToProto() + errSummary, err = proto.Marshal(msg) + if err != nil { + return &pb.ClientGroupedStats{}, err + } + } + return &pb.ClientGroupedStats{ + Service: aggrKey.Service, + Name: aggrKey.Name, + SpanKind: aggrKey.SpanKind, + Resource: aggrKey.Resource, + HTTPStatusCode: aggrKey.StatusCode, + Type: aggrKey.Type, + Synthetics: aggrKey.Synthetics, + IsTraceRoot: aggrKey.IsTraceRoot, + GRPCStatusCode: aggrKey.GRPCStatusCode, + PeerTags: stats.peerTags, + TopLevelHits: stats.topLevelHits, + Hits: stats.hits, + Errors: stats.errors, + Duration: stats.duration, + OkSummary: okSummary, + ErrorSummary: errSummary, + }, nil +} + +func newPayloadAggregationKey(env, hostname, version, cid, gitCommitSha, imageTag string, processTagsHash uint64) PayloadAggregationKey { + return PayloadAggregationKey{ + Env: env, + Hostname: hostname, + Version: version, + ContainerID: cid, + GitCommitSha: gitCommitSha, + ImageTag: imageTag, + ProcessTagsHash: processTagsHash, + } +} + +func newBucketAggregationKey(b *pb.ClientGroupedStats) BucketsAggregationKey { + k := BucketsAggregationKey{ + Service: b.Service, + Name: b.Name, + SpanKind: b.SpanKind, + Resource: b.Resource, + Type: b.Type, + Synthetics: b.Synthetics, + StatusCode: b.HTTPStatusCode, + GRPCStatusCode: b.GRPCStatusCode, + IsTraceRoot: b.IsTraceRoot, + } + if tags := b.GetPeerTags(); len(tags) > 0 { + k.PeerTagsHash = tagsFnvHash(tags) + } + return k +} + +// aggregatedStats holds aggregated counts and distributions +type aggregatedStats struct { + // aggregated counts + hits, topLevelHits, errors, duration uint64 + peerTags []string + + // aggregated DDSketches + okDistribution, errDistribution *ddsketch.DDSketch + + // raw (encoded) DDSketches. Only present if a single payload is received on the active bucket, + // allowing the bucket to not decode the sketch. If a second payload matches the bucket, + // sketches will be decoded and stored in the okDistribution and errDistribution fields. 
+ okDistributionRaw, errDistributionRaw []byte +} + +// mergeSketch take an existing DDSketch, and merges a second one, decoding its contents +func mergeSketch(s1 *ddsketch.DDSketch, raw []byte) (*ddsketch.DDSketch, error) { + if raw == nil { + return s1, nil + } + + s2, err := decodeSketch(raw) + if err != nil { + return s1, err + } + s2 = normalizeSketch(s2) + + if s1 == nil { + return s2, nil + } + + if err = s1.MergeWith(s2); err != nil { + return nil, err + } + return s1, nil +} + +func normalizeSketch(s *ddsketch.DDSketch) *ddsketch.DDSketch { + if s.IndexMapping.Equals(ddsketchMapping) { + // already normalized + return s + } + + return s.ChangeMapping(ddsketchMapping, store.NewCollapsingLowestDenseStore(maxNumBins), store.NewCollapsingLowestDenseStore(maxNumBins), 1) +} + +func decodeSketch(data []byte) (*ddsketch.DDSketch, error) { + if len(data) == 0 { + return nil, nil + } + + var sketch sketchpb.DDSketch + err := proto.Unmarshal(data, &sketch) + if err != nil { + return nil, err + } + + return ddsketch.FromProto(&sketch) +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/concentrator.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/concentrator.go new file mode 100644 index 00000000..a9e11e2d --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/concentrator.go @@ -0,0 +1,201 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package stats + +import ( + "sync" + "time" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/config" + "github.com/DataDog/datadog-agent/pkg/trace/log" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" + "github.com/DataDog/datadog-agent/pkg/trace/watchdog" + + "github.com/DataDog/datadog-go/v5/statsd" +) + +// defaultBufferLen represents the default buffer length; the number of bucket size +// units used by the concentrator. +const defaultBufferLen = 2 + +// Writer is an interface for something that can Write Stats Payloads +type Writer interface { + // Write this payload + Write(*pb.StatsPayload) +} + +// Concentrator produces time bucketed statistics from a stream of raw traces. +// https://en.wikipedia.org/wiki/Knelson_concentrator +// Gets an imperial shitton of traces, and outputs pre-computed data structures +// allowing to find the gold (stats) amongst the traces. 
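decodeSketch and the export path above are two halves of one round trip: ToProto plus proto.Marshal on the way out, proto.Unmarshal plus ddsketch.FromProto on the way back, with MergeWith doing the actual aggregation. normalizeSketch rewrites mismatched index mappings first; the sketches below share one mapping, so that step is skipped. A self-contained sketch of the round trip and merge, with illustrative values:

package main

import (
	"fmt"

	"github.com/DataDog/sketches-go/ddsketch"
	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
	"google.golang.org/protobuf/proto"
)

func main() {
	a, _ := ddsketch.LogCollapsingLowestDenseDDSketch(0.01, 2048)
	b, _ := ddsketch.LogCollapsingLowestDenseDDSketch(0.01, 2048)
	for i := 1; i <= 1000; i++ {
		_ = a.Add(float64(i))      // 1..1000
		_ = b.Add(float64(i) * 10) // 10..10000
	}

	// Wire form, as the export path produces it.
	raw, err := proto.Marshal(b.ToProto())
	if err != nil {
		panic(err)
	}

	// Decode half, mirroring decodeSketch above.
	var msg sketchpb.DDSketch
	if err := proto.Unmarshal(raw, &msg); err != nil {
		panic(err)
	}
	decoded, err := ddsketch.FromProto(&msg)
	if err != nil {
		panic(err)
	}

	// Merge half, mirroring mergeSketch above.
	if err := a.MergeWith(decoded); err != nil {
		panic(err)
	}
	p99, _ := a.GetValueAtQuantile(0.99)
	fmt.Printf("merged count=%.0f, p99≈%.0f\n", a.GetCount(), p99)
}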
+type Concentrator struct { + Writer Writer + + spanConcentrator *SpanConcentrator + // bucket duration in nanoseconds + bsize int64 + exit chan struct{} + exitWG sync.WaitGroup + cidStats bool + processStats bool + agentEnv string + agentHostname string + agentVersion string + statsd statsd.ClientInterface + peerTagKeys []string +} + +// NewConcentrator initializes a new concentrator ready to be started +func NewConcentrator(conf *config.AgentConfig, writer Writer, now time.Time, statsd statsd.ClientInterface) *Concentrator { + bsize := conf.BucketInterval.Nanoseconds() + sc := NewSpanConcentrator(&SpanConcentratorConfig{ + ComputeStatsBySpanKind: conf.ComputeStatsBySpanKind, + BucketInterval: bsize, + }, now) + _, disabledCIDStats := conf.Features["disable_cid_stats"] + _, disabledProcessStats := conf.Features["disable_process_stats"] + c := Concentrator{ + spanConcentrator: sc, + Writer: writer, + exit: make(chan struct{}), + cidStats: !disabledCIDStats, + processStats: !disabledProcessStats, + agentEnv: conf.DefaultEnv, + agentHostname: conf.Hostname, + agentVersion: conf.AgentVersion, + statsd: statsd, + bsize: bsize, + peerTagKeys: conf.ConfiguredPeerTags(), + } + return &c +} + +// Start starts the concentrator. +func (c *Concentrator) Start() { + c.exitWG.Add(1) + go func() { + defer watchdog.LogOnPanic(c.statsd) + defer c.exitWG.Done() + c.Run() + }() +} + +// Run runs the main loop of the concentrator goroutine. Traces are received +// through `Add`, this loop only deals with flushing. +func (c *Concentrator) Run() { + // flush with the same period as stats buckets + flushTicker := time.NewTicker(time.Duration(c.bsize) * time.Nanosecond) + defer flushTicker.Stop() + + log.Debug("Starting concentrator") + + for { + select { + case <-flushTicker.C: + c.Writer.Write(c.Flush(false)) + case <-c.exit: + log.Info("Exiting concentrator, computing remaining stats") + c.Writer.Write(c.Flush(true)) + return + } + } +} + +// Stop stops the main Run loop. +func (c *Concentrator) Stop() { + close(c.exit) + c.exitWG.Wait() +} + +// Input specifies a set of traces originating from a certain payload. +type Input struct { + Traces []traceutil.ProcessedTrace + ContainerID string + ContainerTags []string + ProcessTags string +} + +// NewStatsInput allocates a stats input for an incoming trace payload +func NewStatsInput(numChunks int, containerID string, clientComputedStats bool, processTags string) Input { + if clientComputedStats { + return Input{} + } + return Input{Traces: make([]traceutil.ProcessedTrace, 0, numChunks), ContainerID: containerID, ProcessTags: processTags} +} + +// Add applies the given input to the concentrator. +func (c *Concentrator) Add(t Input) { + tags := infraTags{ + containerID: t.ContainerID, + containerTags: t.ContainerTags, + processTagsHash: processTagsHash(t.ProcessTags), + processTags: t.ProcessTags, + } + for _, trace := range t.Traces { + c.addNow(&trace, tags) + } +} + +type infraTags struct { + containerID string + containerTags []string + processTagsHash uint64 + processTags string +} + +// addNow adds the given input into the concentrator. +// Callers must guard! 
+func (c *Concentrator) addNow(pt *traceutil.ProcessedTrace, tags infraTags) { + if !c.cidStats { + tags.containerID = "" + } + if !c.processStats { + tags.processTagsHash = 0 + tags.processTags = "" + } + hostname := pt.TracerHostname + if hostname == "" { + hostname = c.agentHostname + } + env := pt.TracerEnv + if env == "" { + env = c.agentEnv + } + weight := weight(pt.Root) + aggKey := PayloadAggregationKey{ + Env: env, + Hostname: hostname, + Version: pt.AppVersion, + ContainerID: tags.containerID, + GitCommitSha: pt.GitCommitSha, + ImageTag: pt.ImageTag, + ProcessTagsHash: tags.processTagsHash, + } + for _, s := range pt.TraceChunk.Spans { + statSpan, ok := c.spanConcentrator.NewStatSpanFromPB(s, c.peerTagKeys) + if ok { + c.spanConcentrator.addSpan(statSpan, aggKey, tags, pt.TraceChunk.Origin, weight) + } + } +} + +// Flush deletes and returns complete statistic buckets. +// The force boolean guarantees flushing all buckets if set to true. +func (c *Concentrator) Flush(force bool) *pb.StatsPayload { + return c.flushNow(time.Now().UnixNano(), force) +} + +func (c *Concentrator) flushNow(now int64, force bool) *pb.StatsPayload { + sb := c.spanConcentrator.Flush(now, force) + return &pb.StatsPayload{Stats: sb, AgentHostname: c.agentHostname, AgentEnv: c.agentEnv, AgentVersion: c.agentVersion} +} + +// alignTs returns the provided timestamp truncated to the bucket size. +// It gives us the start time of the time bucket in which such timestamp falls. +func alignTs(ts int64, bsize int64) int64 { + return ts - ts%bsize +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/otel_util.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/otel_util.go new file mode 100644 index 00000000..214e32e7 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/otel_util.go @@ -0,0 +1,162 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package stats + +import ( + "slices" + + "github.com/DataDog/datadog-agent/pkg/obfuscate" + "github.com/DataDog/datadog-agent/pkg/trace/log" + "github.com/DataDog/datadog-agent/pkg/trace/transform" + + "go.opentelemetry.io/collector/pdata/ptrace" + semconv "go.opentelemetry.io/collector/semconv/v1.17.0" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/config" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" +) + +// chunkKey is used to group TraceChunks +type chunkKey struct { + traceIDUInt64 uint64 + env string + version string + hostname string + cid string +} + +// OTLPTracesToConcentratorInputs converts eligible OTLP spans to Concentrator.Input. +// The converted Inputs only have the minimal number of fields for APM stats calculation and are only meant +// to be used in Concentrator.Add(). Do not use them for other purposes. +func OTLPTracesToConcentratorInputs( + traces ptrace.Traces, + conf *config.AgentConfig, + containerTagKeys []string, + peerTagKeys []string, +) []Input { + return OTLPTracesToConcentratorInputsWithObfuscation(traces, conf, containerTagKeys, peerTagKeys, nil) +} + +// OTLPTracesToConcentratorInputsWithObfuscation converts eligible OTLP spans to Concentrator Input. +// The converted Inputs only have the minimal number of fields for APM stats calculation and are only meant +// to be used in Concentrator.Add(). 
Do not use them for other purposes. +// This function enables obfuscation of spans prior to stats calculation and datadogconnector will migrate +// to this function once this function is published as part of latest pkg/trace module. +func OTLPTracesToConcentratorInputsWithObfuscation( + traces ptrace.Traces, + conf *config.AgentConfig, + containerTagKeys []string, + peerTagKeys []string, + obfuscator *obfuscate.Obfuscator, +) []Input { + spanByID, resByID, scopeByID := traceutil.IndexOTelSpans(traces) + topLevelByKind := conf.HasFeature("enable_otlp_compute_top_level_by_span_kind") + topLevelSpans := traceutil.GetTopLevelOTelSpans(spanByID, resByID, topLevelByKind) + ignoreResNames := make(map[string]struct{}) + for _, resName := range conf.Ignore["resource"] { + ignoreResNames[resName] = struct{}{} + } + chunks := make(map[chunkKey]*pb.TraceChunk) + containerTagsByID := make(map[string][]string) + for spanID, otelspan := range spanByID { + otelres := resByID[spanID] + var resourceName string + if transform.OperationAndResourceNameV2Enabled(conf) { + resourceName = traceutil.GetOTelResourceV2(otelspan, otelres) + } else { + resourceName = traceutil.GetOTelResourceV1(otelspan, otelres) + } + if _, exists := ignoreResNames[resourceName]; exists { + continue + } + env := traceutil.GetOTelEnv(otelres) + hostname := traceutil.GetOTelHostname(otelspan, otelres, conf.OTLPReceiver.AttributesTranslator, conf.Hostname) + version := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, semconv.AttributeServiceVersion) + cid := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, semconv.AttributeContainerID, semconv.AttributeK8SPodUID) + var ctags []string + if cid != "" { + ctags = traceutil.GetOTelContainerTags(otelres.Attributes(), containerTagKeys) + if conf.ContainerTags != nil { + tags, err := conf.ContainerTags(cid) + if err != nil { + log.Debugf("Failed to get container tags for container %q: %v", cid, err) + } else { + log.Tracef("Getting container tags for ID %q: %v", cid, tags) + ctags = append(ctags, tags...) + } + } + if ctags != nil { + // Make sure container tags are sorted per APM stats intake requirement + if !slices.IsSorted(ctags) { + slices.Sort(ctags) + } + containerTagsByID[cid] = ctags + } + } + ckey := chunkKey{ + traceIDUInt64: traceutil.OTelTraceIDToUint64(otelspan.TraceID()), + env: env, + version: version, + hostname: hostname, + cid: cid, + } + chunk, ok := chunks[ckey] + if !ok { + chunk = &pb.TraceChunk{} + chunks[ckey] = chunk + } + _, isTop := topLevelSpans[spanID] + ddSpan := transform.OtelSpanToDDSpanMinimal(otelspan, otelres, scopeByID[spanID], isTop, topLevelByKind, conf, peerTagKeys) + if obfuscator != nil { + obfuscateSpanForConcentrator(obfuscator, ddSpan, conf) + } + chunk.Spans = append(chunk.Spans, ddSpan) + } + + inputs := make([]Input, 0, len(chunks)) + for ckey, chunk := range chunks { + pt := traceutil.ProcessedTrace{ + TraceChunk: chunk, + Root: traceutil.GetRoot(chunk.Spans), + TracerEnv: ckey.env, + AppVersion: ckey.version, + TracerHostname: ckey.hostname, + } + inputs = append(inputs, Input{ + Traces: []traceutil.ProcessedTrace{pt}, + ContainerID: ckey.cid, + ContainerTags: containerTagsByID[ckey.cid], + }) + } + return inputs +} + +func obfuscateSpanForConcentrator(o *obfuscate.Obfuscator, span *pb.Span, conf *config.AgentConfig) { + if span.Meta == nil { + return + } + switch span.Type { + case "sql", "cassandra": + _, err := transform.ObfuscateSQLSpan(o, span) + if err != nil { + log.Debugf("Error parsing SQL query: %v. 
Resource: %q", err, span.Resource) + } + case "redis": + span.Resource = o.QuantizeRedisString(span.Resource) + if conf.Obfuscation.Redis.Enabled { + transform.ObfuscateRedisSpan(o, span, conf.Obfuscation.Redis.RemoveAllArgs) + } + } +} + +// newTestObfuscator creates a new obfuscator for testing +func newTestObfuscator(conf *config.AgentConfig) *obfuscate.Obfuscator { + oconf := conf.Obfuscation.Export(conf) + oconf.Redis.Enabled = true + o := obfuscate.NewObfuscator(oconf) + return o +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/span_concentrator.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/span_concentrator.go new file mode 100644 index 00000000..dec86102 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/span_concentrator.go @@ -0,0 +1,261 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package stats + +import ( + "strings" + "sync" + "time" + + "github.com/DataDog/datadog-agent/pkg/obfuscate" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/log" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" +) + +// SpanConcentratorConfig exposes configuration options for a SpanConcentrator +type SpanConcentratorConfig struct { + // ComputeStatsBySpanKind enables/disables the computing of stats based on a span's `span.kind` field + ComputeStatsBySpanKind bool + // BucketInterval the size of our pre-aggregation per bucket + BucketInterval int64 +} + +// StatSpan holds all the required fields from a span needed to calculate stats +type StatSpan struct { + service string + resource string + name string + typ string + error int32 + parentID uint64 + start int64 + duration int64 + + //Fields below this are derived on creation + + spanKind string + statusCode uint32 + isTopLevel bool + matchingPeerTags []string + grpcStatusCode string +} + +func matchingPeerTags(meta map[string]string, peerTagKeys []string) []string { + if len(peerTagKeys) == 0 { + return nil + } + var pt []string + for _, t := range peerTagKeysToAggregateForSpan(meta[tagSpanKind], meta[tagBaseService], peerTagKeys) { + if v, ok := meta[t]; ok && v != "" { + v = obfuscate.QuantizePeerIPAddresses(v) + pt = append(pt, t+":"+v) + } + } + return pt +} + +// peerTagKeysToAggregateForSpan returns the set of peerTagKeys to use for stats aggregation for the given +// span.kind and _dd.base_service +func peerTagKeysToAggregateForSpan(spanKind string, baseService string, peerTagKeys []string) []string { + if len(peerTagKeys) == 0 { + return nil + } + spanKind = strings.ToLower(spanKind) + if (spanKind == "" || spanKind == "internal") && baseService != "" { + // it's a service override on an internal span so it comes from custom instrumentation and does not represent + // a client|producer|consumer span which is talking to a peer entity + // in this case only the base service tag is relevant for stats aggregation + return []string{tagBaseService} + } + if spanKind == "client" || spanKind == "producer" || spanKind == "consumer" { + return peerTagKeys + } + return nil +} + +// SpanConcentrator produces time bucketed statistics from a stream of raw spans. 
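peerTagKeysToAggregateForSpan above is a small decision table keyed on span.kind and the base-service override. A standalone copy of that table for experimentation; the helper name and the sample keys are illustrative, and the "_dd.base_service" literal stands in for the tagBaseService constant:

package main

import (
	"fmt"
	"strings"
)

// peerKeysFor mirrors the decision table above: client, producer, and
// consumer spans aggregate on the configured peer tag keys; internal spans
// with a service override aggregate only on the base-service tag; everything
// else gets no peer tags.
func peerKeysFor(spanKind, baseService string, peerTagKeys []string) []string {
	spanKind = strings.ToLower(spanKind)
	if (spanKind == "" || spanKind == "internal") && baseService != "" {
		return []string{"_dd.base_service"}
	}
	if spanKind == "client" || spanKind == "producer" || spanKind == "consumer" {
		return peerTagKeys
	}
	return nil
}

func main() {
	keys := []string{"peer.service", "db.instance"}
	fmt.Println(peerKeysFor("client", "", keys))       // [peer.service db.instance]
	fmt.Println(peerKeysFor("internal", "auth", keys)) // [_dd.base_service]
	fmt.Println(peerKeysFor("server", "", keys))       // []
}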
+type SpanConcentrator struct { + computeStatsBySpanKind bool + // bucket duration in nanoseconds + bsize int64 + // Timestamp of the oldest time bucket for which we allow data. + // Any ingested stats older than it get added to this bucket. + oldestTs int64 + // bufferLen is the number of 10s stats bucket we keep in memory before flushing them. + // It means that we can compute stats only for the last `bufferLen * bsize` and that we + // wait such time before flushing the stats. + // This only applies to past buckets. Stats buckets in the future are allowed with no restriction. + bufferLen int + + // mu protects the buckets field + mu sync.Mutex + buckets map[int64]*RawBucket +} + +// NewSpanConcentrator builds a new SpanConcentrator object +func NewSpanConcentrator(cfg *SpanConcentratorConfig, now time.Time) *SpanConcentrator { + sc := &SpanConcentrator{ + computeStatsBySpanKind: cfg.ComputeStatsBySpanKind, + bsize: cfg.BucketInterval, + oldestTs: alignTs(now.UnixNano(), cfg.BucketInterval), + bufferLen: defaultBufferLen, + mu: sync.Mutex{}, + buckets: make(map[int64]*RawBucket), + } + return sc +} + +// NewStatSpanFromPB is a helper version of NewStatSpan that builds a StatSpan from a pb.Span. +func (sc *SpanConcentrator) NewStatSpanFromPB(s *pb.Span, peerTags []string) (statSpan *StatSpan, ok bool) { + return sc.NewStatSpan(s.Service, s.Resource, s.Name, s.Type, s.ParentID, s.Start, s.Duration, s.Error, s.Meta, s.Metrics, peerTags) +} + +// NewStatSpan builds a StatSpan from the required fields for stats calculation +// peerTags is the configured list of peer tags to look for +// returns (nil,false) if the provided fields indicate a span should not have stats calculated +func (sc *SpanConcentrator) NewStatSpan( + service, resource, name string, + typ string, + parentID uint64, + start, duration int64, + error int32, + meta map[string]string, + metrics map[string]float64, + peerTags []string, +) (statSpan *StatSpan, ok bool) { + if meta == nil { + meta = make(map[string]string) + } + if metrics == nil { + metrics = make(map[string]float64) + } + eligibleSpanKind := sc.computeStatsBySpanKind && computeStatsForSpanKind(meta["span.kind"]) + isTopLevel := traceutil.HasTopLevelMetrics(metrics) + if !(isTopLevel || traceutil.IsMeasuredMetrics(metrics) || eligibleSpanKind) { + return nil, false + } + if traceutil.IsPartialSnapshotMetrics(metrics) { + return nil, false + } + return &StatSpan{ + service: service, + resource: resource, + name: name, + typ: typ, + error: error, + parentID: parentID, + start: start, + duration: duration, + spanKind: meta[tagSpanKind], + statusCode: getStatusCode(meta, metrics), + isTopLevel: isTopLevel, + matchingPeerTags: matchingPeerTags(meta, peerTags), + + grpcStatusCode: getGRPCStatusCode(meta, metrics), + }, true +} + +// computeStatsForSpanKind returns true if the span.kind value makes the span eligible for stats computation. +func computeStatsForSpanKind(kind string) bool { + k := strings.ToLower(kind) + _, ok := KindsComputed[k] + return ok +} + +// KindsComputed is the list of span kinds that will have stats computed on them +// when computeStatsByKind is enabled in the concentrator. 
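Putting NewStatSpan, AddSpan, and Flush together gives the minimal end-to-end path through a SpanConcentrator. A hedged usage sketch, assuming the vendored module path is importable; the service names, durations, and aggregation key are illustrative (note that an empty Env would panic in HandleSpan):

package main

import (
	"fmt"
	"time"

	"github.com/DataDog/datadog-agent/pkg/trace/stats"
)

func main() {
	now := time.Now()
	sc := stats.NewSpanConcentrator(&stats.SpanConcentratorConfig{
		ComputeStatsBySpanKind: true,
		BucketInterval:         (10 * time.Second).Nanoseconds(),
	}, now)

	// A server span is stats-eligible via KindsComputed even without
	// top-level or measured metrics.
	meta := map[string]string{"span.kind": "server"}
	s, ok := sc.NewStatSpan("checkout", "GET /pay", "http.request", "web",
		0, now.Add(-time.Second).UnixNano(), int64(5*time.Millisecond), 0,
		meta, nil, nil)
	fmt.Println("eligible:", ok) // true

	aggKey := stats.PayloadAggregationKey{Env: "prod", Hostname: "host-1"}
	sc.AddSpan(s, aggKey, "", nil, "")

	// force=true flushes every bucket regardless of age.
	payloads := sc.Flush(time.Now().UnixNano(), true)
	fmt.Println("payloads:", len(payloads)) // 1
}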
+var KindsComputed = map[string]struct{}{ + "server": {}, + "consumer": {}, + "client": {}, + "producer": {}, +} + +func (sc *SpanConcentrator) addSpan(s *StatSpan, aggKey PayloadAggregationKey, tags infraTags, origin string, weight float64) { + sc.mu.Lock() + defer sc.mu.Unlock() + end := s.start + s.duration + btime := max(end-end%sc.bsize, sc.oldestTs) + + b, ok := sc.buckets[btime] + if !ok { + b = NewRawBucket(uint64(btime), uint64(sc.bsize)) + sc.buckets[btime] = b + } + if tags.processTagsHash != 0 && len(tags.processTags) > 0 { + b.processTagsByHash[tags.processTagsHash] = tags.processTags + } + if tags.containerID != "" && len(tags.containerTags) > 0 { + b.containerTagsByID[tags.containerID] = tags.containerTags + } + b.HandleSpan(s, weight, origin, aggKey) +} + +// AddSpan to the SpanConcentrator, appending the new data to the appropriate internal bucket. +// todo:raphael migrate dd-trace-go API to not depend on containerID/containerTags and add processTags at encoding layer +func (sc *SpanConcentrator) AddSpan(s *StatSpan, aggKey PayloadAggregationKey, containerID string, containerTags []string, origin string) { + sc.addSpan(s, aggKey, infraTags{containerID: containerID, containerTags: containerTags}, origin, 1) +} + +// Flush deletes and returns complete ClientStatsPayloads. +// The force boolean guarantees flushing all buckets if set to true. +func (sc *SpanConcentrator) Flush(now int64, force bool) []*pb.ClientStatsPayload { + m := make(map[PayloadAggregationKey][]*pb.ClientStatsBucket) + containerTagsByID := make(map[string][]string) + processTagsByHash := make(map[uint64]string) + + sc.mu.Lock() + for ts, srb := range sc.buckets { + // Always keep `bufferLen` buckets (default is 2: current + previous one). + // This is a trade-off: we accept slightly late traces (clock skew and stuff) + // but we delay flushing by at most `bufferLen` buckets. + // + // This delay might result in not flushing stats payload (data loss) + // if the agent stops while the latest buckets aren't old enough to be flushed. + // The "force" boolean skips the delay and flushes all buckets, typically on agent shutdown. + if !force && ts > now-int64(sc.bufferLen)*sc.bsize { + log.Tracef("Bucket %d is not old enough to be flushed, keeping it", ts) + continue + } + log.Debugf("Flushing bucket %d", ts) + for k, b := range srb.Export() { + m[k] = append(m[k], b) + if ctags, ok := srb.containerTagsByID[k.ContainerID]; ok { + containerTagsByID[k.ContainerID] = ctags + } + if ptags, ok := srb.processTagsByHash[k.ProcessTagsHash]; ok { + processTagsByHash[k.ProcessTagsHash] = ptags + } + } + delete(sc.buckets, ts) + } + // After flushing, update the oldest timestamp allowed to prevent having stats for + // an already-flushed bucket. 
+ newOldestTs := alignTs(now, sc.bsize) - int64(sc.bufferLen-1)*sc.bsize + if newOldestTs > sc.oldestTs { + log.Debugf("Update oldestTs to %d", newOldestTs) + sc.oldestTs = newOldestTs + } + sc.mu.Unlock() + sb := make([]*pb.ClientStatsPayload, 0, len(m)) + for k, s := range m { + p := &pb.ClientStatsPayload{ + Env: k.Env, + Hostname: k.Hostname, + ContainerID: k.ContainerID, + Version: k.Version, + GitCommitSha: k.GitCommitSha, + ImageTag: k.ImageTag, + Stats: s, + Tags: containerTagsByID[k.ContainerID], + ProcessTags: processTagsByHash[k.ProcessTagsHash], + ProcessTagsHash: k.ProcessTagsHash, + } + sb = append(sb, p) + } + return sb +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/statsraw.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/statsraw.go new file mode 100644 index 00000000..85401860 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/statsraw.go @@ -0,0 +1,210 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package stats + +import ( + "math" + "math/rand" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/log" + + "github.com/golang/protobuf/proto" + + "github.com/DataDog/sketches-go/ddsketch" +) + +const ( + // relativeAccuracy is the value accuracy we have on the percentiles. For example, we can + // say that p99 is 100ms +- 1ms + relativeAccuracy = 0.01 + // maxNumBins is the maximum number of bins of the ddSketch we use to store percentiles. + // It can affect relative accuracy, but in practice, 2048 bins is enough to have 1% relative accuracy from + // 80 micro second to 1 year: http://www.vldb.org/pvldb/vol12/p2195-masson.pdf + maxNumBins = 2048 +) + +// Most "algorithm" stuff here is tested with stats_test.go as what is important +// is that the final data, the one with send after a call to Export(), is correct. + +type groupedStats struct { + // using float64 here to avoid the accumulation of rounding issues. + hits float64 + topLevelHits float64 + errors float64 + duration float64 + okDistribution *ddsketch.DDSketch + errDistribution *ddsketch.DDSketch + peerTags []string +} + +// round a float to an int, uniformly choosing +// between the lower and upper approximations. 
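The round helper defined just below converts the float64 counters back to integers by randomized rounding: the fractional part becomes the probability of rounding up, so repeated rounding is unbiased in expectation. A quick check of that property:

package main

import (
	"fmt"
	"math/rand"
)

// round is reproduced from the function defined just below.
func round(f float64) uint64 {
	i := uint64(f)
	if rand.Float64() < f-float64(i) {
		i++
	}
	return i
}

func main() {
	const trials = 1_000_000
	var sum uint64
	for n := 0; n < trials; n++ {
		sum += round(2.3) // 2 about 70% of the time, 3 about 30%
	}
	fmt.Printf("mean of round(2.3): %.4f (expected 2.3)\n",
		float64(sum)/trials)
}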
+func round(f float64) uint64 { + i := uint64(f) + if rand.Float64() < f-float64(i) { + i++ + } + return i +} + +func (s *groupedStats) export(a Aggregation) (*pb.ClientGroupedStats, error) { + msg := s.okDistribution.ToProto() + okSummary, err := proto.Marshal(msg) + if err != nil { + return &pb.ClientGroupedStats{}, err + } + msg = s.errDistribution.ToProto() + errSummary, err := proto.Marshal(msg) + if err != nil { + return &pb.ClientGroupedStats{}, err + } + return &pb.ClientGroupedStats{ + Service: a.Service, + Name: a.Name, + Resource: a.Resource, + HTTPStatusCode: a.StatusCode, + Type: a.Type, + Hits: round(s.hits), + Errors: round(s.errors), + Duration: round(s.duration), + TopLevelHits: round(s.topLevelHits), + OkSummary: okSummary, + ErrorSummary: errSummary, + Synthetics: a.Synthetics, + SpanKind: a.SpanKind, + PeerTags: s.peerTags, + IsTraceRoot: a.IsTraceRoot, + GRPCStatusCode: a.GRPCStatusCode, + }, nil +} + +func newGroupedStats() *groupedStats { + okSketch, err := ddsketch.LogCollapsingLowestDenseDDSketch(relativeAccuracy, maxNumBins) + if err != nil { + log.Errorf("Error when creating ddsketch: %v", err) + } + errSketch, err := ddsketch.LogCollapsingLowestDenseDDSketch(relativeAccuracy, maxNumBins) + if err != nil { + log.Errorf("Error when creating ddsketch: %v", err) + } + return &groupedStats{ + okDistribution: okSketch, + errDistribution: errSketch, + } +} + +// RawBucket is used to compute span data and aggregate it +// within a time-framed bucket. This should not be used outside +// the agent, use ClientStatsBucket for this. +type RawBucket struct { + // This should really have no public fields. At all. + + start uint64 // timestamp of start in our format + duration uint64 // duration of a bucket in nanoseconds + + // this should really remain private as it's subject to refactoring + data map[Aggregation]*groupedStats + + containerTagsByID map[string][]string // a map from container ID to container tags + processTagsByHash map[uint64]string // a map from process hash to process tags +} + +// NewRawBucket opens a new calculation bucket for time ts and initializes it properly +func NewRawBucket(ts, d uint64) *RawBucket { + // The only non-initialized value is the Duration which should be set by whoever closes that bucket + return &RawBucket{ + start: ts, + duration: d, + data: make(map[Aggregation]*groupedStats), + containerTagsByID: make(map[string][]string), + processTagsByHash: make(map[uint64]string), + } +} + +// Export transforms a RawBucket into a ClientStatsBucket, typically used +// before communicating data to the API, as RawBucket is the internal +// type while ClientStatsBucket is the public, shared one. 
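The relativeAccuracy and maxNumBins constants above bound the error of every percentile these sketches export: a reported quantile should sit within 1% of the true value across a very wide dynamic range. A sketch exercising that guarantee on a known distribution, with illustrative values:

package main

import (
	"fmt"

	"github.com/DataDog/sketches-go/ddsketch"
)

func main() {
	// Same parameters as newGroupedStats above: 1% accuracy, 2048 bins.
	s, err := ddsketch.LogCollapsingLowestDenseDDSketch(0.01, 2048)
	if err != nil {
		panic(err)
	}
	for i := 1; i <= 100000; i++ {
		_ = s.Add(float64(i))
	}
	p50, _ := s.GetValueAtQuantile(0.50)
	p99, _ := s.GetValueAtQuantile(0.99)
	// Each estimate should land within 1% of the exact rank value.
	fmt.Printf("p50≈%.0f (exact 50000), p99≈%.0f (exact 99000)\n", p50, p99)
}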
+func (sb *RawBucket) Export() map[PayloadAggregationKey]*pb.ClientStatsBucket { + m := make(map[PayloadAggregationKey]*pb.ClientStatsBucket) + for k, v := range sb.data { + b, err := v.export(k) + if err != nil { + log.Errorf("Dropping stats bucket due to encoding error: %v.", err) + continue + } + key := PayloadAggregationKey{ + Hostname: k.Hostname, + Version: k.Version, + Env: k.Env, + ContainerID: k.ContainerID, + GitCommitSha: k.GitCommitSha, + ImageTag: k.ImageTag, + ProcessTagsHash: k.ProcessTagsHash, + } + s, ok := m[key] + if !ok { + s = &pb.ClientStatsBucket{ + Start: sb.start, + Duration: sb.duration, + } + } + s.Stats = append(s.Stats, b) + m[key] = s + } + return m +} + +// HandleSpan adds the span to this bucket stats, aggregated with the finest grain matching given aggregators +func (sb *RawBucket) HandleSpan(s *StatSpan, weight float64, origin string, aggKey PayloadAggregationKey) { + if aggKey.Env == "" { + panic("env should never be empty") + } + aggr := NewAggregationFromSpan(s, origin, aggKey) + sb.add(s, weight, aggr) +} + +func (sb *RawBucket) add(s *StatSpan, weight float64, aggr Aggregation) { + var gs *groupedStats + var ok bool + + if gs, ok = sb.data[aggr]; !ok { + gs = newGroupedStats() + gs.peerTags = s.matchingPeerTags + sb.data[aggr] = gs + } + if s.isTopLevel { + gs.topLevelHits += weight + } + gs.hits += weight + if s.error != 0 { + gs.errors += weight + } + gs.duration += float64(s.duration) * weight + // alter resolution of duration distro + trundur := nsTimestampToFloat(s.duration) + if s.error != 0 { + if err := gs.errDistribution.Add(trundur); err != nil { + log.Debugf("Error adding error distribution stats: %v", err) + } + } else { + if err := gs.okDistribution.Add(trundur); err != nil { + log.Debugf("Error adding distribution stats: %v", err) + } + } +} + +// nsTimestampToFloat converts a nanosec timestamp into a float nanosecond timestamp truncated to a fixed precision +func nsTimestampToFloat(ns int64) float64 { + b := math.Float64bits(float64(ns)) + // IEEE-754 + // the mask include 1 bit sign 11 bits exponent (0xfff) + // then we filter the mantissa to 10bits (0xff8) (9 bits as it has implicit value of 1) + // 10 bits precision (any value will be +/- 1/1024) + // https://en.wikipedia.org/wiki/Double-precision_floating-point_format + b &= 0xfffff80000000000 + return math.Float64frombits(b) +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/weight.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/weight.go new file mode 100644 index 00000000..d28ca5e4 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/weight.go @@ -0,0 +1,24 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package stats + +import pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + +// keySamplingRateGlobal is a metric key holding the global sampling rate. +const keySamplingRateGlobal = "_sample_rate" + +// weight returns the weight of the span as defined for sampling, i.e. the +// inverse of the sampling rate. 
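The weight helper defined just below inverts the global sample rate: a span kept at rate r stands in for 1/r spans, and a missing or out-of-range rate falls back to a weight of 1. A small sketch of that behavior; the function body is reproduced from below and the sample rate is illustrative:

package main

import (
	"fmt"

	pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
)

// weight is reproduced from the function defined just below.
func weight(s *pb.Span) float64 {
	if s == nil {
		return 1
	}
	rate, ok := s.Metrics["_sample_rate"]
	if !ok || rate <= 0.0 || rate > 1.0 {
		return 1
	}
	return 1.0 / rate
}

func main() {
	kept := &pb.Span{Metrics: map[string]float64{"_sample_rate": 0.25}}
	fmt.Println(weight(kept)) // 4: one kept span counts as four
	fmt.Println(weight(nil))  // 1: no information, count it once
}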
+func weight(s *pb.Span) float64 { + if s == nil { + return 1 + } + sampleRate, ok := s.Metrics[keySamplingRateGlobal] + if !ok || sampleRate <= 0.0 || sampleRate > 1.0 { + return 1 + } + return 1.0 / sampleRate +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/azure.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/azure.go new file mode 100644 index 00000000..111ecc66 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/azure.go @@ -0,0 +1,166 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package traceutil + +import ( + "fmt" + "os" + "runtime" + "strings" +) + +const ( + aasInstanceID = "aas.environment.instance_id" + aasInstanceName = "aas.environment.instance_name" + aasOperatingSystem = "aas.environment.os" + aasRuntime = "aas.environment.runtime" + aasExtensionVersion = "aas.environment.extension_version" + aasFunctionRuntime = "aas.environment.function_runtime" + aasResourceGroup = "aas.resource.group" + aasResourceID = "aas.resource.id" + aasSiteKind = "aas.site.kind" + aasSiteName = "aas.site.name" + aasSiteType = "aas.site.type" + aasSubscriptionID = "aas.subscription.id" + + dotnetFramework = ".NET" + nodeFramework = "Node.js" + javaFramework = "Java" + pythonFramework = "Python" + phpFramework = "PHP" + goFramework = "Go" + containerFramework = "Container" + unknown = "unknown" + + appService = "app" +) + +// GetAppServicesTags returns the env vars pulled from the Azure App Service instance. +// In some cases we will need to add extra tags for function apps. +func GetAppServicesTags() map[string]string { + siteName := os.Getenv("WEBSITE_SITE_NAME") + ownerName := os.Getenv("WEBSITE_OWNER_NAME") + resourceGroup := os.Getenv("WEBSITE_RESOURCE_GROUP") + instanceID := getEnvOrUnknown("WEBSITE_INSTANCE_ID") + computerName := getEnvOrUnknown("COMPUTERNAME") + extensionVersion := os.Getenv("DD_AAS_EXTENSION_VERSION") + + // Windows and linux environments provide the OS differently + // We should grab it from GO's builtin runtime pkg + websiteOS := runtime.GOOS + + currentRuntime := getRuntime(websiteOS) + subscriptionID := parseAzureSubscriptionID(ownerName) + resourceID := compileAzureResourceID(subscriptionID, resourceGroup, siteName) + + tags := map[string]string{ + aasInstanceID: instanceID, + aasInstanceName: computerName, + aasOperatingSystem: websiteOS, + aasRuntime: currentRuntime, + aasResourceGroup: resourceGroup, + aasResourceID: resourceID, + aasSiteKind: appService, + aasSiteName: siteName, + aasSiteType: appService, + aasSubscriptionID: subscriptionID, + } + + // Remove the Java and .NET logic once non-universal extensions are deprecated + if websiteOS == "windows" { + if extensionVersion != "" { + tags[aasExtensionVersion] = extensionVersion + } else if val := os.Getenv("DD_AAS_JAVA_EXTENSION_VERSION"); val != "" { + tags[aasExtensionVersion] = val + } else if val := os.Getenv("DD_AAS_DOTNET_EXTENSION_VERSION"); val != "" { + tags[aasExtensionVersion] = val + } + } + + // Function Apps require a different runtime and kind + if rt, ok := os.LookupEnv("FUNCTIONS_WORKER_RUNTIME"); ok { + tags[aasRuntime] = rt + tags[aasFunctionRuntime] = os.Getenv("FUNCTIONS_EXTENSION_VERSION") + tags[aasSiteKind] = "functionapp" + } + + return tags +} + +func getEnvOrUnknown(env string) string { + if val, ok := 
os.LookupEnv(env); ok { + return val + } + return unknown +} + +func getRuntime(websiteOS string) (rt string) { + switch websiteOS { + case "windows": + rt = getWindowsRuntime() + case "linux", "darwin": + rt = getLinuxRuntime() + default: + rt = unknown + } + + return rt +} + +func getWindowsRuntime() (rt string) { + if os.Getenv("WEBSITE_STACK") == "JAVA" { + rt = javaFramework + } else if val := os.Getenv("WEBSITE_NODE_DEFAULT_VERSION"); val != "" { + rt = nodeFramework + } else { + // FIXME: Windows AAS only supports Java, Node, and .NET so we can infer this + // Needs to be inferred because no other env vars give us context on the runtime + rt = dotnetFramework + } + + return rt +} + +func getLinuxRuntime() (rt string) { + rt = unknown + + switch os.Getenv("WEBSITE_STACK") { + case "DOCKER": + rt = containerFramework + case "": + if val := os.Getenv("DOCKER_SERVER_VERSION"); val != "" { + rt = containerFramework + } + case "NODE": + rt = nodeFramework + case "PYTHON": + rt = pythonFramework + case "JAVA", "TOMCAT": + rt = javaFramework + case "DOTNETCORE": + rt = dotnetFramework + case "PHP": + rt = phpFramework + } + + return rt +} + +func parseAzureSubscriptionID(subID string) (id string) { + parsedSubID := strings.SplitN(subID, "+", 2) + if len(parsedSubID) > 1 { + id = parsedSubID[0] + } + return +} + +func compileAzureResourceID(subID, resourceGroup, siteName string) (id string) { + if len(subID) > 0 && len(resourceGroup) > 0 && len(siteName) > 0 { + id = fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/microsoft.web/sites/%s", + subID, resourceGroup, siteName) + } + return +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/doc.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/doc.go new file mode 100644 index 00000000..ac2de89f --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/doc.go @@ -0,0 +1,8 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package traceutil contains functions for extracting and processing traces. It should +// only import payload and nothing else. +package traceutil diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize/normalize.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize/normalize.go new file mode 100644 index 00000000..65d945d1 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize/normalize.go @@ -0,0 +1,405 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package normalize contains functions for normalizing trace data. 
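parseAzureSubscriptionID and compileAzureResourceID in the azure.go hunk above are pure string helpers. On App Service, WEBSITE_OWNER_NAME conventionally has the shape "<subscription-id>+<webspace>", which is what the SplitN on "+" assumes. A reduced sketch with made-up identifiers:

package main

import (
	"fmt"
	"strings"
)

// parseSubID mirrors parseAzureSubscriptionID above.
func parseSubID(owner string) string {
	parts := strings.SplitN(owner, "+", 2)
	if len(parts) > 1 {
		return parts[0]
	}
	return ""
}

func main() {
	sub := parseSubID("00000000-aaaa-bbbb-cccc-dddddddddddd+westeurope-webspace")
	// Same shape as compileAzureResourceID above.
	id := fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/microsoft.web/sites/%s",
		sub, "my-rg", "my-site")
	fmt.Println(id)
}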
+package normalize + +import ( + "errors" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +const ( + // DefaultSpanName is the default name we assign a span if it's missing and we have no reasonable fallback + DefaultSpanName = "unnamed_operation" + // DefaultServiceName is the default name we assign a service if it's missing and we have no reasonable fallback + DefaultServiceName = "unnamed-service" +) + +const ( + // MaxNameLen the maximum length a name can have + MaxNameLen = 100 + // MaxServiceLen the maximum length a service can have + MaxServiceLen = 100 + // MaxResourceLen the maximum length a resource can have + MaxResourceLen = 5000 +) + +var ( + // ErrEmpty specifies that the passed input was empty. + ErrEmpty = errors.New("empty") + // ErrTooLong signifies that the input was too long. + ErrTooLong = errors.New("too long") + // ErrInvalid signifies that the input was invalid. + ErrInvalid = errors.New("invalid") +) + +var isAlphaLookup = [256]bool{} +var isAlphaNumLookup = [256]bool{} +var isValidASCIIStartCharLookup = [256]bool{} +var isValidASCIITagCharLookup = [256]bool{} + +func init() { + for i := 0; i < 256; i++ { + isAlphaLookup[i] = isAlpha(byte(i)) + isAlphaNumLookup[i] = isAlphaNum(byte(i)) + isValidASCIIStartCharLookup[i] = isValidASCIIStartChar(byte(i)) + isValidASCIITagCharLookup[i] = isValidASCIITagChar(byte(i)) + } +} + +// NormalizeName normalizes a span name and returns an error describing the reason +// (if any) why the name was modified. +// +//nolint:revive +func NormalizeName(name string) (string, error) { + if name == "" { + return DefaultSpanName, ErrEmpty + } + var err error + if len(name) > MaxNameLen { + name = TruncateUTF8(name, MaxNameLen) + err = ErrTooLong + } + name, ok := normMetricNameParse(name) + if !ok { + return DefaultSpanName, ErrInvalid + } + return name, err +} + +// NormalizeService normalizes a span service and returns an error describing the reason +// (if any) why the name was modified. +// +//nolint:revive +func NormalizeService(svc string, lang string) (string, error) { + if svc == "" { + return fallbackService(lang), ErrEmpty + } + var err error + if len(svc) > MaxServiceLen { + svc = TruncateUTF8(svc, MaxServiceLen) + err = ErrTooLong + } + // We are normalizing just the tag value. + s := NormalizeTagValue(svc) + if s == "" { + return fallbackService(lang), ErrInvalid + } + return s, err +} + +// NormalizePeerService normalizes a span's peer.service and returns an error describing the reason +// (if any) why the name was modified. +// +//nolint:revive +func NormalizePeerService(svc string) (string, error) { + if svc == "" { + return "", nil + } + var err error + if len(svc) > MaxServiceLen { + svc = TruncateUTF8(svc, MaxServiceLen) + err = ErrTooLong + } + // We are normalizing just the tag value. + s := NormalizeTagValue(svc) + if s == "" { + return "", ErrInvalid + } + return s, err +} + +// fallbackServiceNames is a cache of default service names to use +// when the span's service is unset or invalid. +var fallbackServiceNames sync.Map + +// fallbackService returns the fallback service name for a service +// belonging to language lang. 
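The exported entry points above (NormalizeName, NormalizeService, NormalizeTag) fall back to safe defaults on empty or invalid input and return a sentinel error explaining why a value was rewritten. A usage sketch, assuming the vendored import path resolves; the expected outputs in the comments follow the rules in this file:

package main

import (
	"fmt"

	normalize "github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize"
)

func main() {
	// Lowercased, illegal characters collapsed to underscores.
	fmt.Println(normalize.NormalizeTag("SOME:Tag With Spaces")) // some:tag_with_spaces

	// Empty input falls back to the defaults and reports why.
	name, err := normalize.NormalizeName("")
	fmt.Println(name, err) // unnamed_operation, "empty"

	svc, err := normalize.NormalizeService("", "go")
	fmt.Println(svc, err) // unnamed-go-service, "empty"
}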
+func fallbackService(lang string) string { + if lang == "" { + return DefaultServiceName + } + if v, ok := fallbackServiceNames.Load(lang); ok { + return v.(string) + } + var str strings.Builder + str.WriteString("unnamed-") + str.WriteString(lang) + str.WriteString("-service") + fallbackServiceNames.Store(lang, str.String()) + return str.String() +} + +const maxTagLength = 200 + +// NormalizeTag applies some normalization to ensure the full tag_key:tag_value string matches the backend requirements. +// +//nolint:revive +func NormalizeTag(v string) string { + return normalize(v, true) +} + +// NormalizeTagValue applies some normalization to ensure the tag value matches the backend requirements. +// It should be used for cases where we have just the tag_value as the input (instead of tag_key:tag_value). +// +//nolint:revive +func NormalizeTagValue(v string) string { + return normalize(v, false) +} + +func normalize(v string, removeDigitStartChar bool) string { + // Fast path: Check if the tag is valid and only contains ASCII characters, + // if yes return it as-is right away. For most use-cases this reduces CPU usage. + if isNormalizedASCIITag(v, removeDigitStartChar) { + return v + } + // the algorithm works by creating a set of cuts marking start and end offsets in v + // that have to be replaced with underscore (_) + if len(v) == 0 { + return "" + } + var ( + trim int // start character (if trimming) + cuts [][2]int // sections to discard: (start, end) pairs + chars int // number of characters processed + ) + var ( + i int // current byte + r rune // current rune + jump int // tracks how many bytes the for range advances on its next iteration + ) + tag := []byte(v) + for i, r = range v { + jump = utf8.RuneLen(r) // next i will be i+jump + if r == utf8.RuneError { + // On invalid UTF-8, the for range advances only 1 byte (see: https://golang.org/ref/spec#For_range (point 2)). + // However, utf8.RuneError is equivalent to unicode.ReplacementChar so we should rely on utf8.DecodeRune to tell + // us whether this is an actual error or just a unicode.ReplacementChar that was present in the string. + _, width := utf8.DecodeRune(tag[i:]) + jump = width + } + // fast path; all letters (and colons) are ok + switch { + case r >= 'a' && r <= 'z' || r == ':': + chars++ + goto end + case r >= 'A' && r <= 'Z': + // lower-case + tag[i] += 'a' - 'A' + chars++ + goto end + } + if unicode.IsUpper(r) { + // lowercase this character + if low := unicode.ToLower(r); utf8.RuneLen(r) == utf8.RuneLen(low) { + // but only if the width of the lowercased character is the same; + // there are some rare edge-cases where this is not the case, such + // as \u017F (ſ) + utf8.EncodeRune(tag[i:], low) + r = low + } + } + switch { + case unicode.IsLetter(r): + chars++ + // If it's not a unicode letter, and it's the first char, and digits are allowed for the start char, + // we should goto end because the remaining cases are not valid for a start char. + case removeDigitStartChar && chars == 0: + trim = i + jump + goto end + case unicode.IsDigit(r) || r == '.' || r == '/' || r == '-': + chars++ + default: + // illegal character + chars++ + if n := len(cuts); n > 0 && cuts[n-1][1] >= i { + // merge intersecting cuts + cuts[n-1][1] += jump + } else { + // start a new cut + cuts = append(cuts, [2]int{i, i + jump}) + } + } + end: + if i+jump >= 2*maxTagLength { + // bail early if the tag contains a lot of non-letter/digit characters. 
+ // If a tag is test🍣🍣[...]🍣, then it's unlikely to be a properly formatted tag + break + } + if chars >= maxTagLength { + // we've reached the maximum + break + } + } + + tag = tag[trim : i+jump] // trim start and end + if len(cuts) == 0 { + // tag was ok, return it as it is + return string(tag) + } + delta := trim // cut offsets delta + for _, cut := range cuts { + // start and end of cut, including delta from previous cuts: + start, end := cut[0]-delta, cut[1]-delta + + if end >= len(tag) { + // this cut includes the end of the string; discard it + // completely and finish the loop. + tag = tag[:start] + break + } + // replace the beginning of the cut with '_' + tag[start] = '_' + if end-start == 1 { + // nothing to discard + continue + } + // discard remaining characters in the cut + copy(tag[start+1:], tag[end:]) + + // shorten the slice + tag = tag[:len(tag)-(end-start)+1] + + // count the new delta for future cuts + delta += cut[1] - cut[0] - 1 + } + return string(tag) +} + +// This code is borrowed from dd-go metric normalization + +// fast isAlpha for ascii +func isAlpha(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') +} + +// fast isAlphaNumeric for ascii +func isAlphaNum(b byte) bool { + return isAlpha(b) || (b >= '0' && b <= '9') +} + +func isValidNormalizedMetricName(name string) bool { + if name == "" { + return false + } + if !isAlphaLookup[name[0]] { + return false + } + for j := 1; j < len(name); j++ { + b := name[j] + if !(isAlphaNumLookup[b] || (b == '.' && !(name[j-1] == '_')) || (b == '_' && !(name[j-1] == '_'))) { + return false + } + } + return true +} + +// normMetricNameParse normalizes metric names with a parser instead of using +// garbage-creating string replacement routines. +func normMetricNameParse(name string) (string, bool) { + if name == "" || len(name) > MaxNameLen { + return name, false + } + + var i, ptr int + var resa [MaxNameLen]byte + res := resa[:0] + + // skip non-alphabetic characters + for ; i < len(name) && !isAlphaLookup[name[i]]; i++ { + continue + } + + // if there were no alphabetic characters it wasn't valid + if i == len(name) { + return "", false + } + + if isValidNormalizedMetricName(name[i:]) { + normalized := name[i:] + if normalized[len(normalized)-1] == '_' { + normalized = normalized[:len(normalized)-1] + } + return normalized, true + } + + for ; i < len(name); i++ { + switch { + case isAlphaNumLookup[name[i]]: + res = append(res, name[i]) + ptr++ + case name[i] == '.': + // we skipped all non-alpha chars up front so we have seen at least one + switch res[ptr-1] { + // overwrite underscores that happen before periods + case '_': + res[ptr-1] = '.' + default: + res = append(res, '.') + ptr++ + } + default: + // we skipped all non-alpha chars up front so we have seen at least one + switch res[ptr-1] { + // no double underscores, no underscores after periods + case '.', '_': + default: + res = append(res, '_') + ptr++ + } + } + } + + if res[ptr-1] == '_' { + res = res[:ptr-1] + } + + return string(res), true +} + +func isNormalizedASCIITag(tag string, checkValidStartChar bool) bool { + if len(tag) == 0 { + return true + } + if len(tag) > maxTagLength { + return false + } + i := 0 + if checkValidStartChar { + if !isValidASCIIStartCharLookup[tag[0]] { + return false + } + i++ + } + for ; i < len(tag); i++ { + b := tag[i] + // TODO: Attempt to optimize this check using SIMD/vectorization. 
+ if isValidASCIITagCharLookup[b] { + continue + } + if b == '_' { + // an underscore is only okay if followed by a valid non-underscore character + i++ + if i == len(tag) || !isValidASCIITagCharLookup[tag[i]] { + return false + } + } else { + return false + } + } + return true +} + +func isValidASCIIStartChar(c byte) bool { + return ('a' <= c && c <= 'z') || c == ':' +} + +func isValidASCIITagChar(c byte) bool { + return isValidASCIIStartChar(c) || ('0' <= c && c <= '9') || c == '.' || c == '/' || c == '-' +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize/truncate.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize/truncate.go new file mode 100644 index 00000000..f34d94e4 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize/truncate.go @@ -0,0 +1,37 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package normalize + +import "unicode/utf8" + +// TruncateUTF8 truncates the given string to make sure it uses less than limit bytes. +// If the last character is an utf8 character that would be splitten, it removes it +// entirely to make sure the resulting string is not broken. +func TruncateUTF8(s string, limit int) string { + if len(s) <= limit { + return s + } + s = s[:limit] + // The max length of a valid code point is 4 bytes, therefore if we see all valid + // code points in the last 4 bytes we know we have a fully valid utf-8 string + // If not we can truncate one byte at a time until the end of the string is valid utf-8 + for len(s) >= 1 { + if len(s) >= 4 && utf8.Valid([]byte(s[len(s)-4:])) { + break + } + if len(s) >= 3 && utf8.Valid([]byte(s[len(s)-3:])) { + break + } + if len(s) >= 2 && utf8.Valid([]byte(s[len(s)-2:])) { + break + } + if len(s) >= 1 && utf8.Valid([]byte(s[len(s)-1:])) { + break + } + s = s[:len(s)-1] + } + return s +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/otel_util.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/otel_util.go new file mode 100644 index 00000000..1699d286 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/otel_util.go @@ -0,0 +1,637 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package traceutil + +import ( + "context" + "encoding/binary" + "strings" + + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes" + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" + semconv117 "go.opentelemetry.io/collector/semconv/v1.17.0" + semconv126 "go.opentelemetry.io/collector/semconv/v1.26.0" + semconv "go.opentelemetry.io/collector/semconv/v1.6.1" + "go.opentelemetry.io/otel/attribute" + + "github.com/DataDog/datadog-agent/pkg/trace/log" + normalizeutil "github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize" +) + +// Util functions for converting OTel semantics to DD semantics. + +var ( + // SignalTypeSet is the OTel attribute set for traces. 
+ SignalTypeSet = attribute.NewSet(attribute.String("signal", "traces")) +) + +const ( + // TagStatusCode is the tag key for http status code. + TagStatusCode = "http.status_code" +) + +// span.Type constants for db systems +const ( + spanTypeSQL = "sql" + spanTypeCassandra = "cassandra" + spanTypeRedis = "redis" + spanTypeMemcached = "memcached" + spanTypeMongoDB = "mongodb" + spanTypeElasticsearch = "elasticsearch" + spanTypeOpenSearch = "opensearch" + spanTypeDB = "db" +) + +// DBTypes are semconv types that should map to span.Type values given in the mapping +var dbTypes = map[string]string{ + // SQL db types + semconv.AttributeDBSystemOtherSQL: spanTypeSQL, + semconv.AttributeDBSystemMSSQL: spanTypeSQL, + semconv.AttributeDBSystemMySQL: spanTypeSQL, + semconv.AttributeDBSystemOracle: spanTypeSQL, + semconv.AttributeDBSystemDB2: spanTypeSQL, + semconv.AttributeDBSystemPostgreSQL: spanTypeSQL, + semconv.AttributeDBSystemRedshift: spanTypeSQL, + semconv.AttributeDBSystemCloudscape: spanTypeSQL, + semconv.AttributeDBSystemHSQLDB: spanTypeSQL, + semconv.AttributeDBSystemMaxDB: spanTypeSQL, + semconv.AttributeDBSystemIngres: spanTypeSQL, + semconv.AttributeDBSystemFirstSQL: spanTypeSQL, + semconv.AttributeDBSystemEDB: spanTypeSQL, + semconv.AttributeDBSystemCache: spanTypeSQL, + semconv.AttributeDBSystemFirebird: spanTypeSQL, + semconv.AttributeDBSystemDerby: spanTypeSQL, + semconv.AttributeDBSystemInformix: spanTypeSQL, + semconv.AttributeDBSystemMariaDB: spanTypeSQL, + semconv.AttributeDBSystemSqlite: spanTypeSQL, + semconv.AttributeDBSystemSybase: spanTypeSQL, + semconv.AttributeDBSystemTeradata: spanTypeSQL, + semconv.AttributeDBSystemVertica: spanTypeSQL, + semconv.AttributeDBSystemH2: spanTypeSQL, + semconv.AttributeDBSystemColdfusion: spanTypeSQL, + semconv.AttributeDBSystemCockroachdb: spanTypeSQL, + semconv.AttributeDBSystemProgress: spanTypeSQL, + semconv.AttributeDBSystemHanaDB: spanTypeSQL, + semconv.AttributeDBSystemAdabas: spanTypeSQL, + semconv.AttributeDBSystemFilemaker: spanTypeSQL, + semconv.AttributeDBSystemInstantDB: spanTypeSQL, + semconv.AttributeDBSystemInterbase: spanTypeSQL, + semconv.AttributeDBSystemNetezza: spanTypeSQL, + semconv.AttributeDBSystemPervasive: spanTypeSQL, + semconv.AttributeDBSystemPointbase: spanTypeSQL, + semconv117.AttributeDBSystemClickhouse: spanTypeSQL, // not in semconv 1.6.1 + + // Cassandra db types + semconv.AttributeDBSystemCassandra: spanTypeCassandra, + + // Redis db types + semconv.AttributeDBSystemRedis: spanTypeRedis, + + // Memcached db types + semconv.AttributeDBSystemMemcached: spanTypeMemcached, + + // Mongodb db types + semconv.AttributeDBSystemMongoDB: spanTypeMongoDB, + + // Elasticsearch db types + semconv.AttributeDBSystemElasticsearch: spanTypeElasticsearch, + + // Opensearch db types, not in semconv 1.6.1 + semconv117.AttributeDBSystemOpensearch: spanTypeOpenSearch, + + // Generic db types + semconv.AttributeDBSystemHive: spanTypeDB, + semconv.AttributeDBSystemHBase: spanTypeDB, + semconv.AttributeDBSystemNeo4j: spanTypeDB, + semconv.AttributeDBSystemCouchbase: spanTypeDB, + semconv.AttributeDBSystemCouchDB: spanTypeDB, + semconv.AttributeDBSystemCosmosDB: spanTypeDB, + semconv.AttributeDBSystemDynamoDB: spanTypeDB, + semconv.AttributeDBSystemGeode: spanTypeDB, +} + +// checkDBType checks if the dbType is a known db type and returns the corresponding span.Type +func checkDBType(dbType string) string { + spanType, ok := dbTypes[dbType] + if ok { + return spanType + } + // span type not found, return generic db type + 
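+	// For example, checkDBType("mysql") yields "sql" and checkDBType("mongodb")
+	// yields "mongodb", while an unmapped system name falls through to "db" here.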
return spanTypeDB +} + +// IndexOTelSpans iterates over the input OTel spans and returns 3 maps: +// OTel spans indexed by span ID, OTel resources indexed by span ID, OTel instrumentation scopes indexed by span ID. +// Skips spans with invalid trace ID or span ID. If there are multiple spans with the same (non-zero) span ID, the last one wins. +func IndexOTelSpans(traces ptrace.Traces) (map[pcommon.SpanID]ptrace.Span, map[pcommon.SpanID]pcommon.Resource, map[pcommon.SpanID]pcommon.InstrumentationScope) { + spanByID := make(map[pcommon.SpanID]ptrace.Span) + resByID := make(map[pcommon.SpanID]pcommon.Resource) + scopeByID := make(map[pcommon.SpanID]pcommon.InstrumentationScope) + rspanss := traces.ResourceSpans() + for i := 0; i < rspanss.Len(); i++ { + rspans := rspanss.At(i) + res := rspans.Resource() + for j := 0; j < rspans.ScopeSpans().Len(); j++ { + libspans := rspans.ScopeSpans().At(j) + for k := 0; k < libspans.Spans().Len(); k++ { + span := libspans.Spans().At(k) + if span.TraceID().IsEmpty() || span.SpanID().IsEmpty() { + continue + } + spanByID[span.SpanID()] = span + resByID[span.SpanID()] = res + scopeByID[span.SpanID()] = libspans.Scope() + } + } + } + return spanByID, resByID, scopeByID +} + +// GetTopLevelOTelSpans returns the span IDs of the top level OTel spans. +func GetTopLevelOTelSpans(spanByID map[pcommon.SpanID]ptrace.Span, resByID map[pcommon.SpanID]pcommon.Resource, topLevelByKind bool) map[pcommon.SpanID]struct{} { + topLevelSpans := make(map[pcommon.SpanID]struct{}) + for spanID, span := range spanByID { + if span.ParentSpanID().IsEmpty() { + // case 1: root span + topLevelSpans[spanID] = struct{}{} + continue + } + + if topLevelByKind { + // New behavior for computing top level OTel spans, see computeTopLevelAndMeasured in pkg/trace/api/otlp.go + spanKind := span.Kind() + if spanKind == ptrace.SpanKindServer || spanKind == ptrace.SpanKindConsumer { + // span is a server-side span, mark as top level + topLevelSpans[spanID] = struct{}{} + } + continue + } + + // Otherwise, fall back to old behavior in ComputeTopLevel + parentSpan, ok := spanByID[span.ParentSpanID()] + if !ok { + // case 2: parent span not in the same chunk, presumably it belongs to another service + topLevelSpans[spanID] = struct{}{} + continue + } + + svc := GetOTelService(resByID[spanID], true) + parentSvc := GetOTelService(resByID[parentSpan.SpanID()], true) + if svc != parentSvc { + // case 3: parent is not in the same service + topLevelSpans[spanID] = struct{}{} + } + } + return topLevelSpans +} + +// GetOTelAttrVal returns the matched value as a string in the input map with the given keys. +// If there are multiple keys present, the first matched one is returned. +// If normalize is true, normalize the return value with NormalizeTagValue. +func GetOTelAttrVal(attrs pcommon.Map, normalize bool, keys ...string) string { + val := "" + for _, key := range keys { + attrval, exists := attrs.Get(key) + if exists { + val = attrval.AsString() + break + } + } + + if normalize { + val = normalizeutil.NormalizeTagValue(val) + } + + return val +} + +// GetOTelAttrValInResAndSpanAttrs returns the matched value as a string in the OTel resource attributes and span attributes with the given keys. +// If there are multiple keys present, the first matched one is returned. +// If the key is present in both resource attributes and span attributes, resource attributes take precedence. +// If normalize is true, normalize the return value with NormalizeTagValue. 
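+//
+// For example, with keys ("http.request.method", "http.method"), a value found in
+// the resource attributes for either key wins over any value in the span attributes.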
+func GetOTelAttrValInResAndSpanAttrs(span ptrace.Span, res pcommon.Resource, normalize bool, keys ...string) string { + if val := GetOTelAttrVal(res.Attributes(), normalize, keys...); val != "" { + return val + } + return GetOTelAttrVal(span.Attributes(), normalize, keys...) +} + +// SpanKind2Type returns a span's type based on the given kind and other present properties. +// This function is used in Resource V1 logic only. See GetOtelSpanType for Resource V2 logic. +func SpanKind2Type(span ptrace.Span, res pcommon.Resource) string { + var typ string + switch span.Kind() { + case ptrace.SpanKindServer: + typ = "web" + case ptrace.SpanKindClient: + typ = "http" + db := GetOTelAttrValInResAndSpanAttrs(span, res, true, semconv.AttributeDBSystem) + if db == "" { + break + } + switch db { + case "redis", "memcached": + typ = "cache" + default: + typ = "db" + } + default: + typ = "custom" + } + return typ +} + +// GetOTelSpanType returns the DD span type based on OTel span kind and attributes. +// This logic is used in ReceiveResourceSpansV2 logic +func GetOTelSpanType(span ptrace.Span, res pcommon.Resource) string { + typ := GetOTelAttrValInResAndSpanAttrs(span, res, false, "span.type") + if typ != "" { + return typ + } + switch span.Kind() { + case ptrace.SpanKindServer: + typ = "web" + case ptrace.SpanKindClient: + db := GetOTelAttrValInResAndSpanAttrs(span, res, true, semconv.AttributeDBSystem) + if db == "" { + typ = "http" + } else { + typ = checkDBType(db) + } + default: + typ = "custom" + } + return typ +} + +// GetOTelService returns the DD service name based on OTel span and resource attributes. +func GetOTelService(res pcommon.Resource, normalize bool) string { + // No need to normalize with NormalizeTagValue since we will do NormalizeService later + svc := GetOTelAttrVal(res.Attributes(), false, semconv.AttributeServiceName) + if svc == "" { + svc = "otlpresourcenoservicename" + } + if normalize { + newsvc, err := normalizeutil.NormalizeService(svc, "") + switch err { + case normalizeutil.ErrTooLong: + log.Debugf("Fixing malformed trace. Service is too long (reason:service_truncate), truncating span.service to length=%d: %s", normalizeutil.MaxServiceLen, svc) + case normalizeutil.ErrInvalid: + log.Debugf("Fixing malformed trace. Service is invalid (reason:service_invalid), replacing invalid span.service=%s with fallback span.service=%s", svc, newsvc) + } + svc = newsvc + } + return svc +} + +// GetOTelResourceV1 returns the DD resource name based on OTel span and resource attributes. 
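+// For example, a span with http.method="GET" and http.route="/users/:id" and no
+// explicit resource.name yields the resource name "GET /users/:id".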
+func GetOTelResourceV1(span ptrace.Span, res pcommon.Resource) (resName string) {
+	resName = GetOTelAttrValInResAndSpanAttrs(span, res, false, "resource.name")
+	if resName == "" {
+		if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, "http.request.method", semconv.AttributeHTTPMethod); m != "" {
+			// use the HTTP method + route (if available)
+			resName = m
+			if route := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeHTTPRoute); route != "" {
+				resName = resName + " " + route
+			}
+		} else if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeMessagingOperation); m != "" {
+			resName = m
+			// use the messaging operation
+			if dest := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeMessagingDestination, semconv117.AttributeMessagingDestinationName); dest != "" {
+				resName = resName + " " + dest
+			}
+		} else if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeRPCMethod); m != "" {
+			resName = m
+			// use the RPC method
+			if svc := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeRPCService); svc != "" {
+				// ...and service if available
+				resName = resName + " " + svc
+			}
+		} else if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv117.AttributeGraphqlOperationType); m != "" {
+			// Enrich GraphQL query resource names.
+			// See https://github.com/open-telemetry/semantic-conventions/blob/v1.29.0/docs/graphql/graphql-spans.md
+			resName = m
+			if name := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv117.AttributeGraphqlOperationName); name != "" {
+				resName = resName + " " + name
+			}
+		} else {
+			resName = span.Name()
+		}
+	}
+	if len(resName) > normalizeutil.MaxResourceLen {
+		resName = resName[:normalizeutil.MaxResourceLen]
+	}
+	return
+}
+
+// GetOTelResourceV2 returns the DD resource name based on OTel span and resource attributes.
+func GetOTelResourceV2(span ptrace.Span, res pcommon.Resource) (resName string) {
+	defer func() {
+		if len(resName) > normalizeutil.MaxResourceLen {
+			resName = resName[:normalizeutil.MaxResourceLen]
+		}
+	}()
+	if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, "resource.name"); m != "" {
+		resName = m
+		return
+	}
+
+	if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, "http.request.method", semconv.AttributeHTTPMethod); m != "" {
+		if m == "_OTHER" {
+			m = "HTTP"
+		}
+		// use the HTTP method + route (if available)
+		resName = m
+		if span.Kind() == ptrace.SpanKindServer {
+			if route := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeHTTPRoute); route != "" {
+				resName = resName + " " + route
+			}
+		}
+		return
+	}
+
+	if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeMessagingOperation); m != "" {
+		resName = m
+		// use the messaging operation
+		if dest := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeMessagingDestination, semconv117.AttributeMessagingDestinationName); dest != "" {
+			resName = resName + " " + dest
+		}
+		return
+	}
+
+	if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeRPCMethod); m != "" {
+		resName = m
+		// use the RPC method
+		if svc := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeRPCService); svc != "" {
+			// ...and service if available
+			resName = resName + " " + svc
+		}
+		return
+	}
+
+	if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv117.AttributeGraphqlOperationType); m != "" {
+		// Enrich GraphQL query resource names.
+ // See https://github.com/open-telemetry/semantic-conventions/blob/v1.29.0/docs/graphql/graphql-spans.md + resName = m + if name := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv117.AttributeGraphqlOperationName); name != "" { + resName = resName + " " + name + } + return + } + + if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeDBSystem); m != "" { + // Since traces are obfuscated by span.Resource in pkg/trace/agent/obfuscate.go, we should use span.Resource as the resource name. + // https://github.com/DataDog/datadog-agent/blob/62619a69cff9863f5b17215847b853681e36ff15/pkg/trace/agent/obfuscate.go#L32 + if dbStatement := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeDBStatement); dbStatement != "" { + resName = dbStatement + return + } + if dbQuery := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv126.AttributeDBQueryText); dbQuery != "" { + resName = dbQuery + return + } + } + + resName = span.Name() + + return +} + +// GetOTelOperationNameV2 returns the DD operation name based on OTel span and resource attributes and given configs. +func GetOTelOperationNameV2( + span ptrace.Span, +) string { + if operationName := GetOTelAttrVal(span.Attributes(), true, "operation.name"); operationName != "" { + return operationName + } + + isClient := span.Kind() == ptrace.SpanKindClient + isServer := span.Kind() == ptrace.SpanKindServer + + // http + if method := GetOTelAttrVal(span.Attributes(), false, "http.request.method", semconv.AttributeHTTPMethod); method != "" { + if isServer { + return "http.server.request" + } + if isClient { + return "http.client.request" + } + } + + // database + if v := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeDBSystem); v != "" && isClient { + return v + ".query" + } + + // messaging + system := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeMessagingSystem) + op := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeMessagingOperation) + if system != "" && op != "" { + switch span.Kind() { + case ptrace.SpanKindClient, ptrace.SpanKindServer, ptrace.SpanKindConsumer, ptrace.SpanKindProducer: + return system + "." + op + } + } + + // RPC & AWS + rpcValue := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeRPCSystem) + isRPC := rpcValue != "" + isAws := isRPC && (rpcValue == "aws-api") + // AWS client + if isAws && isClient { + if service := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeRPCService); service != "" { + return "aws." + service + ".request" + } + return "aws.client.request" + } + // RPC client + if isRPC && isClient { + return rpcValue + ".client.request" + } + // RPC server + if isRPC && isServer { + return rpcValue + ".server.request" + } + + // FAAS client + provider := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeFaaSInvokedProvider) + invokedName := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeFaaSInvokedName) + if provider != "" && invokedName != "" && isClient { + return provider + "." 
+ invokedName + ".invoke" + } + + // FAAS server + trigger := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeFaaSTrigger) + if trigger != "" && isServer { + return trigger + ".invoke" + } + + // GraphQL + if GetOTelAttrVal(span.Attributes(), true, "graphql.operation.type") != "" { + return "graphql.server.request" + } + + // if nothing matches, checking for generic http server/client + protocol := GetOTelAttrVal(span.Attributes(), true, "network.protocol.name") + if isServer { + if protocol != "" { + return protocol + ".server.request" + } + return "server.request" + } else if isClient { + if protocol != "" { + return protocol + ".client.request" + } + return "client.request" + } + + if span.Kind() != ptrace.SpanKindUnspecified { + return span.Kind().String() + } + return ptrace.SpanKindInternal.String() +} + +// GetOTelOperationNameV1 returns the DD operation name based on OTel span and resource attributes and given configs. +func GetOTelOperationNameV1( + span ptrace.Span, + res pcommon.Resource, + lib pcommon.InstrumentationScope, + spanNameAsResourceName bool, + spanNameRemappings map[string]string, + normalize bool) string { + // No need to normalize with NormalizeTagValue since we will do NormalizeName later + name := GetOTelAttrValInResAndSpanAttrs(span, res, false, "operation.name") + if name == "" { + if spanNameAsResourceName { + name = span.Name() + } else { + name = strings.ToLower(span.Kind().String()) + if lib.Name() != "" { + name = lib.Name() + "." + name + } else { + name = "opentelemetry." + name + } + } + } + if v, ok := spanNameRemappings[name]; ok { + name = v + } + + if normalize { + normalizeName, err := normalizeutil.NormalizeName(name) + switch err { + case normalizeutil.ErrEmpty: + log.Debugf("Fixing malformed trace. Name is empty (reason:span_name_empty), setting span.name=%s", normalizeName) + case normalizeutil.ErrTooLong: + log.Debugf("Fixing malformed trace. Name is too long (reason:span_name_truncate), truncating span.name to length=%d", normalizeutil.MaxServiceLen) + case normalizeutil.ErrInvalid: + log.Debugf("Fixing malformed trace. Name is invalid (reason:span_name_invalid), setting span.name=%s", normalizeName) + } + name = normalizeName + } + + return name +} + +// GetOtelSource returns the source based on OTel span and resource attributes. +func GetOtelSource(span ptrace.Span, res pcommon.Resource, tr *attributes.Translator) (source.Source, bool) { + ctx := context.Background() + src, srcok := tr.ResourceToSource(ctx, res, SignalTypeSet, nil) + if !srcok { + if v := GetOTelAttrValInResAndSpanAttrs(span, res, false, "_dd.hostname"); v != "" { + src = source.Source{Kind: source.HostnameKind, Identifier: v} + srcok = true + } + } + return src, srcok +} + +// GetOTelHostname returns the DD hostname based on OTel span and resource attributes. +func GetOTelHostname(span ptrace.Span, res pcommon.Resource, tr *attributes.Translator, fallbackHost string) string { + src, srcok := GetOtelSource(span, res, tr) + if srcok { + switch src.Kind { + case source.HostnameKind: + return src.Identifier + default: + // We are not on a hostname (serverless), hence the hostname is empty + return "" + } + } else { + // fallback hostname from Agent conf.Hostname + return fallbackHost + } +} + +// GetOTelStatusCode returns the DD status code of the OTel span. 
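+// It prefers the stable "http.response.status_code" attribute, falls back to the
+// legacy semconv "http.status_code", and returns 0 when neither is present.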
+func GetOTelStatusCode(span ptrace.Span) uint32 { + if code, ok := span.Attributes().Get("http.response.status_code"); ok { + return uint32(code.Int()) + } + if code, ok := span.Attributes().Get(semconv.AttributeHTTPStatusCode); ok { + return uint32(code.Int()) + } + return 0 +} + +// GetOTelContainerTags returns a list of DD container tags in the OTel resource attributes. +// Tags are always normalized. +func GetOTelContainerTags(rattrs pcommon.Map, tagKeys []string) []string { + var containerTags []string + containerTagsMap := attributes.ContainerTagsFromResourceAttributes(rattrs) + for _, key := range tagKeys { + if mappedKey, ok := attributes.ContainerMappings[key]; ok { + // If the key has a mapping in ContainerMappings, use the mapped key + if val, ok := containerTagsMap[mappedKey]; ok { + t := normalizeutil.NormalizeTag(mappedKey + ":" + val) + containerTags = append(containerTags, t) + } + } else { + // Otherwise populate as additional container tags + if val := GetOTelAttrVal(rattrs, false, key); val != "" { + t := normalizeutil.NormalizeTag(key + ":" + val) + containerTags = append(containerTags, t) + } + } + } + return containerTags +} + +// GetOTelEnv returns the environment based on OTel resource attributes. +func GetOTelEnv(res pcommon.Resource) string { + // TODO(songy23): use AttributeDeploymentEnvironmentName once collector version upgrade is unblocked + return GetOTelAttrVal(res.Attributes(), true, "deployment.environment.name", semconv.AttributeDeploymentEnvironment) +} + +// OTelTraceIDToUint64 converts an OTel trace ID to an uint64 +func OTelTraceIDToUint64(b [16]byte) uint64 { + return binary.BigEndian.Uint64(b[len(b)-8:]) +} + +// OTelSpanIDToUint64 converts an OTel span ID to an uint64 +func OTelSpanIDToUint64(b [8]byte) uint64 { + return binary.BigEndian.Uint64(b[:]) +} + +var spanKindNames = map[ptrace.SpanKind]string{ + ptrace.SpanKindUnspecified: "unspecified", + ptrace.SpanKindInternal: "internal", + ptrace.SpanKindServer: "server", + ptrace.SpanKindClient: "client", + ptrace.SpanKindProducer: "producer", + ptrace.SpanKindConsumer: "consumer", +} + +// OTelSpanKindName converts the given SpanKind to a valid Datadog span kind name. +func OTelSpanKindName(k ptrace.SpanKind) string { + name, ok := spanKindNames[k] + if !ok { + return "unspecified" + } + return name +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/processed_trace.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/processed_trace.go new file mode 100644 index 00000000..913e4127 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/processed_trace.go @@ -0,0 +1,53 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package traceutil + +import ( + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" +) + +// ProcessedTrace represents a trace being processed in the agent. +type ProcessedTrace struct { + TraceChunk *pb.TraceChunk + Root *pb.Span + TracerEnv string + AppVersion string + TracerHostname string + ClientDroppedP0sWeight float64 + GitCommitSha string + ImageTag string +} + +// Clone creates a copy of ProcessedTrace, cloning p, p.TraceChunk, and p.Root. This means it is +// safe to modify the returned ProcessedTrace's (pt's) fields along with fields in +// pt.TraceChunk and fields in pt.Root. 
+//
+// The most important consequence of this is that the TraceChunk's Spans field can be assigned,
+// *BUT* the Spans value itself should not be modified. i.e. This is ok:
+//
+//	pt2 := pt.Clone()
+//	pt2.TraceChunk.Spans = make([]*pb.Span, 0)
+//
+// but this is NOT ok:
+//
+//	pt2 := pt.Clone()
+//	pt2.TraceChunk.Spans[0] = &pb.Span{} // This will be visible in pt.
+func (pt *ProcessedTrace) Clone() *ProcessedTrace {
+	if pt == nil {
+		return nil
+	}
+	ptClone := new(ProcessedTrace)
+	*ptClone = *pt
+	if pt.TraceChunk != nil {
+		c := pt.TraceChunk.ShallowCopy()
+		ptClone.TraceChunk = c
+	}
+	if pt.Root != nil {
+		r := pt.Root.ShallowCopy()
+		ptClone.Root = r
+	}
+	return ptClone
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/span.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/span.go
new file mode 100644
index 00000000..2b416531
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/span.go
@@ -0,0 +1,175 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package traceutil
+
+import (
+	"bytes"
+
+	"github.com/tinylib/msgp/msgp"
+
+	pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
+)
+
+const (
+	// topLevelKey is a special metric, it's 1 if the span is top-level, 0 if not, this is kept for backwards
+	// compatibility but will eventually be replaced with just using the preferred tracerTopLevelKey
+	topLevelKey = "_top_level"
+	// measuredKey is a special metric flag that marks a span for trace metrics calculation.
+	measuredKey = "_dd.measured"
+	// tracerTopLevelKey is a metric flag set by tracers on top_level spans
+	tracerTopLevelKey = "_dd.top_level"
+	// partialVersionKey is a metric carrying the snapshot seq number in the case the span is a partial snapshot
+	partialVersionKey = "_dd.partial_version"
+)
+
+// HasTopLevel returns true if span is top-level.
+func HasTopLevel(s *pb.Span) bool {
+	return HasTopLevelMetrics(s.Metrics)
+}
+
+// HasTopLevelMetrics returns true if the provided metrics map indicates the span is top-level.
+func HasTopLevelMetrics(metrics map[string]float64) bool {
+	return metrics[topLevelKey] == 1 || metrics[tracerTopLevelKey] == 1
+}
+
+// UpdateTracerTopLevel sets _top_level tag on spans flagged by the tracer
+func UpdateTracerTopLevel(s *pb.Span) {
+	if s.Metrics[tracerTopLevelKey] == 1 {
+		SetMetric(s, topLevelKey, 1)
+	}
+}
+
+// IsMeasured returns true if a span should be measured (i.e., it should get trace metrics calculated).
+func IsMeasured(s *pb.Span) bool {
+	return IsMeasuredMetrics(s.Metrics)
+}
+
+// IsMeasuredMetrics returns true if a span should be measured (i.e., it should get trace metrics calculated).
+func IsMeasuredMetrics(metrics map[string]float64) bool {
+	return metrics[measuredKey] == 1
+}
+
+// IsPartialSnapshot returns true if the span is a partial snapshot.
+// These kinds of spans are partial images of long-running spans.
+// When incomplete, a partial snapshot has a metric _dd.partial_version which is a positive integer.
+// The metric usually increases each time a new version of the same span is sent by the tracer
+func IsPartialSnapshot(s *pb.Span) bool {
+	return IsPartialSnapshotMetrics(s.Metrics)
+}
+
+// IsPartialSnapshotMetrics returns true if the span is a partial snapshot.
+// These kinds of spans are partial images of long-running spans.
+// When incomplete, a partial snapshot has a metric _dd.partial_version which is a positive integer.
+// The metric usually increases each time a new version of the same span is sent by the tracer
+func IsPartialSnapshotMetrics(metrics map[string]float64) bool {
+	v, ok := metrics[partialVersionKey]
+	return ok && v >= 0
+}
+
+// SetTopLevel sets the top-level attribute of the span.
+func SetTopLevel(s *pb.Span, topLevel bool) {
+	if !topLevel {
+		if s.Metrics == nil {
+			return
+		}
+		delete(s.Metrics, topLevelKey)
+		return
+	}
+	// Setting the metrics value, so that code downstream in the pipeline
+	// can identify this as top-level without recomputing everything.
+	SetMetric(s, topLevelKey, 1)
+}
+
+// SetMeasured sets the measured attribute of the span.
+func SetMeasured(s *pb.Span, measured bool) {
+	if !measured {
+		if s.Metrics == nil {
+			return
+		}
+		delete(s.Metrics, measuredKey)
+		return
+	}
+	// Setting the metrics value, so that code downstream in the pipeline
+	// can identify this as measured without recomputing everything.
+	SetMetric(s, measuredKey, 1)
+}
+
+// SetMetric sets the metric at key to the val on the span s.
+func SetMetric(s *pb.Span, key string, val float64) {
+	if s.Metrics == nil {
+		s.Metrics = make(map[string]float64)
+	}
+	s.Metrics[key] = val
+}
+
+// SetMeta sets the metadata at key to the val on the span s.
+func SetMeta(s *pb.Span, key, val string) {
+	if s.Meta == nil {
+		s.Meta = make(map[string]string)
+	}
+	s.Meta[key] = val
+}
+
+// GetMeta gets the metadata value in the span Meta map.
+func GetMeta(s *pb.Span, key string) (string, bool) {
+	if s.Meta == nil {
+		return "", false
+	}
+	val, ok := s.Meta[key]
+	return val, ok
+}
+
+// GetMetaDefault gets the metadata value in the span Meta map and falls back to fallback if it is missing.
+func GetMetaDefault(s *pb.Span, key, fallback string) string {
+	if s.Meta == nil {
+		return fallback
+	}
+	if val, ok := s.Meta[key]; ok {
+		return val
+	}
+	return fallback
+}
+
+// SetMetaStruct sets the structured metadata at key to the val on the span s.
+func SetMetaStruct(s *pb.Span, key string, val interface{}) error {
+	var b bytes.Buffer
+
+	if s.MetaStruct == nil {
+		s.MetaStruct = make(map[string][]byte)
+	}
+	writer := msgp.NewWriter(&b)
+	err := writer.WriteIntf(val)
+	if err != nil {
+		return err
+	}
+	writer.Flush()
+	s.MetaStruct[key] = b.Bytes()
+	return nil
+}
+
+// GetMetaStruct gets the structured metadata value in the span MetaStruct map.
+func GetMetaStruct(s *pb.Span, key string) (interface{}, bool) {
+	if s.MetaStruct == nil {
+		return nil, false
+	}
+	if rawVal, ok := s.MetaStruct[key]; ok {
+		val, _, err := msgp.ReadIntfBytes(rawVal)
+		if err != nil {
+			ok = false
+		}
+		return val, ok
+	}
+	return nil, false
+}
+
+// GetMetric gets the metric value in the span Metrics map.
+func GetMetric(s *pb.Span, key string) (float64, bool) {
+	if s.Metrics == nil {
+		return 0, false
+	}
+	val, ok := s.Metrics[key]
+	return val, ok
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/trace.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/trace.go
new file mode 100644
index 00000000..ef8a0f29
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/trace.go
@@ -0,0 +1,119 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package traceutil
+
+import (
+	pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
+	"github.com/DataDog/datadog-agent/pkg/trace/log"
+)
+
+const (
+	envKey = "env"
+)
+
+// GetEnv returns the first "env" tag found in trace t.
+// The search starts at the root span.
+func GetEnv(root *pb.Span, t *pb.TraceChunk) string {
+	if v, ok := root.Meta[envKey]; ok {
+		return v
+	}
+	for _, s := range t.Spans {
+		if s.SpanID == root.SpanID {
+			continue
+		}
+		if v, ok := s.Meta[envKey]; ok {
+			return v
+		}
+	}
+	return ""
+}
+
+// GetRoot extracts the root span from a trace
+func GetRoot(t pb.Trace) *pb.Span {
+	// That should be caught beforehand
+	if len(t) == 0 {
+		return nil
+	}
+	// General case: go over all spans and check for one without a matching parent
+	parentIDToChild := map[uint64]*pb.Span{}
+
+	for i := range t {
+		// Common case optimization: check for span with ParentID == 0, starting from the end,
+		// since some clients report the root last
+		j := len(t) - 1 - i
+		if t[j].ParentID == 0 {
+			return t[j]
+		}
+		parentIDToChild[t[j].ParentID] = t[j]
+	}
+
+	for i := range t {
+		delete(parentIDToChild, t[i].SpanID)
+	}
+
+	// Here, if the trace is valid, we should have len(parentIDToChild) == 1
+	if len(parentIDToChild) != 1 {
+		log.Debugf("Didn't reliably find the root span for traceID:%v", t[0].TraceID)
+	}
+
+	// Have a safe behavior if that's not the case
+	// Pick the first span without its parent
+	for parentID := range parentIDToChild {
+		return parentIDToChild[parentID]
+	}
+
+	// Gracefully fail with the last span of the trace
+	return t[len(t)-1]
+}
+
+// ChildrenMap returns a map containing for each span id the list of its
+// direct children.
+func ChildrenMap(t pb.Trace) map[uint64][]*pb.Span {
+	childrenMap := make(map[uint64][]*pb.Span)
+
+	for i := range t {
+		span := t[i]
+		if span.ParentID == 0 {
+			continue
+		}
+		childrenMap[span.ParentID] = append(childrenMap[span.ParentID], span)
+	}
+
+	return childrenMap
+}
+
+// ComputeTopLevel updates all the spans top-level attribute.
+//
+// A span is considered top-level if:
+//   - it's a root span
+//   - OR its parent is unknown (other part of the code, distributed trace)
+//   - OR its parent belongs to another service (in that case it's a "local root"
+//     being the highest ancestor of other spans belonging to this service and
+//     attached to it).
+func ComputeTopLevel(trace pb.Trace) {
+	spanIDToIndex := make(map[uint64]int, len(trace))
+	for i, span := range trace {
+		spanIDToIndex[span.SpanID] = i
+	}
+	for _, span := range trace {
+		if span.ParentID == 0 {
+			// span is a root span
+			SetTopLevel(span, true)
+			continue
+		}
+		parentIndex, ok := spanIDToIndex[span.ParentID]
+		if !ok {
+			// span has no parent in chunk
+			SetTopLevel(span, true)
+			continue
+		}
+		if trace[parentIndex].Service != span.Service {
+			// parent is not in the same service
+			SetTopLevel(span, true)
+			continue
+		}
+	}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/transform/obfuscate.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/transform/obfuscate.go
new file mode 100644
index 00000000..3d49a461
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/transform/obfuscate.go
@@ -0,0 +1,82 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package transform
+
+import (
+	"github.com/DataDog/datadog-agent/pkg/obfuscate"
+	pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
+	"github.com/DataDog/datadog-agent/pkg/trace/traceutil"
+)
+
+const (
+	// TagRedisRawCommand represents a redis raw command tag
+	TagRedisRawCommand = "redis.raw_command"
+	// TagValkeyRawCommand represents a valkey raw command tag
+	TagValkeyRawCommand = "valkey.raw_command"
+	// TagMemcachedCommand represents a memcached command tag
+	TagMemcachedCommand = "memcached.command"
+	// TagMongoDBQuery represents a MongoDB query tag
+	TagMongoDBQuery = "mongodb.query"
+	// TagElasticBody represents an Elasticsearch body tag
+	TagElasticBody = "elasticsearch.body"
+	// TagOpenSearchBody represents an OpenSearch body tag
+	TagOpenSearchBody = "opensearch.body"
+	// TagSQLQuery represents a SQL query tag
+	TagSQLQuery = "sql.query"
+	// TagHTTPURL represents an HTTP URL tag
+	TagHTTPURL = "http.url"
+	// TagDBMS represents a DBMS tag
+	TagDBMS = "db.type"
+)
+
+const (
+	// TextNonParsable is the error text used when a query is non-parsable
+	TextNonParsable = "Non-parsable SQL query"
+)
+
+// ObfuscateSQLSpan obfuscates a SQL span using pkg/obfuscate logic
+func ObfuscateSQLSpan(o *obfuscate.Obfuscator, span *pb.Span) (*obfuscate.ObfuscatedQuery, error) {
+	if span.Resource == "" {
+		return nil, nil
+	}
+	oq, err := o.ObfuscateSQLStringForDBMS(span.Resource, span.Meta[TagDBMS])
+	if err != nil {
+		// we have an error, discard the SQL to avoid polluting user resources.
+		span.Resource = TextNonParsable
+		traceutil.SetMeta(span, TagSQLQuery, TextNonParsable)
+		return nil, err
+	}
+	span.Resource = oq.Query
+	if len(oq.Metadata.TablesCSV) > 0 {
+		traceutil.SetMeta(span, "sql.tables", oq.Metadata.TablesCSV)
+	}
+	traceutil.SetMeta(span, TagSQLQuery, oq.Query)
+	return oq, nil
+}
+
+// ObfuscateRedisSpan obfuscates a Redis span using pkg/obfuscate logic
+func ObfuscateRedisSpan(o *obfuscate.Obfuscator, span *pb.Span, removeAllArgs bool) {
+	if span.Meta == nil || span.Meta[TagRedisRawCommand] == "" {
+		return
+	}
+	if removeAllArgs {
+		span.Meta[TagRedisRawCommand] = o.RemoveAllRedisArgs(span.Meta[TagRedisRawCommand])
+		return
+	}
+	span.Meta[TagRedisRawCommand] = o.ObfuscateRedisString(span.Meta[TagRedisRawCommand])
+}
+
+// ObfuscateValkeySpan obfuscates a Valkey span using pkg/obfuscate logic
+func ObfuscateValkeySpan(o *obfuscate.Obfuscator, span *pb.Span, removeAllArgs bool) {
+	if span.Meta == nil || span.Meta[TagValkeyRawCommand] == "" {
+		return
+	}
+	if removeAllArgs {
+		span.Meta[TagValkeyRawCommand] = o.RemoveAllRedisArgs(span.Meta[TagValkeyRawCommand])
+		return
+	}
+	span.Meta[TagValkeyRawCommand] = o.ObfuscateRedisString(span.Meta[TagValkeyRawCommand])
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/transform/transform.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/transform/transform.go
new file mode 100644
index 00000000..7fbc57fb
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/transform/transform.go
@@ -0,0 +1,550 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package transform implements mappings from OTLP to DD semantics, and helpers
+package transform
+
+import (
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"go.opentelemetry.io/collector/pdata/pcommon"
+	"go.opentelemetry.io/collector/pdata/ptrace"
+	semconv "go.opentelemetry.io/collector/semconv/v1.6.1"
+
+	pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
+	"github.com/DataDog/datadog-agent/pkg/trace/config"
+	"github.com/DataDog/datadog-agent/pkg/trace/sampler"
+	"github.com/DataDog/datadog-agent/pkg/trace/traceutil"
+	"github.com/DataDog/datadog-agent/pkg/util/log"
+	"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
+)
+
+const (
+	// KeyDatadogService is the key for the service name in the Datadog namespace
+	KeyDatadogService = "datadog.service"
+	// KeyDatadogName is the key for the operation name in the Datadog namespace
+	KeyDatadogName = "datadog.name"
+	// KeyDatadogResource is the key for the resource name in the Datadog namespace
+	KeyDatadogResource = "datadog.resource"
+	// KeyDatadogSpanKind is the key for the span kind in the Datadog namespace
+	KeyDatadogSpanKind = "datadog.span.kind"
+	// KeyDatadogType is the key for the span type in the Datadog namespace
+	KeyDatadogType = "datadog.type"
+	// KeyDatadogError is the key for the error flag in the Datadog namespace
+	KeyDatadogError = "datadog.error"
+	// KeyDatadogErrorMsg is the key for the error message in the Datadog namespace
+	KeyDatadogErrorMsg = "datadog.error.msg"
+	// KeyDatadogErrorType is the key for the error type in the Datadog namespace
+	KeyDatadogErrorType = "datadog.error.type"
+	// KeyDatadogErrorStack is the key for the error stack in the Datadog namespace
+	KeyDatadogErrorStack = "datadog.error.stack"
+	// KeyDatadogVersion is the key for the version in the Datadog namespace
+	KeyDatadogVersion = "datadog.version"
+	// KeyDatadogHTTPStatusCode is the key for the HTTP status code in the Datadog namespace
+	KeyDatadogHTTPStatusCode = "datadog.http_status_code"
+	// KeyDatadogHost is the key for the host in the Datadog namespace
+	KeyDatadogHost = "datadog.host"
+	// KeyDatadogEnvironment is the key for the environment in the Datadog namespace
+	KeyDatadogEnvironment = "datadog.env"
+	// KeyDatadogContainerID is the key for the container ID in the Datadog namespace
+	KeyDatadogContainerID = "datadog.container_id"
+	// KeyDatadogContainerTags is the key for the container tags in the Datadog namespace
+	KeyDatadogContainerTags = "datadog.container_tags"
+)
+
+// OperationAndResourceNameV2Enabled checks if the new operation and resource name logic should be used
+func OperationAndResourceNameV2Enabled(conf *config.AgentConfig) bool {
+	return !conf.OTLPReceiver.SpanNameAsResourceName && len(conf.OTLPReceiver.SpanNameRemappings) == 0 && !conf.HasFeature("disable_operation_and_resource_name_logic_v2")
+}
+
+// OtelSpanToDDSpanMinimal converts an OTel span to a DD span.
+// The converted DD span only has the minimal number of fields for APM stats calculation and is only meant
+// to be used in OTLPTracesToConcentratorInputs. Do not use it for other purposes.
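+//
+// For example, a span carrying datadog.service="checkout" maps directly to a
+// pb.Span with Service "checkout"; fields without datadog.* overrides are
+// derived from the OTel span, resource, and scope below.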
+func OtelSpanToDDSpanMinimal( + otelspan ptrace.Span, + otelres pcommon.Resource, + lib pcommon.InstrumentationScope, + isTopLevel, topLevelByKind bool, + conf *config.AgentConfig, + peerTagKeys []string, +) *pb.Span { + spanKind := otelspan.Kind() + + ddspan := &pb.Span{ + Service: traceutil.GetOTelAttrVal(otelspan.Attributes(), true, KeyDatadogService), + Name: traceutil.GetOTelAttrVal(otelspan.Attributes(), true, KeyDatadogName), + Resource: traceutil.GetOTelAttrVal(otelspan.Attributes(), true, KeyDatadogResource), + Type: traceutil.GetOTelAttrVal(otelspan.Attributes(), true, KeyDatadogType), + TraceID: traceutil.OTelTraceIDToUint64(otelspan.TraceID()), + SpanID: traceutil.OTelSpanIDToUint64(otelspan.SpanID()), + ParentID: traceutil.OTelSpanIDToUint64(otelspan.ParentSpanID()), + Start: int64(otelspan.StartTimestamp()), + Duration: int64(otelspan.EndTimestamp()) - int64(otelspan.StartTimestamp()), + Meta: make(map[string]string, otelspan.Attributes().Len()+otelres.Attributes().Len()), + Metrics: make(map[string]float64), + } + if isErrorVal, ok := otelspan.Attributes().Get(KeyDatadogError); ok { + ddspan.Error = int32(isErrorVal.Int()) + } else { + if otelspan.Status().Code() == ptrace.StatusCodeError { + ddspan.Error = 1 + } + } + + if incomingSpanKindName := traceutil.GetOTelAttrVal(otelspan.Attributes(), true, KeyDatadogSpanKind); incomingSpanKindName != "" { + ddspan.Meta["span.kind"] = incomingSpanKindName + } + + if !conf.OTLPReceiver.IgnoreMissingDatadogFields { + if ddspan.Service == "" { + ddspan.Service = traceutil.GetOTelService(otelres, true) + } + + if OperationAndResourceNameV2Enabled(conf) { + if ddspan.Name == "" { + ddspan.Name = traceutil.GetOTelOperationNameV2(otelspan) + } + if ddspan.Resource == "" { + ddspan.Resource = traceutil.GetOTelResourceV2(otelspan, otelres) + } + } else { + if ddspan.Name == "" { + ddspan.Name = traceutil.GetOTelOperationNameV1(otelspan, otelres, lib, conf.OTLPReceiver.SpanNameAsResourceName, conf.OTLPReceiver.SpanNameRemappings, true) + } + if ddspan.Resource == "" { + ddspan.Resource = traceutil.GetOTelResourceV1(otelspan, otelres) + } + } + + if ddspan.Type == "" { + // correct span type logic if using new resource receiver, keep same if on v1. separate from OperationAndResourceNameV2Enabled. 
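+			// For example, a client span with db.system="redis" gets Type "cache"
+			// from the v1 logic (SpanKind2Type) but "redis" from the v2 logic
+			// (GetOTelSpanType).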
+ if !conf.HasFeature("disable_receive_resource_spans_v2") { + ddspan.Type = traceutil.GetOTelSpanType(otelspan, otelres) + } else { + ddspan.Type = traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, "span.type") + if ddspan.Type == "" { + ddspan.Type = traceutil.SpanKind2Type(otelspan, otelres) + } + } + } + + if !spanMetaHasKey(ddspan, "span.kind") { + ddspan.Meta["span.kind"] = traceutil.OTelSpanKindName(spanKind) + } + var code uint32 + if incomingCode, ok := otelspan.Attributes().Get(KeyDatadogHTTPStatusCode); ok { + code = uint32(incomingCode.Int()) + } else { + code = traceutil.GetOTelStatusCode(otelspan) + } + if code != 0 { + ddspan.Metrics[traceutil.TagStatusCode] = float64(code) + } + } + if isTopLevel { + traceutil.SetTopLevel(ddspan, true) + } + if isMeasured := traceutil.GetOTelAttrVal(otelspan.Attributes(), false, "_dd.measured"); isMeasured == "1" { + traceutil.SetMeasured(ddspan, true) + } else if topLevelByKind && (spanKind == ptrace.SpanKindClient || spanKind == ptrace.SpanKindProducer) { + // When enable_otlp_compute_top_level_by_span_kind is true, compute stats for client-side spans + traceutil.SetMeasured(ddspan, true) + } + for _, peerTagKey := range peerTagKeys { + if peerTagVal := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, false, peerTagKey); peerTagVal != "" { + ddspan.Meta[peerTagKey] = peerTagVal + } + } + return ddspan +} + +func isDatadogAPMConventionKey(k string) bool { + return k == "service.name" || k == "operation.name" || k == "resource.name" || k == "span.type" || strings.HasPrefix(k, "datadog.") +} + +// GetDDKeyForOTLPAttribute looks for a key in the Datadog HTTP convention that matches the given key from the +// OTLP HTTP convention. Otherwise, check if it is a Datadog APM convention key - if it is, it will be handled with +// specialized logic elsewhere, so return an empty string. If it isn't, return the original key. +func GetDDKeyForOTLPAttribute(k string) string { + mappedKey, found := attributes.HTTPMappings[k] + switch { + case found: + break + case strings.HasPrefix(k, "http.request.header."): + mappedKey = fmt.Sprintf("http.request.headers.%s", strings.TrimPrefix(k, "http.request.header.")) + case !isDatadogAPMConventionKey(k): + mappedKey = k + default: + return "" + } + return mappedKey +} + +func setMetaOTLPWithSemConvMappings(k string, value string, ddspan *pb.Span, ignoreMissingDatadogFields bool) { + mappedKey := GetDDKeyForOTLPAttribute(k) + // Exclude Datadog APM conventions. + // These are handled above explicitly. + if mappedKey != "" { + if _, ok := metaKeysToDDSemanticsKeys[mappedKey]; ok { + if ddspan.Meta[mappedKey] != "" || ignoreMissingDatadogFields { + return + } + } + SetMetaOTLP(ddspan, mappedKey, value) + } +} + +func setMetricOTLPWithSemConvMappings(k string, value float64, ddspan *pb.Span, ignoreMissingDatadogFields bool) { + mappedKey := GetDDKeyForOTLPAttribute(k) + // Exclude Datadog APM conventions. + // These are handled above explicitly. 
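+	// For example, an incoming "http.status_code" metric is dropped here when the
+	// span already carries that metric, or when missing Datadog fields are
+	// deliberately left unfilled.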
+ if mappedKey != "" { + if _, ok := metaKeysToDDSemanticsKeys[mappedKey]; ok { + if _, ok := ddspan.Metrics[mappedKey]; ok || ignoreMissingDatadogFields { + return + } + } + SetMetricOTLP(ddspan, mappedKey, value) + } +} + +var ddSemanticsKeysToMetaKeys = map[string]string{ + KeyDatadogEnvironment: "env", + KeyDatadogVersion: "version", + KeyDatadogHTTPStatusCode: "http.status_code", + KeyDatadogErrorMsg: "error.msg", + KeyDatadogErrorType: "error.type", + KeyDatadogErrorStack: "error.stack", +} + +var metaKeysToDDSemanticsKeys = map[string]string{ + "env": KeyDatadogEnvironment, + "version": KeyDatadogVersion, + "http.status_code": KeyDatadogHTTPStatusCode, + "error.msg": KeyDatadogErrorMsg, + "error.type": KeyDatadogErrorType, + "error.stack": KeyDatadogErrorStack, +} + +// OtelSpanToDDSpan converts an OTel span to a DD span. +func OtelSpanToDDSpan( + otelspan ptrace.Span, + otelres pcommon.Resource, + lib pcommon.InstrumentationScope, + conf *config.AgentConfig, +) *pb.Span { + spanKind := otelspan.Kind() + topLevelByKind := conf.HasFeature("enable_otlp_compute_top_level_by_span_kind") + isTopLevel := false + if topLevelByKind { + isTopLevel = otelspan.ParentSpanID() == pcommon.NewSpanIDEmpty() || spanKind == ptrace.SpanKindServer || spanKind == ptrace.SpanKindConsumer + } + ddspan := OtelSpanToDDSpanMinimal(otelspan, otelres, lib, isTopLevel, topLevelByKind, conf, nil) + + for ddSemanticKey, ddSpanMetaKey := range ddSemanticsKeysToMetaKeys { + if incomingValue := traceutil.GetOTelAttrVal(otelspan.Attributes(), false, ddSemanticKey); incomingValue != "" { + ddspan.Meta[ddSpanMetaKey] = incomingValue + } + } + + otelres.Attributes().Range(func(k string, v pcommon.Value) bool { + value := v.AsString() + setMetaOTLPWithSemConvMappings(k, value, ddspan, conf.OTLPReceiver.IgnoreMissingDatadogFields) + return true + }) + + for k, v := range lib.Attributes().Range { + ddspan.Meta[k] = v.AsString() + } + + traceID := otelspan.TraceID() + ddspan.Meta["otel.trace_id"] = hex.EncodeToString(traceID[:]) + if !spanMetaHasKey(ddspan, "version") { + if serviceVersion, ok := otelres.Attributes().Get(semconv.AttributeServiceVersion); ok { + ddspan.Meta["version"] = serviceVersion.AsString() + } + } + + if otelspan.Events().Len() > 0 { + ddspan.Meta["events"] = MarshalEvents(otelspan.Events()) + } + TagSpanIfContainsExceptionEvent(otelspan, ddspan) + if otelspan.Links().Len() > 0 { + ddspan.Meta["_dd.span_links"] = MarshalLinks(otelspan.Links()) + } + + otelspan.Attributes().Range(func(k string, v pcommon.Value) bool { + if strings.HasPrefix(k, "datadog.") { + return true + } + switch v.Type() { + case pcommon.ValueTypeDouble: + setMetricOTLPWithSemConvMappings(k, v.Double(), ddspan, conf.OTLPReceiver.IgnoreMissingDatadogFields) + case pcommon.ValueTypeInt: + setMetricOTLPWithSemConvMappings(k, float64(v.Int()), ddspan, conf.OTLPReceiver.IgnoreMissingDatadogFields) + default: + setMetaOTLPWithSemConvMappings(k, v.AsString(), ddspan, conf.OTLPReceiver.IgnoreMissingDatadogFields) + } + + return true + }) + + if otelspan.TraceState().AsRaw() != "" { + ddspan.Meta["w3c.tracestate"] = otelspan.TraceState().AsRaw() + } + if lib.Name() != "" { + ddspan.Meta[semconv.OtelLibraryName] = lib.Name() + } + if lib.Version() != "" { + ddspan.Meta[semconv.OtelLibraryVersion] = lib.Version() + } + ddspan.Meta[semconv.OtelStatusCode] = otelspan.Status().Code().String() + if msg := otelspan.Status().Message(); msg != "" { + ddspan.Meta[semconv.OtelStatusDescription] = msg + } + + if 
!conf.OTLPReceiver.IgnoreMissingDatadogFields {
+		if !spanMetaHasKey(ddspan, "error.msg") || !spanMetaHasKey(ddspan, "error.type") || !spanMetaHasKey(ddspan, "error.stack") {
+			ddspan.Error = Status2Error(otelspan.Status(), otelspan.Events(), ddspan.Meta)
+		}
+
+		if !spanMetaHasKey(ddspan, "env") {
+			if env := traceutil.GetOTelEnv(otelres); env != "" {
+				ddspan.Meta["env"] = env
+			}
+		}
+	}
+
+	return ddspan
+}
+
+// TagSpanIfContainsExceptionEvent tags spans that contain at least one exception span event.
+func TagSpanIfContainsExceptionEvent(otelspan ptrace.Span, ddspan *pb.Span) {
+	for i := range otelspan.Events().Len() {
+		if otelspan.Events().At(i).Name() == "exception" {
+			ddspan.Meta["_dd.span_events.has_exception"] = "true"
+			return
+		}
+	}
+}
+
+// MarshalEvents marshals events into JSON.
+func MarshalEvents(events ptrace.SpanEventSlice) string {
+	var str strings.Builder
+	str.WriteString("[")
+	for i := 0; i < events.Len(); i++ {
+		e := events.At(i)
+		if i > 0 {
+			str.WriteString(",")
+		}
+		var wrote bool
+		str.WriteString("{")
+		if v := e.Timestamp(); v != 0 {
+			str.WriteString(`"time_unix_nano":`)
+			str.WriteString(strconv.FormatUint(uint64(v), 10))
+			wrote = true
+		}
+		if v := e.Name(); v != "" {
+			if wrote {
+				str.WriteString(",")
+			}
+			str.WriteString(`"name":`)
+			if name, err := json.Marshal(v); err == nil {
+				str.WriteString(string(name))
+			} else {
+				// still collect the event information, if possible
+				log.Errorf("Error parsing span event name %v, using name 'redacted' instead", v)
+				str.WriteString(`"redacted"`)
+			}
+			wrote = true
+		}
+		if e.Attributes().Len() > 0 {
+			if wrote {
+				str.WriteString(",")
+			}
+			str.WriteString(`"attributes":{`)
+			j := 0
+			e.Attributes().Range(func(k string, v pcommon.Value) bool {
+				// collect the attribute only if the key is json-parseable, else drop the attribute
+				if key, err := json.Marshal(k); err == nil {
+					if j > 0 {
+						str.WriteString(",")
+					}
+					str.WriteString(string(key))
+					str.WriteString(":")
+					if val, err := json.Marshal(v.AsRaw()); err == nil {
+						str.WriteString(string(val))
+					} else {
+						log.Warnf("Trouble parsing the following attribute value, dropping: %v", v.AsString())
+						str.WriteString(`"redacted"`)
+					}
+					// only count attributes that were actually written, so the
+					// comma separator above stays correct
+					j++
+				} else {
+					log.Errorf("Error parsing the following attribute key on span event %v, dropping attribute: %v", e.Name(), k)
+					e.SetDroppedAttributesCount(e.DroppedAttributesCount() + 1)
+				}
+				return true
+			})
+			str.WriteString("}")
+			wrote = true
+		}
+		if v := e.DroppedAttributesCount(); v != 0 {
+			if wrote {
+				str.WriteString(",")
+			}
+			str.WriteString(`"dropped_attributes_count":`)
+			str.WriteString(strconv.FormatUint(uint64(v), 10))
+		}
+		str.WriteString("}")
+	}
+	str.WriteString("]")
+	return str.String()
+}
+
+// MarshalLinks marshals span links into JSON.
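+// Each link serializes to a shape like
+// {"trace_id":"<32 hex chars>","span_id":"<16 hex chars>","attributes":{"k":"v"}},
+// with tracestate and dropped_attributes_count included only when present.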
+func MarshalLinks(links ptrace.SpanLinkSlice) string { + var str strings.Builder + str.WriteString("[") + for i := 0; i < links.Len(); i++ { + l := links.At(i) + if i > 0 { + str.WriteString(",") + } + t := l.TraceID() + str.WriteString(`{"trace_id":"`) + str.WriteString(hex.EncodeToString(t[:])) + s := l.SpanID() + str.WriteString(`","span_id":"`) + str.WriteString(hex.EncodeToString(s[:])) + str.WriteString(`"`) + if ts := l.TraceState().AsRaw(); len(ts) > 0 { + str.WriteString(`,"tracestate":"`) + str.WriteString(ts) + str.WriteString(`"`) + } + if l.Attributes().Len() > 0 { + str.WriteString(`,"attributes":{`) + var b bool + l.Attributes().Range(func(k string, v pcommon.Value) bool { + if b { + str.WriteString(",") + } + b = true + str.WriteString(`"`) + str.WriteString(k) + str.WriteString(`":"`) + str.WriteString(v.AsString()) + str.WriteString(`"`) + return true + }) + str.WriteString("}") + } + if l.DroppedAttributesCount() > 0 { + str.WriteString(`,"dropped_attributes_count":`) + str.WriteString(strconv.FormatUint(uint64(l.DroppedAttributesCount()), 10)) + } + str.WriteString("}") + } + str.WriteString("]") + return str.String() +} + +// SetMetaOTLP sets the k/v OTLP attribute pair as a tag on span s. +func SetMetaOTLP(s *pb.Span, k, v string) { + switch k { + case "operation.name": + s.Name = v + case "service.name": + s.Service = v + case "resource.name": + s.Resource = v + case "span.type": + s.Type = v + case "analytics.event": + if v, err := strconv.ParseBool(v); err == nil { + if v { + s.Metrics[sampler.KeySamplingRateEventExtraction] = 1 + } else { + s.Metrics[sampler.KeySamplingRateEventExtraction] = 0 + } + } + default: + s.Meta[k] = v + } +} + +// SetMetricOTLP sets the k/v OTLP attribute pair as a metric on span s. +func SetMetricOTLP(s *pb.Span, k string, v float64) { + switch k { + case "sampling.priority": + s.Metrics["_sampling_priority_v1"] = v + default: + s.Metrics[k] = v + } +} + +// Status2Error checks the given status and events and applies any potential error and messages +// to the given span attributes. +func Status2Error(status ptrace.Status, events ptrace.SpanEventSlice, metaMap map[string]string) int32 { + if status.Code() != ptrace.StatusCodeError { + return 0 + } + for i := 0; i < events.Len(); i++ { + e := events.At(i) + if strings.ToLower(e.Name()) != "exception" { + continue + } + attrs := e.Attributes() + if v, ok := attrs.Get(semconv.AttributeExceptionMessage); ok { + metaMap["error.msg"] = v.AsString() + } + if v, ok := attrs.Get(semconv.AttributeExceptionType); ok { + metaMap["error.type"] = v.AsString() + } + if v, ok := attrs.Get(semconv.AttributeExceptionStacktrace); ok { + metaMap["error.stack"] = v.AsString() + } + } + if _, ok := metaMap["error.msg"]; !ok { + // no error message was extracted, find alternatives + if status.Message() != "" { + // use the status message + metaMap["error.msg"] = status.Message() + } else if _, httpcode := GetFirstFromMap(metaMap, "http.response.status_code", "http.status_code"); httpcode != "" { + // `http.status_code` was renamed to `http.response.status_code` in the HTTP stabilization from v1.23. + // See https://opentelemetry.io/docs/specs/semconv/http/migration-guide/#summary-of-changes + + // http.status_text was removed in spec v0.7.0 (https://github.com/open-telemetry/opentelemetry-specification/pull/972) + // TODO (OTEL-1791) Remove this and use a map from status code to status text. 
+			if httptext, ok := metaMap["http.status_text"]; ok {
+				metaMap["error.msg"] = fmt.Sprintf("%s %s", httpcode, httptext)
+			} else {
+				metaMap["error.msg"] = httpcode
+			}
+		}
+	}
+	return 1
+}
+
+// GetFirstFromMap checks each of the given keys in the map and returns the first
+// key-value pair whose key matches, or empty strings if none matches.
+func GetFirstFromMap(m map[string]string, keys ...string) (string, string) {
+	for _, key := range keys {
+		if val := m[key]; val != "" {
+			return key, val
+		}
+	}
+	return "", ""
+}
+
+func spanMetaHasKey(s *pb.Span, k string) bool {
+	_, ok := s.Meta[k]
+	return ok
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/version/version.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/version/version.go
new file mode 100644
index 00000000..3b246c54
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/version/version.go
@@ -0,0 +1,66 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package version comprises functions that are used to retrieve *app* version data from incoming traces.
+package version
+
+import (
+	"strings"
+
+	"github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
+)
+
+const (
+	versionField          = "version"
+	gitCommitShaField     = "_dd.git.commit.sha"
+	gitCommitShaTagPrefix = "git.commit.sha:"
+	imageTagPrefix        = "image_tag:"
+)
+
+// GetVersionDataFromContainerTags will return the git commit sha and image tag from container tags, if present.
+func GetVersionDataFromContainerTags(cTags []string) (gitCommitSha, imageTag string) {
+	for _, t := range cTags {
+		if gitCommitSha == "" {
+			if sha, ok := strings.CutPrefix(t, gitCommitShaTagPrefix); ok {
+				gitCommitSha = sha
+			}
+		}
+		if imageTag == "" {
+			if image, ok := strings.CutPrefix(t, imageTagPrefix); ok {
+				imageTag = image
+			}
+		}
+		if gitCommitSha != "" && imageTag != "" {
+			break
+		}
+	}
+	return gitCommitSha, imageTag
+}
+
+// GetGitCommitShaFromTrace returns the first "_dd.git.commit.sha" tag found in trace t.
+func GetGitCommitShaFromTrace(root *trace.Span, t *trace.TraceChunk) string {
+	return searchTraceForField(root, t, gitCommitShaField)
+}
+
+// GetAppVersionFromTrace returns the first "version" tag found in trace t.
+// The search starts at the root span.
+func GetAppVersionFromTrace(root *trace.Span, t *trace.TraceChunk) string {
+	return searchTraceForField(root, t, versionField)
+}
+
+func searchTraceForField(root *trace.Span, t *trace.TraceChunk, field string) string {
+	if v, ok := root.Meta[field]; ok {
+		return v
+	}
+	for _, s := range t.Spans {
+		if s.SpanID == root.SpanID {
+			continue
+		}
+		if v, ok := s.Meta[field]; ok {
+			return v
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu.go
new file mode 100644
index 00000000..a2feda3f
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu.go
@@ -0,0 +1,51 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !windows && !aix
+
+// Package watchdog monitors the trace-agent resource usage.
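+//
+// A minimal usage sketch, assuming a caller that polls user CPU time for the
+// current process:
+//
+//	pid := int32(getpid())
+//	before, _ := cpuTimeUser(pid)
+//	time.Sleep(time.Second)
+//	after, _ := cpuTimeUser(pid)
+//	_ = after - before // user CPU seconds consumed during the interval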
+package watchdog + +import ( + "os" + "path/filepath" + "strconv" + + "github.com/DataDog/datadog-agent/pkg/trace/log" + "github.com/shirou/gopsutil/v4/process" +) + +func getpid() int { + // Based on gopsutil's HostProc https://github.com/shirou/gopsutil/blob/672e2518f2ce365ab8504c9f1a8038dc3ad09cf6/internal/common/common.go#L343-L345 + // This PID needs to match the one in the procfs that gopsutil is going to look in. + p := os.Getenv("HOST_PROC") + if p == "" { + p = "/proc" + } + self := filepath.Join(p, "self") + pidf, err := os.Readlink(self) + if err != nil { + log.Warnf("Failed to read pid from %s: %s. Falling back to os.Getpid", self, err) + return os.Getpid() + } + pid, err := strconv.Atoi(filepath.Base(pidf)) + if err != nil { + log.Warnf("Failed to parse pid from %s: %s. Falling back to os.Getpid", pidf, err) + return os.Getpid() + } + return pid +} + +func cpuTimeUser(pid int32) (float64, error) { + p, err := process.NewProcess(pid) + if err != nil { + return 0, err + } + times, err := p.Times() + if err != nil { + return 0, err + } + return times.User, nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu_aix.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu_aix.go new file mode 100644 index 00000000..318f098d --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu_aix.go @@ -0,0 +1,111 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-2020 Datadog, Inc. + +package watchdog + +import ( + "encoding/binary" + "fmt" + "os" + "time" +) + +// From proc(5) on AIX 7.2 +// status +// Contains state information about the process and one of its +// representative thread. The file is formatted as a struct pstatus +// type containing the following members: +// +// uint32_t pr_flag; /* process flags from proc struct p_flag */ +// uint32_t pr_flag2; /* process flags from proc struct p_flag2 */ +// uint32_t pr_flags; /* /proc flags */ +// uint32_t pr_nlwp; /* number of threads in the process */ +// char pr_stat; /* process state from proc p_stat */ +// char pr_dmodel; /* data model for the process */ +// char pr__pad1[6]; /* reserved for future use */ +// pr_sigset_t pr_sigpend; /* set of process pending signals */ +// prptr64_t pr_brkbase; /* address of the process heap */ +// uint64_t pr_brksize; /* size of the process heap, in bytes */ +// prptr64_t pr_stkbase; /* address of the process stack */ +// uint64_t pr_stksize; /* size of the process stack, in bytes */ +// pid64_t pr_pid; /* process id */ +// pid64_t pr_ppid; /* parent process id */ +// pid64_t pr_pgid; /* process group id */ +// pid64_t pr_sid; /* session id */ +// struct pr_timestruc64_t pr_utime; /* process user cpu time */ +// struct pr_timestruc64_t pr_stime; /* process system cpu time */ +// struct pr_timestruc64_t pr_cutime; /* sum of children's user times */ +// struct pr_timestruc64_t pr_cstime; /* sum of children's system times */ +// pr_sigset_t pr_sigtrace; /* mask of traced signals */ +// fltset_t pr_flttrace; /* mask of traced hardware faults */ +// uint32_t pr_sysentry_offset; /* offset into pstatus file of sysset_t +// * identifying system calls traced on +// +// * entry. If 0, then no entry syscalls +// * are being traced. */ +// uint32_t pr_sysexit_offset; /* offset into pstatus file of sysset_t +// * identifying system calls traced on +// * exit. 
If 0, then no exit syscalls +// * are being traced. */ +// uint64_t pr__pad[8]; /* reserved for future use */ +// lwpstatus_t pr_lwp; /* "representative" thread status */ +// +// From /usr/include/sys/procfs.h +// typedef struct pr_sigset +// { +// uint64_t ss_set[4]; /* signal set */ +// } pr_sigset_t; +// +// typedef struct pr_timestruc64 +// { +// int64_t tv_sec; /* 64 bit time_t value */ +// int32_t tv_nsec; /* 32 bit suseconds_t value */ +// uint32_t __pad; /* reserved for future use */ +// } pr_timestruc64_t; +// +// typedef void * prptr64_t; +// +// The fields before the user cpu time (pr_utime) are: +// uint32_t pr_flag; 4 4 +// uint32_t pr_flag2; 4 8 +// uint32_t pr_flags; 4 12 +// uint32_t pr_nlwp; 4 16 +// char pr_stat; 1 17 +// char pr_dmodel; 1 18 +// char pr__pad1[6]; 6 24 +// pr_sigset_t pr_sigpend; (4 * 8) = 32 56 +// prptr64_t pr_brkbase; 8 64 +// uint64_t pr_brksize; 8 72 +// prptr64_t pr_stkbase; 8 80 +// uint64_t pr_stksize; 8 88 +// pid64_t pr_pid; 8 96 +// pid64_t pr_ppid; 8 104 +// pid64_t pr_pgid; 8 112 +// pid64_t pr_sid; 8 120 +// total: 120 +// followed by: +// struct pr_timestruc64_t pr_utime; /* process user cpu time */ + +func cpuTimeUser(pid int32) (float64, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/status", pid)) + if err != nil { + return 0, err + } + defer f.Close() + // As explained above, we will skip 120 bytes into the status file to locate the user CPU time. + f.Seek(120, os.SEEK_SET) + var ( + userSecs int64 + userNsecs int32 + ) + binary.Read(f, binary.BigEndian, &userSecs) + binary.Read(f, binary.BigEndian, &userNsecs) + time := float64(userSecs) + (float64(userNsecs) / float64(time.Second)) + return time, nil +} + +func getpid() int { + return os.Getpid() +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu_windows.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu_windows.go new file mode 100644 index 00000000..6613e30c --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu_windows.go @@ -0,0 +1,56 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package watchdog monitors the trace-agent resource usage. +package watchdog + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func getpid() int { + return os.Getpid() +} + +// this code was copied over from shirou/gopsutil/process because we can't import this package on Windows, +// due to its "wmi" dependency. 
+ +func cpuTimeUser(pid int32) (float64, error) { + t, err := getProcessCPUTimes(pid) + if err != nil { + return 0, err + } + return float64(t.UserTime.HighDateTime)*429.4967296 + float64(t.UserTime.LowDateTime)*1e-7, nil +} + +type systemTimes struct { + CreateTime windows.Filetime + ExitTime windows.Filetime + KernelTime windows.Filetime + UserTime windows.Filetime +} + +func getProcessCPUTimes(pid int32) (systemTimes, error) { + var times systemTimes + + // PROCESS_QUERY_LIMITED_INFORMATION is 0x1000 + h, err := windows.OpenProcess(0x1000, false, uint32(pid)) + if err != nil { + return times, err + } + defer windows.CloseHandle(h) + + err = windows.GetProcessTimes( + windows.Handle(h), + ×.CreateTime, + ×.ExitTime, + ×.KernelTime, + ×.UserTime, + ) + + return times, err +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/info.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/info.go new file mode 100644 index 00000000..5522df7f --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/info.go @@ -0,0 +1,99 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package watchdog + +import ( + "runtime" + "sync" + "time" +) + +const ( + // cacheDelay should be long enough so that we don't poll the info + // too often and waste resources doing it, and also long enough + // so that it's not jittering (CPU can be volatile). + // OTOH it should be short enough to get up-to-date recent info. + cacheDelay = 20 * time.Second +) + +// CPUInfo contains basic CPU info +type CPUInfo struct { + // UserAvg is the average of the user CPU usage since last time + // it was polled. 0 means "not used at all" and 1 means "1 CPU was + // totally full for that period". So it might be greater than 1 if + // the process is monopolizing several cores. + UserAvg float64 +} + +// MemInfo contains basic memory info +type MemInfo struct { + // Alloc is the number of bytes allocated and not yet freed + // as described in runtime.MemStats.Alloc + Alloc uint64 +} + +// Info contains all the watchdog infos, to be published by expvar +type Info struct { + // CPU contains basic CPU info + CPU CPUInfo + // Mem contains basic Mem info + Mem MemInfo +} + +// CurrentInfo is used to query CPU and Mem info, it keeps data from +// the previous calls to calculate averages. It is not thread safe. +type CurrentInfo struct { + pid int32 + mu sync.Mutex + cacheDelay time.Duration + + lastCPUTime time.Time + lastCPUUser float64 + lastCPU CPUInfo +} + +// NewCurrentInfo creates a new CurrentInfo referring to the current running program. +func NewCurrentInfo() *CurrentInfo { + return &CurrentInfo{ + pid: int32(getpid()), + cacheDelay: cacheDelay, + } +} + +// CPU returns basic CPU info, or the previous valid CPU info and an error. 
+func (pi *CurrentInfo) CPU(now time.Time) (CPUInfo, error) { + pi.mu.Lock() + defer pi.mu.Unlock() + + dt := now.Sub(pi.lastCPUTime) + if dt <= pi.cacheDelay { + return pi.lastCPU, nil // don't query too often, cache a little bit + } + pi.lastCPUTime = now + + userTime, err := cpuTimeUser(pi.pid) + if err != nil { + return pi.lastCPU, err + } + + dua := userTime - pi.lastCPUUser + pi.lastCPUUser = userTime + if dua <= 0 { + pi.lastCPU.UserAvg = 0 // shouldn't happen, but make sure result is always > 0 + } else { + pi.lastCPU.UserAvg = float64(time.Second) * dua / float64(dt) + pi.lastCPUUser = userTime + } + + return pi.lastCPU, nil +} + +// Mem returns basic memory info. +func (pi *CurrentInfo) Mem() MemInfo { + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + return MemInfo{Alloc: ms.Alloc} +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/logonpanic.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/logonpanic.go new file mode 100644 index 00000000..a69934a5 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/logonpanic.go @@ -0,0 +1,49 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package watchdog + +import ( + "fmt" + "runtime" + + "github.com/DataDog/datadog-agent/pkg/trace/log" + + "github.com/DataDog/datadog-go/v5/statsd" +) + +const shortErrMsgLen = 17 // 20 char max with tailing "..." + +// shortMsg shortens the length of error message to avoid having high +// cardinality on "err:" tags +func shortErrMsg(msg string) string { + if len(msg) <= shortErrMsgLen { + return msg + } + return msg[:shortErrMsgLen] + "..." +} + +// LogOnPanic catches panics and logs them on the fly. It also flushes +// the log file, ensuring the message appears. Then it propagates the panic +// so that the program flow remains unchanged. +func LogOnPanic(statsd statsd.ClientInterface) { + if err := recover(); err != nil { + // Full print of the trace in the logs + buf := make([]byte, 4096) + length := runtime.Stack(buf, false) + stacktrace := string(buf[:length]) + errMsg := fmt.Sprintf("%v", err) + logMsg := "Unexpected panic: " + errMsg + "\n" + stacktrace + + _ = statsd.Gauge("datadog.trace_agent.panic", 1, []string{ + "err:" + shortErrMsg(errMsg), + }, 1) + + log.Error(logMsg) + log.Flush() + + panic(err) + } +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/LICENSE new file mode 100644 index 00000000..b370545b --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/LICENSE @@ -0,0 +1,200 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-present Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/klog_redirect.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/klog_redirect.go new file mode 100644 index 00000000..fad01af4 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/klog_redirect.go @@ -0,0 +1,59 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package log + +import "strings" + +// KlogRedirectLogger is used to redirect klog logs to datadog logs. klog is +// client-go's logger, logging to STDERR by default, which makes all severities +// into ERROR, along with the formatting just being off. To make the +// conversion, we set a KlogRedirectLogger as klog's output, and parse the severity +// and log message out of every log line. +// NOTE: on klog v2 this parsing is no longer necessary, as it allows us to use +// kSetLogger() instead of kSetOutputBySeverity(). unfortunately we +// still have some dependencies stuck on v1, so we keep the parsing. +type KlogRedirectLogger struct { + stackDepth int +} + +// NewKlogRedirectLogger creates a new KlogRedirectLogger with provided stack depth +func NewKlogRedirectLogger(stackDepth int) KlogRedirectLogger { + return KlogRedirectLogger{ + stackDepth: stackDepth, + } +} + +func (l KlogRedirectLogger) Write(b []byte) (int, error) { + // klog log lines have the following format: + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... + // so we parse L to decide in which level to log, and we try to find + // the ']' character, to ignore anything up to that point, as we don't + // care about the header outside of the log level. + + msg := string(b) + + i := strings.IndexByte(msg, ']') + if i >= 0 { + // if we find a ']', we ignore anything 2 positions from it + // (itself, plus a blank space) + msg = msg[i+2:] + } + + switch b[0] { + case 'I': + InfoStackDepth(l.stackDepth, msg) + case 'W': + _ = WarnStackDepth(l.stackDepth, msg) + case 'E': + _ = ErrorStackDepth(l.stackDepth, msg) + case 'F': + _ = CriticalStackDepth(l.stackDepth, msg) + default: + InfoStackDepth(l.stackDepth, msg) + } + + return 0, nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/levels.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/levels.go new file mode 100644 index 00000000..52e8dd99 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/levels.go @@ -0,0 +1,47 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package log + +import "github.com/cihub/seelog" + +// LogLevel is the type of log levels +// +//nolint:revive // keeping the original type name from seelog +type LogLevel seelog.LogLevel + +// Log levels +const ( + TraceLvl LogLevel = seelog.TraceLvl + DebugLvl LogLevel = seelog.DebugLvl + InfoLvl LogLevel = seelog.InfoLvl + WarnLvl LogLevel = seelog.WarnLvl + ErrorLvl LogLevel = seelog.ErrorLvl + CriticalLvl LogLevel = seelog.CriticalLvl + Off LogLevel = seelog.Off +) + +// Log level string representations +const ( + TraceStr = seelog.TraceStr + DebugStr = seelog.DebugStr + InfoStr = seelog.InfoStr + WarnStr = seelog.WarnStr + ErrorStr = seelog.ErrorStr + CriticalStr = seelog.CriticalStr + OffStr = seelog.OffStr +) + +func (level LogLevel) String() string { + return seelog.LogLevel(level).String() +} + +// LogLevelFromString returns a LogLevel from a string +// +//nolint:revive // keeping the original function name from seelog +func LogLevelFromString(levelStr string) (LogLevel, bool) { + level, ok := seelog.LogLevelFromString(levelStr) + return LogLevel(level), ok +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log.go new file mode 100644 index 00000000..da2043a4 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log.go @@ -0,0 +1,1045 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package log implements logging for the datadog agent. It wraps seelog, and +// supports logging to multiple destinations, buffering messages logged before +// setup, and scrubbing secrets from log messages. +// +// # Compatibility +// +// This module is exported and can be used outside of the datadog-agent +// repository, but is not designed as a general-purpose logging system. Its +// API may change incompatibly. +package log + +import ( + "bytes" + "errors" + "fmt" + "os" + "strings" + "sync" + + "go.uber.org/atomic" + + "github.com/DataDog/datadog-agent/pkg/util/scrubber" +) + +type loggerPointer struct { + atomic.Pointer[DatadogLogger] +} + +var ( + // Logger is the main DatadogLogger + logger loggerPointer + jmxLogger loggerPointer + + // This buffer holds log lines sent to the logger before its + // initialization. Even if initializing the logger is one of the first + // things the agent does, we still: load the conf, resolve secrets inside, + // compute the final proxy settings, ... + // + // This buffer should be very short lived. 
+ logsBuffer = []func(){} + bufferMutex sync.Mutex + defaultStackDepth = 3 + + // for testing purposes + scrubBytesFunc = scrubber.ScrubBytes +) + +// DatadogLogger wrapper structure for seelog +type DatadogLogger struct { + inner LoggerInterface + level LogLevel + extra map[string]LoggerInterface + l sync.RWMutex +} + +/* +* Setup and initialization of the logger + */ + +// SetupLogger setup agent wide logger +func SetupLogger(i LoggerInterface, level string) { + logger.Store(setupCommonLogger(i, level)) + + // Flush the log entries logged before initialization now that the logger is initialized + bufferMutex.Lock() + defer bufferMutex.Unlock() + for _, logLine := range logsBuffer { + logLine() + } + logsBuffer = []func(){} +} + +func setupCommonLogger(i LoggerInterface, level string) *DatadogLogger { + l := &DatadogLogger{ + inner: i, + extra: make(map[string]LoggerInterface), + } + + lvl, ok := LogLevelFromString(level) + if !ok { + lvl = InfoLvl + } + l.level = LogLevel(lvl) + + // We're not going to call DatadogLogger directly, but using the + // exported functions, that will give us two frames in the stack + // trace that should be skipped to get to the original caller. + // + // The fact we need a constant "additional depth" means some + // theoretical refactor to avoid duplication in the functions + // below cannot be performed. + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth) + + return l +} + +func addLogToBuffer(logHandle func()) { + bufferMutex.Lock() + defer bufferMutex.Unlock() + + logsBuffer = append(logsBuffer, logHandle) +} + +func (sw *DatadogLogger) scrub(s string) string { + if scrubbed, err := scrubBytesFunc([]byte(s)); err == nil { + return string(scrubbed) + } + return s +} + +/* +* Operation on the **logger level** + */ + +// ChangeLogLevel changes the current log level, valid levels are trace, debug, +// info, warn, error, critical and off, it requires a new seelog logger because +// an existing one cannot be updated +func ChangeLogLevel(li LoggerInterface, level string) error { + if err := logger.changeLogLevel(level); err != nil { + return err + } + + // See detailed explanation in SetupLogger(...) 
+ if err := li.SetAdditionalStackDepth(defaultStackDepth); err != nil { + return err + } + + logger.replaceInnerLogger(li) + return nil + + // need to return something, just set to Info (expected default) +} +func (sw *loggerPointer) changeLogLevel(level string) error { + l := sw.Load() + if l == nil { + return errors.New("cannot change loglevel: logger not initialized") + } + + l.l.Lock() + defer l.l.Unlock() + + if l.inner == nil { + return errors.New("cannot change loglevel: logger is initialized however logger.inner is nil") + } + + lvl, ok := LogLevelFromString(strings.ToLower(level)) + if !ok { + return errors.New("bad log level") + } + l.level = LogLevel(lvl) + return nil +} + +// GetLogLevel returns a seelog native representation of the current log level +func GetLogLevel() (LogLevel, error) { + return logger.getLogLevel() +} +func (sw *loggerPointer) getLogLevel() (LogLevel, error) { + l := sw.Load() + if l == nil { + return InfoLvl, errors.New("cannot get loglevel: logger not initialized") + } + + l.l.RLock() + defer l.l.RUnlock() + + if l.inner == nil { + return InfoLvl, errors.New("cannot get loglevel: logger not initialized") + } + + return l.level, nil +} + +// ShouldLog returns whether a given log level should be logged by the default logger +func ShouldLog(lvl LogLevel) bool { + // The lock stay in the exported function due to the use of `shouldLog` in function that already hold the lock + l := logger.Load() + if l != nil { + l.l.RLock() + defer l.l.RUnlock() + return l.shouldLog(lvl) + } + return false +} + +// This function should be called with `sw.l` held +func (sw *DatadogLogger) shouldLog(level LogLevel) bool { + return level >= sw.level +} + +// ValidateLogLevel validates the given log level and returns the corresponding Seelog log level. +// If the log level is "warning", it is converted to "warn" to handle a common gotcha when used with agent5. +// If the log level is not recognized, an error is returned. 
+func ValidateLogLevel(logLevel string) (string, error) { + seelogLogLevel := strings.ToLower(logLevel) + if seelogLogLevel == "warning" { // Common gotcha when used to agent5 + seelogLogLevel = "warn" + } + + if _, found := LogLevelFromString(seelogLogLevel); !found { + return "", fmt.Errorf("unknown log level: %s", seelogLogLevel) + } + return seelogLogLevel, nil +} + +/* +* Operation on the **logger** + */ + +// RegisterAdditionalLogger registers an additional logger for logging +func RegisterAdditionalLogger(n string, li LoggerInterface) error { + return logger.registerAdditionalLogger(n, li) +} +func (sw *loggerPointer) registerAdditionalLogger(n string, li LoggerInterface) error { + l := sw.Load() + if l == nil { + return errors.New("cannot register: logger not initialized") + } + + l.l.Lock() + defer l.l.Unlock() + + if l.inner == nil { + return errors.New("cannot register: logger not initialized") + } + + if l.extra == nil { + + return errors.New("logger not fully initialized, additional logging unavailable") + } + + if _, ok := l.extra[n]; ok { + return errors.New("logger already registered with that name") + } + l.extra[n] = li + + return nil +} + +// ReplaceLogger allows replacing the internal logger, returns old logger +func ReplaceLogger(li LoggerInterface) LoggerInterface { + return logger.replaceInnerLogger(li) +} +func (sw *loggerPointer) replaceInnerLogger(li LoggerInterface) LoggerInterface { + l := sw.Load() + if l == nil { + return nil // Return nil if logger is not initialized + } + + l.l.Lock() + defer l.l.Unlock() + + if l.inner == nil { + return nil // Return nil if logger.inner is not initialized + } + + old := l.inner + l.inner = li + + return old +} + +// Flush flushes the underlying inner log +func Flush() { + logger.flush() + jmxLogger.flush() +} +func (sw *loggerPointer) flush() { + l := sw.Load() + if l == nil { + return + } + + l.l.Lock() + defer l.l.Unlock() + + if l.inner != nil { + l.inner.Flush() + } +} + +/* +* log functions + */ + +// log logs a message at the given level, using either bufferFunc (if logging is not yet set up) or +// scrubAndLogFunc, and treating the variadic args as the message. +func log(logLevel LogLevel, bufferFunc func(), scrubAndLogFunc func(string), v ...interface{}) { + l := logger.Load() + + if l == nil { + addLogToBuffer(bufferFunc) + return + } + + l.l.Lock() + defer l.l.Unlock() + + if l.inner == nil { + addLogToBuffer(bufferFunc) + } else if l.shouldLog(logLevel) { + s := BuildLogEntry(v...) + scrubAndLogFunc(s) + } + +} +func logWithError(logLevel LogLevel, bufferFunc func(), scrubAndLogFunc func(string) error, fallbackStderr bool, v ...interface{}) error { + l := logger.Load() + + if l == nil { + addLogToBuffer(bufferFunc) + err := formatError(v...) + if fallbackStderr { + fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error()) + } + return err + } + + l.l.Lock() + + isInnerNil := l.inner == nil + + if isInnerNil { + if !fallbackStderr { + addLogToBuffer(bufferFunc) + } + } else if l.shouldLog(logLevel) { + defer l.l.Unlock() + s := BuildLogEntry(v...) + return scrubAndLogFunc(s) + } + + l.l.Unlock() + + err := formatError(v...) + // Originally (PR 6436) fallbackStderr check had been added to handle a small window + // where error messages had been lost before Logger had been initialized. Adjusting + // just for that case because if the error log should not be logged - because it has + // been suppressed then it should be taken into account. 
+ if fallbackStderr && isInnerNil { + fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error()) + } + return err +} + +/* +* logFormat functions + */ + +func logFormat(logLevel LogLevel, bufferFunc func(), scrubAndLogFunc func(string, ...interface{}), format string, params ...interface{}) { + l := logger.Load() + + if l == nil { + addLogToBuffer(bufferFunc) + return + } + + l.l.Lock() + defer l.l.Unlock() + + if l.inner == nil { + addLogToBuffer(bufferFunc) + } else if l.shouldLog(logLevel) { + scrubAndLogFunc(format, params...) + } +} +func logFormatWithError(logLevel LogLevel, bufferFunc func(), scrubAndLogFunc func(string, ...interface{}) error, format string, fallbackStderr bool, params ...interface{}) error { + l := logger.Load() + + if l == nil { + addLogToBuffer(bufferFunc) + err := formatErrorf(format, params...) + if fallbackStderr { + fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error()) + } + return err + } + + l.l.Lock() + + isInnerNil := l.inner == nil + + if isInnerNil { + if !fallbackStderr { + addLogToBuffer(bufferFunc) + } + } else if l.shouldLog(logLevel) { + defer l.l.Unlock() + return scrubAndLogFunc(format, params...) + } + + l.l.Unlock() + + err := formatErrorf(format, params...) + // Originally (PR 6436) fallbackStderr check had been added to handle a small window + // where error messages had been lost before Logger had been initialized. Adjusting + // just for that case because if the error log should not be logged - because it has + // been suppressed then it should be taken into account. + if fallbackStderr && isInnerNil { + fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error()) + } + return err +} + +/* +* logContext functions + */ + +func logContext(logLevel LogLevel, bufferFunc func(), scrubAndLogFunc func(string), message string, depth int, context ...interface{}) { + l := logger.Load() + + if l == nil { + addLogToBuffer(bufferFunc) + return + } + + l.l.Lock() + defer l.l.Unlock() + + if l.inner == nil { + addLogToBuffer(bufferFunc) + } else if l.shouldLog(logLevel) { + l.inner.SetContext(context) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) + scrubAndLogFunc(message) + l.inner.SetContext(nil) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth) + } +} +func logContextWithError(logLevel LogLevel, bufferFunc func(), scrubAndLogFunc func(string) error, message string, fallbackStderr bool, depth int, context ...interface{}) error { + l := logger.Load() + + if l == nil { + addLogToBuffer(bufferFunc) + err := formatErrorc(message, context...) + if fallbackStderr { + fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error()) + } + return err + } + + l.l.Lock() + + isInnerNil := l.inner == nil + + if isInnerNil { + if !fallbackStderr { + addLogToBuffer(bufferFunc) + } + } else if l.shouldLog(logLevel) { + l.inner.SetContext(context) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) + err := scrubAndLogFunc(message) + l.inner.SetContext(nil) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth) + defer l.l.Unlock() + return err + } + + l.l.Unlock() + + err := formatErrorc(message, context...) 
+ if fallbackStderr && isInnerNil { + fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error()) + } + return err +} + +// trace logs at the trace level, called with sw.l held +func (sw *loggerPointer) trace(s string) { + l := sw.Load() + + if l == nil { + return + } + + scrubbed := l.scrub(s) + l.inner.Trace(scrubbed) + + for _, l := range l.extra { + l.Trace(scrubbed) + } +} + +// trace logs at the trace level and the current stack depth plus the +// additional given one, called with sw.l held +func (sw *loggerPointer) traceStackDepth(s string, depth int) { + l := sw.Load() + scrubbed := l.scrub(s) + + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) + l.inner.Trace(scrubbed) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth) + + for _, l := range l.extra { + l.Trace(scrubbed) + } +} + +// debug logs at the debug level, called with sw.l held +func (sw *loggerPointer) debug(s string) { + l := sw.Load() + scrubbed := l.scrub(s) + l.inner.Debug(scrubbed) + + for _, l := range l.extra { + l.Debug(scrubbed) + } +} + +// debug logs at the debug level and the current stack depth plus the additional given one, called with sw.l held +func (sw *loggerPointer) debugStackDepth(s string, depth int) { + l := sw.Load() + scrubbed := l.scrub(s) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) + l.inner.Debug(scrubbed) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth) + + for _, l := range l.extra { + l.Debug(scrubbed) + } +} + +// info logs at the info level, called with sw.l held +func (sw *loggerPointer) info(s string) { + l := sw.Load() + scrubbed := l.scrub(s) + l.inner.Info(scrubbed) + for _, l := range l.extra { + l.Info(scrubbed) + } +} + +// info logs at the info level and the current stack depth plus the additional given one, called with sw.l held +func (sw *loggerPointer) infoStackDepth(s string, depth int) { + l := sw.Load() + scrubbed := l.scrub(s) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) + l.inner.Info(scrubbed) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth) + + for _, l := range l.extra { + l.Info(scrubbed) + } +} + +// warn logs at the warn level, called with sw.l held +func (sw *loggerPointer) warn(s string) error { + l := sw.Load() + scrubbed := l.scrub(s) + err := l.inner.Warn(scrubbed) + + for _, l := range l.extra { + _ = l.Warn(scrubbed) + } + + return err +} + +// error logs at the error level and the current stack depth plus the additional given one, called with sw.l held +func (sw *loggerPointer) warnStackDepth(s string, depth int) error { + l := sw.Load() + scrubbed := l.scrub(s) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) + err := l.inner.Warn(scrubbed) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth) + + for _, l := range l.extra { + _ = l.Warn(scrubbed) + } + + return err +} + +// error logs at the error level, called with sw.l held +func (sw *loggerPointer) error(s string) error { + l := sw.Load() + scrubbed := l.scrub(s) + err := l.inner.Error(scrubbed) + + for _, l := range l.extra { + _ = l.Error(scrubbed) + } + + return err +} + +// error logs at the error level and the current stack depth plus the additional given one, called with sw.l held +func (sw *loggerPointer) errorStackDepth(s string, depth int) error { + l := sw.Load() + scrubbed := l.scrub(s) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) + err := l.inner.Error(scrubbed) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth) + + for _, l := range l.extra { + _ = l.Error(scrubbed) 
+ } + + return err +} + +// critical logs at the critical level, called with sw.l held +func (sw *loggerPointer) critical(s string) error { + l := sw.Load() + scrubbed := l.scrub(s) + err := l.inner.Critical(scrubbed) + + for _, l := range l.extra { + _ = l.Critical(scrubbed) + } + + return err +} + +// critical logs at the critical level and the current stack depth plus the additional given one, called with sw.l held +func (sw *loggerPointer) criticalStackDepth(s string, depth int) error { + l := sw.Load() + scrubbed := l.scrub(s) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) + err := l.inner.Critical(scrubbed) + _ = l.inner.SetAdditionalStackDepth(defaultStackDepth) + + for _, l := range l.extra { + _ = l.Critical(scrubbed) + } + + return err +} + +// tracef logs with format at the trace level, called with sw.l held +func (sw *loggerPointer) tracef(format string, params ...interface{}) { + l := sw.Load() + scrubbed := l.scrub(fmt.Sprintf(format, params...)) + l.inner.Trace(scrubbed) + + for _, l := range l.extra { + l.Trace(scrubbed) + } +} + +// debugf logs with format at the debug level, called with sw.l held +func (sw *loggerPointer) debugf(format string, params ...interface{}) { + l := sw.Load() + scrubbed := l.scrub(fmt.Sprintf(format, params...)) + l.inner.Debug(scrubbed) + + for _, l := range l.extra { + l.Debug(scrubbed) + } +} + +// infof logs with format at the info level, called with sw.l held +func (sw *loggerPointer) infof(format string, params ...interface{}) { + l := sw.Load() + scrubbed := l.scrub(fmt.Sprintf(format, params...)) + l.inner.Info(scrubbed) + + for _, l := range l.extra { + l.Info(scrubbed) + } +} + +// warnf logs with format at the warn level, called with sw.l held +func (sw *loggerPointer) warnf(format string, params ...interface{}) error { + l := sw.Load() + scrubbed := l.scrub(fmt.Sprintf(format, params...)) + err := l.inner.Warn(scrubbed) + + for _, l := range l.extra { + _ = l.Warn(scrubbed) + } + + return err +} + +// errorf logs with format at the error level, called with sw.l held +func (sw *loggerPointer) errorf(format string, params ...interface{}) error { + l := sw.Load() + scrubbed := l.scrub(fmt.Sprintf(format, params...)) + err := l.inner.Error(scrubbed) + + for _, l := range l.extra { + _ = l.Error(scrubbed) + } + + return err +} + +// criticalf logs with format at the critical level, called with sw.l held +func (sw *loggerPointer) criticalf(format string, params ...interface{}) error { + l := sw.Load() + scrubbed := l.scrub(fmt.Sprintf(format, params...)) + err := l.inner.Critical(scrubbed) + + for _, l := range l.extra { + _ = l.Critical(scrubbed) + } + + return err +} + +// BuildLogEntry concatenates all inputs with spaces +func BuildLogEntry(v ...interface{}) string { + var fmtBuffer bytes.Buffer + + for i := 0; i < len(v)-1; i++ { + fmtBuffer.WriteString("%v ") + } + fmtBuffer.WriteString("%v") + + return fmt.Sprintf(fmtBuffer.String(), v...) 
+} + +func scrubMessage(message string) string { + msgScrubbed, err := scrubBytesFunc([]byte(message)) + if err == nil { + return string(msgScrubbed) + } + return "[REDACTED] - failure to clean the message" +} + +func formatErrorf(format string, params ...interface{}) error { + msg := scrubMessage(fmt.Sprintf(format, params...)) + return errors.New(msg) +} + +func formatError(v ...interface{}) error { + msg := scrubMessage(fmt.Sprint(v...)) + return errors.New(msg) +} + +func formatErrorc(message string, context ...interface{}) error { + // Build a format string like this: + // message (%s:%v, %s:%v, ... %s:%v) + var fmtBuffer bytes.Buffer + fmtBuffer.WriteString(message) + if len(context) > 0 && len(context)%2 == 0 { + fmtBuffer.WriteString(" (") + for i := 0; i < len(context); i += 2 { + fmtBuffer.WriteString("%s:%v") + if i != len(context)-2 { + fmtBuffer.WriteString(", ") + } + } + fmtBuffer.WriteString(")") + } + + msg := fmt.Sprintf(fmtBuffer.String(), context...) + return errors.New(scrubMessage(msg)) +} + +// Trace logs at the trace level +func Trace(v ...interface{}) { + log(TraceLvl, func() { Trace(v...) }, logger.trace, v...) +} + +// Tracef logs with format at the trace level +func Tracef(format string, params ...interface{}) { + logFormat(TraceLvl, func() { Tracef(format, params...) }, logger.tracef, format, params...) +} + +// TracefStackDepth logs with format at the trace level and the current stack depth plus the given depth +func TracefStackDepth(depth int, format string, params ...interface{}) { + currentLevel, _ := GetLogLevel() + if currentLevel > TraceLvl { + return + } + msg := fmt.Sprintf(format, params...) + log(TraceLvl, func() { TraceStackDepth(depth, msg) }, func(s string) { + logger.traceStackDepth(s, depth) + }, msg) +} + +// TracecStackDepth logs at the trace level with context and the current stack depth plus the additional given one +func TracecStackDepth(message string, depth int, context ...interface{}) { + logContext(TraceLvl, func() { Tracec(message, context...) }, logger.trace, message, depth, context...) +} + +// Tracec logs at the trace level with context +func Tracec(message string, context ...interface{}) { + TracecStackDepth(message, 1, context...) +} + +// TraceFunc calls and logs the result of 'logFunc' if and only if Trace (or more verbose) logs are enabled +func TraceFunc(logFunc func() string) { + currentLevel, _ := GetLogLevel() + if currentLevel <= TraceLvl { + TraceStackDepth(2, logFunc()) + } +} + +// Debug logs at the debug level +func Debug(v ...interface{}) { + log(DebugLvl, func() { Debug(v...) }, logger.debug, v...) +} + +// Debugf logs with format at the debug level +func Debugf(format string, params ...interface{}) { + logFormat(DebugLvl, func() { Debugf(format, params...) }, logger.debugf, format, params...) +} + +// DebugfStackDepth logs with format at the debug level and the current stack depth plus the given depth +func DebugfStackDepth(depth int, format string, params ...interface{}) { + currentLevel, _ := GetLogLevel() + if currentLevel > DebugLvl { + return + } + msg := fmt.Sprintf(format, params...) + log(DebugLvl, func() { DebugStackDepth(depth, msg) }, func(s string) { + logger.debugStackDepth(s, depth) + }, msg) +} + +// DebugcStackDepth logs at the debug level with context and the current stack depth plus the additional given one +func DebugcStackDepth(message string, depth int, context ...interface{}) { + logContext(DebugLvl, func() { Debugc(message, context...) }, logger.debug, message, depth, context...) 
+} + +// Debugc logs at the debug level with context +func Debugc(message string, context ...interface{}) { + DebugcStackDepth(message, 1, context...) +} + +// DebugFunc calls and logs the result of 'logFunc' if and only if Debug (or more verbose) logs are enabled +func DebugFunc(logFunc func() string) { + currentLevel, _ := GetLogLevel() + if currentLevel <= DebugLvl { + DebugStackDepth(2, logFunc()) + } +} + +// Info logs at the info level +func Info(v ...interface{}) { + log(InfoLvl, func() { Info(v...) }, logger.info, v...) +} + +// Infof logs with format at the info level +func Infof(format string, params ...interface{}) { + logFormat(InfoLvl, func() { Infof(format, params...) }, logger.infof, format, params...) +} + +// InfofStackDepth logs with format at the info level and the current stack depth plus the given depth +func InfofStackDepth(depth int, format string, params ...interface{}) { + currentLevel, _ := GetLogLevel() + if currentLevel > InfoLvl { + return + } + msg := fmt.Sprintf(format, params...) + log(InfoLvl, func() { InfoStackDepth(depth, msg) }, func(s string) { + logger.infoStackDepth(s, depth) + }, msg) +} + +// InfocStackDepth logs at the info level with context and the current stack depth plus the additional given one +func InfocStackDepth(message string, depth int, context ...interface{}) { + logContext(InfoLvl, func() { Infoc(message, context...) }, logger.info, message, depth, context...) +} + +// Infoc logs at the info level with context +func Infoc(message string, context ...interface{}) { + InfocStackDepth(message, 1, context...) +} + +// InfoFunc calls and logs the result of 'logFunc' if and only if Info (or more verbose) logs are enabled +func InfoFunc(logFunc func() string) { + currentLevel, _ := GetLogLevel() + if currentLevel <= InfoLvl { + InfoStackDepth(2, logFunc()) + } +} + +// Warn logs at the warn level and returns an error containing the formated log message +func Warn(v ...interface{}) error { + return logWithError(WarnLvl, func() { _ = Warn(v...) }, logger.warn, false, v...) +} + +// Warnf logs with format at the warn level and returns an error containing the formated log message +func Warnf(format string, params ...interface{}) error { + return logFormatWithError(WarnLvl, func() { _ = Warnf(format, params...) }, logger.warnf, format, false, params...) +} + +// WarnfStackDepth logs with format at the warn level and the current stack depth plus the given depth +func WarnfStackDepth(depth int, format string, params ...interface{}) error { + msg := fmt.Sprintf(format, params...) + return logWithError(WarnLvl, func() { _ = WarnStackDepth(depth, msg) }, func(s string) error { + return logger.warnStackDepth(s, depth) + }, false, msg) +} + +// WarncStackDepth logs at the warn level with context and the current stack depth plus the additional given one and returns an error containing the formated log message +func WarncStackDepth(message string, depth int, context ...interface{}) error { + return logContextWithError(WarnLvl, func() { _ = Warnc(message, context...) }, logger.warn, message, false, depth, context...) +} + +// Warnc logs at the warn level with context and returns an error containing the formated log message +func Warnc(message string, context ...interface{}) error { + return WarncStackDepth(message, 1, context...) 
+} + +// WarnFunc calls and logs the result of 'logFunc' if and only if Warn (or more verbose) logs are enabled +func WarnFunc(logFunc func() string) { + currentLevel, _ := GetLogLevel() + if currentLevel <= WarnLvl { + _ = WarnStackDepth(2, logFunc()) + } +} + +// Error logs at the error level and returns an error containing the formated log message +func Error(v ...interface{}) error { + return logWithError(ErrorLvl, func() { _ = Error(v...) }, logger.error, true, v...) +} + +// Errorf logs with format at the error level and returns an error containing the formated log message +func Errorf(format string, params ...interface{}) error { + return logFormatWithError(ErrorLvl, func() { _ = Errorf(format, params...) }, logger.errorf, format, true, params...) +} + +// ErrorfStackDepth logs with format at the error level and the current stack depth plus the given depth +func ErrorfStackDepth(depth int, format string, params ...interface{}) error { + msg := fmt.Sprintf(format, params...) + return logWithError(ErrorLvl, func() { _ = ErrorStackDepth(depth, msg) }, func(s string) error { + return logger.errorStackDepth(s, depth) + }, true, msg) +} + +// ErrorcStackDepth logs at the error level with context and the current stack depth plus the additional given one and returns an error containing the formated log message +func ErrorcStackDepth(message string, depth int, context ...interface{}) error { + return logContextWithError(ErrorLvl, func() { _ = Errorc(message, context...) }, logger.error, message, true, depth, context...) +} + +// Errorc logs at the error level with context and returns an error containing the formated log message +func Errorc(message string, context ...interface{}) error { + return ErrorcStackDepth(message, 1, context...) +} + +// ErrorFunc calls and logs the result of 'logFunc' if and only if Error (or more verbose) logs are enabled +func ErrorFunc(logFunc func() string) { + currentLevel, _ := GetLogLevel() + if currentLevel <= ErrorLvl { + _ = ErrorStackDepth(2, logFunc()) + } +} + +// Critical logs at the critical level and returns an error containing the formated log message +func Critical(v ...interface{}) error { + return logWithError(CriticalLvl, func() { _ = Critical(v...) }, logger.critical, true, v...) +} + +// Criticalf logs with format at the critical level and returns an error containing the formated log message +func Criticalf(format string, params ...interface{}) error { + return logFormatWithError(CriticalLvl, func() { _ = Criticalf(format, params...) }, logger.criticalf, format, true, params...) +} + +// CriticalfStackDepth logs with format at the critical level and the current stack depth plus the given depth +func CriticalfStackDepth(depth int, format string, params ...interface{}) error { + msg := fmt.Sprintf(format, params...) + return logWithError(CriticalLvl, func() { _ = CriticalStackDepth(depth, msg) }, func(s string) error { + return logger.criticalStackDepth(s, depth) + }, false, msg) +} + +// CriticalcStackDepth logs at the critical level with context and the current stack depth plus the additional given one and returns an error containing the formated log message +func CriticalcStackDepth(message string, depth int, context ...interface{}) error { + return logContextWithError(CriticalLvl, func() { _ = Criticalc(message, context...) }, logger.critical, message, true, depth, context...) 
+} + +// Criticalc logs at the critical level with context and returns an error containing the formated log message +func Criticalc(message string, context ...interface{}) error { + return CriticalcStackDepth(message, 1, context...) +} + +// CriticalFunc calls and logs the result of 'logFunc' if and only if Critical (or more verbose) logs are enabled +func CriticalFunc(logFunc func() string) { + currentLevel, _ := GetLogLevel() + if currentLevel <= CriticalLvl { + _ = CriticalStackDepth(2, logFunc()) + } +} + +// InfoStackDepth logs at the info level and the current stack depth plus the additional given one +func InfoStackDepth(depth int, v ...interface{}) { + log(InfoLvl, func() { InfoStackDepth(depth, v...) }, func(s string) { + logger.infoStackDepth(s, depth) + }, v...) +} + +// WarnStackDepth logs at the warn level and the current stack depth plus the additional given one and returns an error containing the formated log message +func WarnStackDepth(depth int, v ...interface{}) error { + return logWithError(WarnLvl, func() { _ = WarnStackDepth(depth, v...) }, func(s string) error { + return logger.warnStackDepth(s, depth) + }, false, v...) +} + +// DebugStackDepth logs at the debug level and the current stack depth plus the additional given one and returns an error containing the formated log message +func DebugStackDepth(depth int, v ...interface{}) { + log(DebugLvl, func() { DebugStackDepth(depth, v...) }, func(s string) { + logger.debugStackDepth(s, depth) + }, v...) +} + +// TraceStackDepth logs at the trace level and the current stack depth plus the additional given one and returns an error containing the formated log message +func TraceStackDepth(depth int, v ...interface{}) { + log(TraceLvl, func() { TraceStackDepth(depth, v...) }, func(s string) { + logger.traceStackDepth(s, depth) + }, v...) +} + +// ErrorStackDepth logs at the error level and the current stack depth plus the additional given one and returns an error containing the formated log message +func ErrorStackDepth(depth int, v ...interface{}) error { + return logWithError(ErrorLvl, func() { _ = ErrorStackDepth(depth, v...) }, func(s string) error { + return logger.errorStackDepth(s, depth) + }, true, v...) +} + +// CriticalStackDepth logs at the critical level and the current stack depth plus the additional given one and returns an error containing the formated log message +func CriticalStackDepth(depth int, v ...interface{}) error { + return logWithError(CriticalLvl, func() { _ = CriticalStackDepth(depth, v...) }, func(s string) error { + return logger.criticalStackDepth(s, depth) + }, true, v...) +} + +/* +* JMX Logger Section + */ + +// JMXError Logs for JMX check +func JMXError(v ...interface{}) error { + return logWithError(ErrorLvl, func() { _ = JMXError(v...) }, jmxLogger.error, true, v...) +} + +// JMXInfo Logs +func JMXInfo(v ...interface{}) { + log(InfoLvl, func() { JMXInfo(v...) }, jmxLogger.info, v...) +} + +// SetupJMXLogger setup JMXfetch specific logger +func SetupJMXLogger(i LoggerInterface, level string) { + jmxLogger.Store(setupCommonLogger(i, level)) +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_limit.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_limit.go new file mode 100644 index 00000000..0ce8e64d --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_limit.go @@ -0,0 +1,38 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_limit.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_limit.go
new file mode 100644
index 00000000..0ce8e64d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_limit.go
@@ -0,0 +1,38 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package log
+
+import (
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+// Limit is a utility that can be used to avoid logging noisily
+type Limit struct {
+	s rate.Sometimes
+}
+
+// NewLogLimit creates a Limit where ShouldLog will return
+// true the first N times it is called, and will return true once every
+// interval thereafter.
+func NewLogLimit(n int, interval time.Duration) *Limit {
+	return &Limit{
+		s: rate.Sometimes{
+			First:    n,
+			Interval: interval,
+		},
+	}
+}
+
+// ShouldLog returns true if the caller should log
+func (l *Limit) ShouldLog() bool {
+	shouldLog := false
+	l.s.Do(func() {
+		shouldLog = true
+	})
+	return shouldLog
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_not_serverless.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_not_serverless.go
new file mode 100644
index 00000000..8b7d7688
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_not_serverless.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !serverless
+
+package log
+
+// DebugServerless logs at the debug level only in a serverless context;
+// it is a no-op in a non-serverless context
+func DebugServerless(_ ...interface{}) {
+}
+
+// DebugfServerless logs with format at the debug level only in a serverless context;
+// it is a no-op in a non-serverless context
+func DebugfServerless(_ string, _ ...interface{}) {
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_podman_util.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_podman_util.go
new file mode 100644
index 00000000..43275728
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_podman_util.go
@@ -0,0 +1,27 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package log
+
+import (
+	"strings"
+)
+
+// The paths below are set in podman code and cannot be modified by the user.
+// Ref: https://github.com/containers/podman/blob/7c38ee756592d95e718967fcd3983b81abd95e76/test/e2e/run_transient_test.go#L19-L45
+const (
+	sqlDBSuffix  string = "/storage/db.sql"
+	boltDBSuffix string = "/storage/libpod/bolt_state.db"
+)
+
+// ExtractPodmanRootDirFromDBPath extracts the podman base path for the containers directory based on the user-provided `podman_db_path`.
+func ExtractPodmanRootDirFromDBPath(podmanDBPath string) string {
+	if strings.HasSuffix(podmanDBPath, sqlDBSuffix) {
+		return strings.TrimSuffix(podmanDBPath, sqlDBSuffix)
+	} else if strings.HasSuffix(podmanDBPath, boltDBSuffix) {
+		return strings.TrimSuffix(podmanDBPath, boltDBSuffix)
+	}
+	return ""
+}
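The Limit type above wraps golang.org/x/time/rate.Sometimes to de-noise repeated failures: the first n calls log, then at most one per interval. A sketch under assumed values (doWork is hypothetical, and Warnf is the package-level helper defined earlier in log.go, outside this hunk):

package main

import (
	"errors"
	"time"

	pkglog "github.com/DataDog/datadog-agent/pkg/util/log"
)

// doWork is a hypothetical workload with an occasional failure mode.
func doWork(i int) error {
	if i%100 == 0 {
		return errors.New("transient failure")
	}
	return nil
}

func main() {
	// Log the first 5 failures, then at most one every 30 seconds.
	limiter := pkglog.NewLogLimit(5, 30*time.Second)

	for i := 0; i < 1000; i++ {
		if err := doWork(i); err != nil && limiter.ShouldLog() {
			_ = pkglog.Warnf("work item %d failed: %v", i, err)
		}
	}
}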
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_serverless.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_serverless.go
new file mode 100644
index 00000000..9ad3aa42
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_serverless.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build serverless
+
+package log
+
+// DebugServerless logs at the debug level only in a serverless context
+func DebugServerless(v ...interface{}) {
+	Debug(v...)
+}
+
+// DebugfServerless logs with format at the debug level only in a serverless context
+func DebugfServerless(format string, params ...interface{}) {
+	Debugf(format, params...)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_test_init.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_test_init.go
new file mode 100644
index 00000000..b1b5e3bb
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_test_init.go
@@ -0,0 +1,20 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+
+package log
+
+import (
+	"os"
+)
+
+func init() {
+	level := os.Getenv("DD_LOG_LEVEL")
+	if level == "" {
+		level = "debug"
+	}
+	SetupLogger(Default(), level)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/logger.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/logger.go
new file mode 100644
index 00000000..5de956c8
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/logger.go
@@ -0,0 +1,35 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package log
+
+import (
+	"io"
+
+	"github.com/cihub/seelog"
+)
+
+// LoggerInterface provides basic logging methods.
+type LoggerInterface seelog.LoggerInterface
+
+// Default returns a default logger
+func Default() LoggerInterface {
+	return seelog.Default
+}
+
+// Disabled returns a disabled logger
+func Disabled() LoggerInterface {
+	return seelog.Disabled
+}
+
+// LoggerFromWriterWithMinLevelAndFormat creates a new logger from a writer, a minimum log level and a format.
+func LoggerFromWriterWithMinLevelAndFormat(output io.Writer, minLevel LogLevel, format string) (LoggerInterface, error) {
+	return seelog.LoggerFromWriterWithMinLevelAndFormat(output, seelog.LogLevel(minLevel), format)
+}
+
+// LoggerFromWriterWithMinLevel creates a new logger from a writer and a minimum log level.
+func LoggerFromWriterWithMinLevel(output io.Writer, minLevel LogLevel) (LoggerInterface, error) {
+	return seelog.LoggerFromWriterWithMinLevel(output, seelog.LogLevel(minLevel))
+}
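logger.go is a thin wrapper over seelog; together with SetupLogger (used in log_test_init.go above), it can route package-level logs to a custom writer. A sketch, assuming the package's InfoLvl constant and the Info helper defined earlier in log.go (neither shown in this hunk):

package main

import (
	"os"

	pkglog "github.com/DataDog/datadog-agent/pkg/util/log"
)

func main() {
	// Build a seelog-backed logger that writes Info and above to stderr...
	inner, err := pkglog.LoggerFromWriterWithMinLevel(os.Stderr, pkglog.InfoLvl)
	if err != nil {
		panic(err)
	}
	// ...and install it as the package-level logger.
	pkglog.SetupLogger(inner, "info")
	pkglog.Info("logger wired to stderr")
}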
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/wrapper.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/wrapper.go
new file mode 100644
index 00000000..52ff4c08
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/wrapper.go
@@ -0,0 +1,77 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package log
+
+// Wrapper wraps all the logger functions in a struct. It is meant to be used by the comp/core/log component to expose
+// the logger functionality to components. It should only be used by the log component.
+type Wrapper struct {
+	stackDepth int
+}
+
+// NewWrapper returns a new Wrapper. This should only be used by the log component.
+func NewWrapper(stackDepth int) *Wrapper {
+	return &Wrapper{stackDepth: stackDepth}
+}
+
+// Until the log migration to components is done, we forward to the *StackDepth variants in pkg/util/log. The log
+// component adds one layer to the call stack and *StackDepth adds another.
+//
+// We check the current log level to avoid calling Sprintf when it's not needed (Sprintf from Tracef uses a lot of CPU)
+
+// Trace implements Component#Trace.
+func (l *Wrapper) Trace(v ...interface{}) { TraceStackDepth(l.stackDepth, v...) }
+
+// Tracef implements Component#Tracef.
+func (l *Wrapper) Tracef(format string, params ...interface{}) {
+	TracefStackDepth(l.stackDepth, format, params...)
+}
+
+// Debug implements Component#Debug.
+func (l *Wrapper) Debug(v ...interface{}) { DebugStackDepth(l.stackDepth, v...) }
+
+// Debugf implements Component#Debugf.
+func (l *Wrapper) Debugf(format string, params ...interface{}) {
+	DebugfStackDepth(l.stackDepth, format, params...)
+}
+
+// Info implements Component#Info.
+func (l *Wrapper) Info(v ...interface{}) { InfoStackDepth(l.stackDepth, v...) }
+
+// Infof implements Component#Infof.
+func (l *Wrapper) Infof(format string, params ...interface{}) {
+	InfofStackDepth(l.stackDepth, format, params...)
+}
+
+// Warn implements Component#Warn.
+func (l *Wrapper) Warn(v ...interface{}) error { return WarnStackDepth(l.stackDepth, v...) }
+
+// Warnf implements Component#Warnf.
+func (l *Wrapper) Warnf(format string, params ...interface{}) error {
+	return WarnfStackDepth(l.stackDepth, format, params...)
+}
+
+// Error implements Component#Error.
+func (l *Wrapper) Error(v ...interface{}) error { return ErrorStackDepth(l.stackDepth, v...) }
+
+// Errorf implements Component#Errorf.
+func (l *Wrapper) Errorf(format string, params ...interface{}) error {
+	return ErrorfStackDepth(l.stackDepth, format, params...)
+}
+
+// Critical implements Component#Critical.
+func (l *Wrapper) Critical(v ...interface{}) error {
+	return CriticalStackDepth(l.stackDepth, v...)
+}
+
+// Criticalf implements Component#Criticalf.
+func (l *Wrapper) Criticalf(format string, params ...interface{}) error {
+	return CriticalfStackDepth(l.stackDepth, format, params...)
+}
+
+// Flush implements Component#Flush.
+func (l *Wrapper) Flush() {
+	Flush()
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/LICENSE
new file mode 100644
index 00000000..b370545b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/LICENSE
@@ -0,0 +1,200 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-present Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/default.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/default.go
new file mode 100644
index 00000000..daed8bd1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/default.go
@@ -0,0 +1,421 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package scrubber
+
+import (
+	"fmt"
+	"regexp"
+	"slices"
+	"strings"
+	"sync"
+)
+
+var (
+	// DefaultScrubber is the scrubber used by the package-level cleaning functions.
+	//
+	// It includes a set of agent-specific replacers. It can scrub DataDog App
+	// and API keys, passwords from URLs, and multi-line PEM-formatted TLS keys and
+	// certificates. It contains special handling for YAML-like content (with
+	// lines of the form "key: value") and can scrub passwords, tokens, and SNMP
+	// community strings in such content.
+	//
+	// See default.go for details of these replacers.
+	DefaultScrubber = &Scrubber{}
+
+	defaultReplacement = "********"
+
+	// dynamicReplacers are replacers added at runtime. New Replacers can be added through configuration or by the
+	// secrets package, for example.
+	dynamicReplacers      = []Replacer{}
+	dynamicReplacersMutex = sync.Mutex{}
+
+	// defaultVersion is the first version of the agent scrubber.
+	// https://github.com/DataDog/datadog-agent/pull/9618
+	defaultVersion = parseVersion("7.33.0")
+)
+
+func init() {
+	AddDefaultReplacers(DefaultScrubber)
+}
+
+// AddDefaultReplacers to a scrubber. This is called automatically for
+// DefaultScrubber, but can be used to initialize other, custom scrubbers with
+// the default replacers.
+func AddDefaultReplacers(scrubber *Scrubber) {
+	hintedAPIKeyReplacer := Replacer{
+		// If hinted, mask the value even if it doesn't match the 32-char hexadecimal format
+		Regex: regexp.MustCompile(`(api_?key=)\b[a-zA-Z0-9]+([a-zA-Z0-9]{5})\b`),
+		Hints: []string{"api_key", "apikey"},
+		Repl:  []byte(`$1***************************$2`),
+
+		LastUpdated: defaultVersion,
+	}
+	hintedAPPKeyReplacer := Replacer{
+		// If hinted, mask the value even if it doesn't match the 40-char hexadecimal format
+		Regex: regexp.MustCompile(`(ap(?:p|plication)_?key=)\b[a-zA-Z0-9]+([a-zA-Z0-9]{5})\b`),
+		Hints: []string{"app_key", "appkey", "application_key"},
+		Repl:  []byte(`$1***********************************$2`),
+
+		LastUpdated: defaultVersion,
+	}
+
+	// Replacers are checked one by one, in order. We first try to scrub a 64-character token, keeping its last 5
+	// characters. If the token has a different size, we scrub it entirely.
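+	// Illustrative example (not part of the vendored source): a 64-character
+	// hex token keeps only its last 5 characters,
+	//	"Bearer <59 hex chars>f00ba" -> "Bearer <59 '*' chars>f00ba"
+	// while a token of any other shape is masked wholesale by the fallback
+	// replacer that follows.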
+ hintedBearerReplacer := Replacer{ + Regex: regexp.MustCompile(`\bBearer [a-fA-F0-9]{59}([a-fA-F0-9]{5})\b`), + Hints: []string{"Bearer"}, + Repl: []byte(`Bearer ***********************************************************$1`), + + // https://github.com/DataDog/datadog-agent/pull/12338 + LastUpdated: parseVersion("7.38.0"), + } + // For this one we match any characters + hintedBearerInvalidReplacer := Replacer{ + Regex: regexp.MustCompile(`\bBearer\s+[^*]+\b`), + Hints: []string{"Bearer"}, + Repl: []byte("Bearer " + defaultReplacement), + + // https://github.com/DataDog/datadog-agent/pull/23448 + LastUpdated: parseVersion("7.53.0"), + } + + apiKeyReplacerYAML := Replacer{ + Regex: regexp.MustCompile(`(\-|\:|,|\[|\{)(\s+)?\b[a-fA-F0-9]{27}([a-fA-F0-9]{5})\b`), + Repl: []byte(`$1$2"***************************$3"`), + + // https://github.com/DataDog/datadog-agent/pull/12605 + LastUpdated: parseVersion("7.39.0"), + } + apiKeyReplacer := Replacer{ + Regex: regexp.MustCompile(`\b[a-fA-F0-9]{27}([a-fA-F0-9]{5})\b`), + Repl: []byte(`***************************$1`), + + LastUpdated: defaultVersion, + } + appKeyReplacerYAML := Replacer{ + Regex: regexp.MustCompile(`(\-|\:|,|\[|\{)(\s+)?\b[a-fA-F0-9]{35}([a-fA-F0-9]{5})\b`), + Repl: []byte(`$1$2"***********************************$3"`), + + // https://github.com/DataDog/datadog-agent/pull/12605 + LastUpdated: parseVersion("7.39.0"), + } + appKeyReplacer := Replacer{ + Regex: regexp.MustCompile(`\b[a-fA-F0-9]{35}([a-fA-F0-9]{5})\b`), + Repl: []byte(`***********************************$1`), + + LastUpdated: defaultVersion, + } + rcAppKeyReplacer := Replacer{ + Regex: regexp.MustCompile(`\bDDRCM_[A-Z0-9]+([A-Z0-9]{5})\b`), + Repl: []byte(`***********************************$1`), + + // https://github.com/DataDog/datadog-agent/pull/14681 + LastUpdated: parseVersion("7.42.0"), + } + + // URI Generic Syntax + // https://tools.ietf.org/html/rfc3986 + uriPasswordReplacer := Replacer{ + Regex: regexp.MustCompile(`(?i)([a-z][a-z0-9+-.]+://|\b)([^:\s]+):([^\s|"]+)@`), + Repl: []byte(`$1$2:********@`), + + // https://github.com/DataDog/datadog-agent/pull/32503 + LastUpdated: parseVersion("7.62.0"), + } + + yamlPasswordReplacer := matchYAMLKeyPart( + `(pass(word)?|pwd)`, + []string{"pass", "pwd"}, + []byte(`$1 "********"`), + ) + yamlPasswordReplacer.LastUpdated = defaultVersion + passwordReplacer := Replacer{ + // this regex has three parts: + // * key: case-insensitive, optionally quoted (pass | password | pswd | pwd), not anchored to match on args like --mysql_password= etc. 
+		// * separator: (= or :) with an optional opening quote that we don't want to match as part of the password
+		// * password string: alphanumeric + special chars, except quotes and semicolon
+		Regex: regexp.MustCompile(`(?i)(\"?(?:pass(?:word)?|pswd|pwd)\"?)((?:=| = |: )\"?)([0-9A-Za-z#!$%&()*+,\-./:<=>?@[\\\]^_{|}~]+)`),
+		// replace the 3rd capture group (the password string) with ********
+		Repl: []byte(`$1$2********`),
+
+		// https://github.com/DataDog/datadog-agent/pull/28144
+		LastUpdated: parseVersion("7.57.0"),
+	}
+	tokenReplacer := matchYAMLKeyEnding(
+		`token`,
+		[]string{"token"},
+		[]byte(`$1 "********"`),
+	)
+	tokenReplacer.LastUpdated = defaultVersion
+	snmpReplacer := matchYAMLKey(
+		`(community_string|auth[Kk]ey|priv[Kk]ey|community|authentication_key|privacy_key|Authorization|authorization)`,
+		[]string{"community_string", "authKey", "authkey", "privKey", "privkey", "community", "authentication_key", "privacy_key", "Authorization", "authorization"},
+		[]byte(`$1 "********"`),
+	)
+	snmpReplacer.LastUpdated = parseVersion("7.64.0") // https://github.com/DataDog/datadog-agent/pull/33742
+	snmpMultilineReplacer := matchYAMLKeyWithListValue(
+		"(community_strings)",
+		"community_strings",
+		[]byte(`$1 "********"`),
+	)
+	snmpMultilineReplacer.LastUpdated = parseVersion("7.34.0") // https://github.com/DataDog/datadog-agent/pull/10305
+	certReplacer := Replacer{
+		/*
+			Try to match as accurately as possible, per RFC 7468's ABNF.
+			Backreferences are not available in Go, so we cannot verify
+			here that the BEGIN label is the same as the END label.
+		*/
+		Regex: regexp.MustCompile(`-----BEGIN (?:.*)-----[A-Za-z0-9=\+\/\s]*-----END (?:.*)-----`),
+		Hints: []string{"BEGIN"},
+		Repl:  []byte(`********`),
+
+		LastUpdated: defaultVersion,
+	}
+
+	// The following replacers work on YAML objects only
+
+	apiKeyYaml := matchYAMLOnly(
+		`api_key`,
+		func(data interface{}) interface{} {
+			if apiKey, ok := data.(string); ok {
+				apiKey := strings.TrimSpace(apiKey)
+				if apiKey == "" {
+					return ""
+				}
+				if len(apiKey) == 32 {
+					return HideKeyExceptLastFiveChars(apiKey)
+				}
+			}
+			return defaultReplacement
+		},
+	)
+	apiKeyYaml.LastUpdated = parseVersion("7.44.0") // https://github.com/DataDog/datadog-agent/pull/15707
+
+	appKeyYaml := matchYAMLOnly(
+		`ap(?:p|plication)_?key`,
+		func(data interface{}) interface{} {
+			if appKey, ok := data.(string); ok {
+				appKey := strings.TrimSpace(appKey)
+				if appKey == "" {
+					return ""
+				}
+				if len(appKey) == 40 {
+					return HideKeyExceptLastFiveChars(appKey)
+				}
+			}
+			return defaultReplacement
+		},
+	)
+	appKeyYaml.LastUpdated = parseVersion("7.44.0") // https://github.com/DataDog/datadog-agent/pull/15707
+
+	scrubber.AddReplacer(SingleLine, hintedAPIKeyReplacer)
+	scrubber.AddReplacer(SingleLine, hintedAPPKeyReplacer)
+	scrubber.AddReplacer(SingleLine, hintedBearerReplacer)
+	scrubber.AddReplacer(SingleLine, hintedBearerInvalidReplacer)
+	scrubber.AddReplacer(SingleLine, apiKeyReplacerYAML)
+	scrubber.AddReplacer(SingleLine, apiKeyReplacer)
+	scrubber.AddReplacer(SingleLine, appKeyReplacerYAML)
+	scrubber.AddReplacer(SingleLine, appKeyReplacer)
+	scrubber.AddReplacer(SingleLine, rcAppKeyReplacer)
+	scrubber.AddReplacer(SingleLine, uriPasswordReplacer)
+	scrubber.AddReplacer(SingleLine, yamlPasswordReplacer)
+	scrubber.AddReplacer(SingleLine, passwordReplacer)
+	scrubber.AddReplacer(SingleLine, tokenReplacer)
+	scrubber.AddReplacer(SingleLine, snmpReplacer)
+
+	scrubber.AddReplacer(SingleLine, apiKeyYaml)
+	scrubber.AddReplacer(SingleLine, appKeyYaml)
+
+	scrubber.AddReplacer(MultiLine, snmpMultilineReplacer)
+	scrubber.AddReplacer(MultiLine, certReplacer)
+
+	dynamicReplacersMutex.Lock()
+	for _, r := range dynamicReplacers {
+		scrubber.AddReplacer(SingleLine, r)
+	}
+	dynamicReplacersMutex.Unlock()
+}
+
+// The YAML helpers below produce replacers that work both on a YAML object (i.e. a map[interface{}]interface{}) and
+// on a serialized YAML string.
+
+func matchYAMLKeyPart(part string, hints []string, repl []byte) Replacer {
+	return Replacer{
+		Regex:        regexp.MustCompile(fmt.Sprintf(`(\s*(\w|_)*%s(\w|_)*\s*:).+`, part)),
+		YAMLKeyRegex: regexp.MustCompile(part),
+		Hints:        hints,
+		Repl:         repl,
+	}
+}
+
+func matchYAMLKey(key string, hints []string, repl []byte) Replacer {
+	return Replacer{
+		Regex:        regexp.MustCompile(fmt.Sprintf(`(\s*%s\s*:).+`, key)),
+		YAMLKeyRegex: regexp.MustCompile(fmt.Sprintf(`^%s$`, key)),
+		Hints:        hints,
+		Repl:         repl,
+	}
+}
+
+func matchYAMLKeyEnding(ending string, hints []string, repl []byte) Replacer {
+	return Replacer{
+		Regex:        regexp.MustCompile(fmt.Sprintf(`(^\s*(\w|_)*%s\s*:).+`, ending)),
+		YAMLKeyRegex: regexp.MustCompile(fmt.Sprintf(`^.*%s$`, ending)),
+		Hints:        hints,
+		Repl:         repl,
+	}
+}
+
+// This only works on a YAML object, not on serialized YAML data
+func matchYAMLOnly(key string, cb func(interface{}) interface{}) Replacer {
+	return Replacer{
+		YAMLKeyRegex: regexp.MustCompile(key),
+		ProcessValue: cb,
+	}
+}
+
+// matchYAMLKeyWithListValue matches YAML keys with array values.
+// Caveat: it doesn't work if the array contains nested arrays.
+//
+// Example:
+//
+//	key: [
+//	 [a, b, c],
+//	 def]
+func matchYAMLKeyWithListValue(key string, hints string, repl []byte) Replacer {
+	/*
+		Example 1:
+		network_devices:
+		  snmp_traps:
+		    community_strings:
+		    - 'pass1'
+		    - 'pass2'
+
+		Example 2:
+		network_devices:
+		  snmp_traps:
+		    community_strings: ['pass1', 'pass2']
+
+		Example 3:
+		network_devices:
+		  snmp_traps:
+		    community_strings: [
+		    'pass1',
+		    'pass2']
+	*/
+	return Replacer{
+		Regex: regexp.MustCompile(fmt.Sprintf(`(\s*%s\s*:)\s*(?:\n(?:\s+-\s+.*)*|\[(?:\n?.*?)*?\])`, key)),
+		/*                            -----------    ---------------  -------------
+		                              match key(s)   |                |
+		                                             match multiple   match anything
+		                                             lines starting   enclosed between `[` and `]`
+		                                             with `-`
+		*/
+		YAMLKeyRegex: regexp.MustCompile(key),
+		Hints:        []string{hints},
+		Repl:         repl,
+	}
+}
+
+// ScrubFile scrubs credentials from the given file, using the
+// default scrubber.
+func ScrubFile(filePath string) ([]byte, error) {
+	return DefaultScrubber.ScrubFile(filePath)
+}
+
+// ScrubBytes scrubs credentials from the given slice of bytes,
+// using the default scrubber.
+func ScrubBytes(file []byte) ([]byte, error) {
+	return DefaultScrubber.ScrubBytes(file)
+}
+
+// ScrubYaml scrubs credentials from the given YAML by loading the data and scrubbing the object instead of the
+// serialized string, using the default scrubber.
+func ScrubYaml(data []byte) ([]byte, error) {
+	return DefaultScrubber.ScrubYaml(data)
+}
+
+// ScrubYamlString scrubs credentials from the given YAML string by loading the data and scrubbing the object instead of
+// the serialized string, using the default scrubber.
+func ScrubYamlString(data string) (string, error) {
+	res, err := DefaultScrubber.ScrubYaml([]byte(data))
+	if err != nil {
+		return "", err
+	}
+	return string(res), nil
+}
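+// Illustrative example (not part of the vendored source): with the default
+// replacers, a 32-character api_key keeps only its last five characters
+// (the quoting style of the re-encoded YAML may vary):
+//
+//	in:  api_key: aaaaaaaaaaaaaaaaaaaaaaaaaaa12345
+//	out: api_key: '***************************12345'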
+// ScrubJSON scrubs credentials from the given JSON by loading the data and scrubbing the object instead of the
+// serialized string, using the default scrubber.
+func ScrubJSON(data []byte) ([]byte, error) {
+	return DefaultScrubber.ScrubJSON(data)
+}
+
+// ScrubJSONString scrubs credentials from the given JSON string by loading the data and scrubbing the object instead of
+// the serialized string, using the default scrubber.
+func ScrubJSONString(data string) (string, error) {
+	res, err := ScrubJSON([]byte(data))
+	if err != nil {
+		return "", err
+	}
+	return string(res), nil
+}
+
+// ScrubString scrubs credentials from the given string, using the default scrubber.
+func ScrubString(data string) (string, error) {
+	res, err := DefaultScrubber.ScrubBytes([]byte(data))
+	if err != nil {
+		return "", err
+	}
+	return string(res), nil
+}
+
+// ScrubLine scrubs credentials from a single line of text, using the default
+// scrubber. It can be safely applied to URLs or to strings containing URLs.
+// It does not run multi-line replacers, and should not be used on multi-line
+// inputs.
+func ScrubLine(url string) string {
+	return DefaultScrubber.ScrubLine(url)
+}
+
+// ScrubDataObj scrubs credentials from the data interface by recursively walking over all the nodes
+func ScrubDataObj(data *interface{}) {
+	DefaultScrubber.ScrubDataObj(data)
+}
+
+// HideKeyExceptLastFiveChars replaces all characters in the key with "*", except
+// for the last 5 characters. If the key is an unrecognized length, it replaces
+// all of it with the default string of "*"s instead.
+func HideKeyExceptLastFiveChars(key string) string {
+	if len(key) != 32 && len(key) != 40 {
+		return defaultReplacement
+	}
+	return strings.Repeat("*", len(key)-5) + key[len(key)-5:]
+}
+
+// AddStrippedKeys adds to the set of YAML keys that will be recognized and have their values stripped. It modifies
+// the DefaultScrubber directly, and the added keys are also inherited by any scrubbers created afterwards.
+func AddStrippedKeys(strippedKeys []string) {
+	// API and APP keys are already handled by default rules
+	strippedKeys = slices.Clone(strippedKeys)
+	strippedKeys = slices.DeleteFunc(strippedKeys, func(s string) bool {
+		return s == "api_key" || s == "app_key"
+	})
+
+	if len(strippedKeys) > 0 {
+		replacer := matchYAMLKey(
+			fmt.Sprintf("(%s)", strings.Join(strippedKeys, "|")),
+			strippedKeys,
+			[]byte(`$1 "********"`),
+		)
+		// We add the new replacer to the default scrubber and to the list of dynamicReplacers so any new
+		// scrubber will inherit it.
+		DefaultScrubber.AddReplacer(SingleLine, replacer)
+		dynamicReplacersMutex.Lock()
+		dynamicReplacers = append(dynamicReplacers, replacer)
+		dynamicReplacersMutex.Unlock()
+	}
+}
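The package-level helpers above make the default scrubber usable directly; a minimal sketch (the input literal is illustrative, and the expected output follows from the yamlPasswordReplacer defined earlier in this file):

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/scrubber"
)

func main() {
	// ScrubString runs the single-line and multi-line default replacers.
	out, err := scrubber.ScrubString("password: hunter2")
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // expected: password: "********"
}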
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/json_scrubber.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/json_scrubber.go
new file mode 100644
index 00000000..dbf624b1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/json_scrubber.go
@@ -0,0 +1,33 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package scrubber
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+)
+
+// ScrubJSON scrubs credentials from the given JSON by loading the data and scrubbing the
+// object instead of the serialized string.
+func (c *Scrubber) ScrubJSON(input []byte) ([]byte, error) {
+	var data *interface{}
+	err := json.Unmarshal(input, &data)
+
+	// if we can't load the JSON, run the default scrubber on the input
+	if len(input) != 0 && err == nil {
+		c.ScrubDataObj(data)
+
+		newInput, err := json.MarshalIndent(data, "", " ")
+		if err == nil {
+			return newInput, nil
+		}
+		// Since the scrubber is a dependency of the logger, we can't use it here.
+		fmt.Fprintf(os.Stderr, "error scrubbing json, falling back on text scrubber: %s\n", err)
+	}
+	return c.ScrubBytes(input)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/scrubber.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/scrubber.go
new file mode 100644
index 00000000..c37ede03
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/scrubber.go
@@ -0,0 +1,223 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package scrubber implements support for cleaning sensitive information out of strings
+// and files.
+//
+// # Compatibility
+//
+// This module's API is not yet stable, and may change incompatibly from version to version.
+package scrubber
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"os"
+	"regexp"
+
+	"github.com/DataDog/datadog-agent/pkg/version"
+)
+
+// Replacer represents a replacement of sensitive information with a "clean" version.
+type Replacer struct {
+	// Regex must match the sensitive information
+	Regex *regexp.Regexp
+	// YAMLKeyRegex matches the key of sensitive information in a dict/map. This is used when iterating over a
+	// map[string]interface{} to scrub the data for all matching keys before serialization.
+	YAMLKeyRegex *regexp.Regexp
+	// ProcessValue is a callback to be executed when YAMLKeyRegex matches the key of a map/dict in a YAML object. The
+	// value is passed to the function and replaced by the returned interface. This is useful for custom
+	// scrubbing. Example: keeping the last 5 digits of an api key.
+	ProcessValue func(data interface{}) interface{}
+	// Hints, if given, are strings which must also be present in the text for the regexp to match.
+	// Especially in single-line replacers, this can be used to limit the contexts where an otherwise
+	// very broad Regex is actually replaced.
+	Hints []string
+	// Repl is the text to replace the substring matching Regex. It can use the regexp package's
+	// replacement characters ($1, etc.) (see regexp#Regexp.ReplaceAll).
+	Repl []byte
+	// ReplFunc, if set, is called with the matched bytes (see regexp#Regexp.ReplaceAllFunc). Only
+	// one of Repl and ReplFunc should be set.
+	ReplFunc func(b []byte) []byte
+
+	// LastUpdated is the last version in which the replacer was updated. It is compared with the flare
+	// version on the Rapid side to decide whether to apply the replacer.
+	LastUpdated *version.Version
+}
+
+func parseVersion(versionString string) *version.Version {
+	v, err := version.New(versionString, "")
+	if err != nil {
+		panic(err)
+	}
+	return &v
+}
+
+// ReplacerKind modifies how a Replacer is applied
+type ReplacerKind int
+
+const (
+	// SingleLine indicates to Cleaner#AddReplacer that the replacer applies to
+	// single lines.
+ SingleLine ReplacerKind = iota + // MultiLine indicates to Cleaner#AddReplacer that the replacer applies to + // entire multiline text values. + MultiLine +) + +var commentRegex = regexp.MustCompile(`^\s*#.*$`) +var blankRegex = regexp.MustCompile(`^\s*$`) + +// Scrubber implements support for cleaning sensitive information out of strings +// and files. Its intended use is to "clean" data before it is logged or +// transmitted to a remote system, so that the meaning of the data remains +// clear without disclosing any sensitive information. +// +// Scrubber works by applying a set of replacers, in order. It first applies +// all SingleLine replacers to each non-comment, non-blank line of the input. +// +// Comments and blank lines are omitted. Comments are considered to begin with `#`. +// +// It then applies all MultiLine replacers to the entire text of the input. +type Scrubber struct { + singleLineReplacers []Replacer + multiLineReplacers []Replacer + + // shouldApply is a function that can be used to conditionally apply a replacer. + // If the function returns false, the replacer will not be applied. + shouldApply func(repl Replacer) bool +} + +// New creates a new scrubber with no replacers installed. +func New() *Scrubber { + return &Scrubber{ + singleLineReplacers: make([]Replacer, 0), + multiLineReplacers: make([]Replacer, 0), + } +} + +// NewWithDefaults creates a new scrubber with the default replacers installed. +func NewWithDefaults() *Scrubber { + s := New() + AddDefaultReplacers(s) + return s +} + +// AddReplacer adds a replacer of the given kind to the scrubber. +func (c *Scrubber) AddReplacer(kind ReplacerKind, replacer Replacer) { + switch kind { + case SingleLine: + c.singleLineReplacers = append(c.singleLineReplacers, replacer) + case MultiLine: + c.multiLineReplacers = append(c.multiLineReplacers, replacer) + } +} + +// SetShouldApply sets a condition function to the scrubber. If the function returns false, the replacer will not be applied. +func (c *Scrubber) SetShouldApply(shouldApply func(repl Replacer) bool) { + c.shouldApply = shouldApply +} + +// ScrubFile scrubs credentials from file given by pathname +func (c *Scrubber) ScrubFile(filePath string) ([]byte, error) { + file, err := os.Open(filePath) + if err != nil { + return nil, err + } + defer file.Close() + + var sizeHint int + stats, err := file.Stat() + if err == nil { + sizeHint = int(stats.Size()) + } + + return c.scrubReader(file, sizeHint) +} + +// ScrubBytes scrubs credentials from slice of bytes +func (c *Scrubber) ScrubBytes(data []byte) ([]byte, error) { + r := bytes.NewReader(data) + return c.scrubReader(r, r.Len()) +} + +// ScrubLine scrubs credentials from a single line of text. It can be safely +// applied to URLs or to strings containing URLs. It does not run multi-line +// replacers, and should not be used on multi-line inputs. +func (c *Scrubber) ScrubLine(message string) string { + return string(c.scrub([]byte(message), c.singleLineReplacers)) +} + +// scrubReader applies the cleaning algorithm to a Reader +func (c *Scrubber) scrubReader(file io.Reader, sizeHint int) ([]byte, error) { + var cleanedBuffer bytes.Buffer + if sizeHint > 0 { + cleanedBuffer.Grow(sizeHint) + } + + scanner := bufio.NewScanner(file) + if sizeHint+1 > bufio.MaxScanTokenSize { + buffer := make([]byte, 0, sizeHint+1) + scanner.Buffer(buffer, sizeHint+1) + } + + // First, we go through the file line by line, applying any + // single-line replacer that matches the line. 
+	first := true
+	for scanner.Scan() {
+		b := scanner.Bytes()
+		if blankRegex.Match(b) {
+			cleanedBuffer.WriteRune('\n')
+		} else if !commentRegex.Match(b) {
+			b = c.scrub(b, c.singleLineReplacers)
+			if !first {
+				cleanedBuffer.WriteRune('\n')
+			}
+
+			cleanedBuffer.Write(b)
+			first = false
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	// Then we apply multiline replacers on the cleaned file
+	cleanedFile := c.scrub(cleanedBuffer.Bytes(), c.multiLineReplacers)
+
+	return cleanedFile, nil
+}
+
+// scrub applies the given replacers to the given data.
+func (c *Scrubber) scrub(data []byte, replacers []Replacer) []byte {
+	for _, repl := range replacers {
+		if repl.Regex == nil {
+			// ignoring YAML-only replacers
+			continue
+		}
+
+		if c.shouldApply != nil && !c.shouldApply(repl) {
+			continue
+		}
+
+		containsHint := false
+		for _, hint := range repl.Hints {
+			if bytes.Contains(data, []byte(hint)) {
+				containsHint = true
+				break
+			}
+		}
+		if len(repl.Hints) == 0 || containsHint {
+			if repl.ReplFunc != nil {
+				data = repl.Regex.ReplaceAllFunc(data, repl.ReplFunc)
+			} else {
+				data = repl.Regex.ReplaceAll(data, repl.Repl)
+			}
+		}
+	}
+	return data
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/yaml_scrubber.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/yaml_scrubber.go
new file mode 100644
index 00000000..90f144c6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/yaml_scrubber.go
@@ -0,0 +1,126 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package scrubber
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+
+	"gopkg.in/yaml.v3"
+)
+
+type scrubCallback = func(string, interface{}) (bool, interface{})
+
+func walkSlice(data []interface{}, callback scrubCallback) {
+	for _, k := range data {
+		switch v := k.(type) {
+		case map[interface{}]interface{}:
+			walkHash(v, callback)
+		case []interface{}:
+			walkSlice(v, callback)
+		case map[string]interface{}:
+			walkStringMap(v, callback)
+		}
+	}
+}
+
+func walkHash(data map[interface{}]interface{}, callback scrubCallback) {
+	for k, v := range data {
+		if keyString, ok := k.(string); ok {
+			if match, newValue := callback(keyString, v); match {
+				data[keyString] = newValue
+				continue
+			}
+		}
+
+		switch v := data[k].(type) {
+		case map[interface{}]interface{}:
+			walkHash(v, callback)
+		case []interface{}:
+			walkSlice(v, callback)
+		}
+	}
+}
+
+func walkStringMap(data map[string]interface{}, callback scrubCallback) {
+	for k, v := range data {
+		if match, newValue := callback(k, v); match {
+			data[k] = newValue
+			continue
+		}
+		switch v := data[k].(type) {
+		case map[string]interface{}:
+			walkStringMap(v, callback)
+		case []interface{}:
+			walkSlice(v, callback)
+		}
+	}
+}
+
+// walk goes through the loaded data and calls callback on every string, allowing
+// the callback to overwrite the string value
+func walk(data *interface{}, callback scrubCallback) {
+	if data == nil {
+		return
+	}
+
+	switch v := (*data).(type) {
+	case map[interface{}]interface{}:
+		walkHash(v, callback)
+	case []interface{}:
+		walkSlice(v, callback)
+	case map[string]interface{}:
+		walkStringMap(v, callback)
+	}
+}
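+// Illustrative example (not part of the vendored source): with the default
+// replacers, scrubbing a decoded YAML object in place
+//
+//	var obj interface{} = map[string]interface{}{"password": "hunter2", "host": "db"}
+//	DefaultScrubber.ScrubDataObj(&obj)
+//
+// replaces the "password" value with "********" and leaves "host" untouched.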
+// ScrubDataObj scrubs credentials from the data interface by recursively walking over all the nodes
+func (c *Scrubber) ScrubDataObj(data *interface{}) {
+	walk(data, func(key string, value interface{}) (bool, interface{}) {
+		for _, replacer := range c.singleLineReplacers {
+			if replacer.YAMLKeyRegex == nil {
+				continue
+			}
+
+			if c.shouldApply != nil && !c.shouldApply(replacer) {
+				continue
+			}
+
+			if replacer.YAMLKeyRegex.Match([]byte(key)) {
+				if replacer.ProcessValue != nil {
+					return true, replacer.ProcessValue(value)
+				}
+				return true, defaultReplacement
+			}
+		}
+		return false, ""
+	})
+}
+
+// ScrubYaml scrubs credentials from the given YAML by loading the data and scrubbing the object instead of the
+// serialized string.
+func (c *Scrubber) ScrubYaml(input []byte) ([]byte, error) {
+	var data *interface{}
+	err := yaml.Unmarshal(input, &data)
+
+	// if we can't load the YAML, run the default scrubber on the input
+	if len(input) != 0 && err == nil {
+		c.ScrubDataObj(data)
+
+		var buffer bytes.Buffer
+		encoder := yaml.NewEncoder(&buffer)
+		encoder.SetIndent(2)
+		if err := encoder.Encode(&data); err != nil {
+			fmt.Fprintf(os.Stderr, "error scrubbing YAML, falling back on text scrubber: %s\n", err)
+		} else {
+			input = buffer.Bytes()
+		}
+		encoder.Close()
+	}
+	return c.ScrubBytes(input)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/version/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/version/LICENSE
new file mode 100644
index 00000000..b370545b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/version/LICENSE
@@ -0,0 +1,200 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship.
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-present Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/version/base.go b/vendor/github.com/DataDog/datadog-agent/pkg/version/base.go new file mode 100644 index 00000000..cca26667 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/version/base.go @@ -0,0 +1,31 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package version defines the version of the agent +package version + +// AgentVersion contains the version of the Agent. +// It is populated at build time using build flags, see get_version_ldflags in tasks/utils.py +var AgentVersion string + +// AgentPackageVersion contains the version of the datadog-agent package when installed by the updater. 
+// It has more info than AgentVersion and +// it is populated at build time using build flags, see get_version_ldflags in tasks/utils.py +var AgentPackageVersion string + +// Commit is populated with the short commit hash from which the Agent was built +var Commit string + +// AgentPayloadVersion is the versions of the agent-payload repository +// used to serialize to protobuf +var AgentPayloadVersion string + +var agentVersionDefault = "6.0.0" + +func init() { + if AgentVersion == "" { + AgentVersion = agentVersionDefault + } +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/version/version.go b/vendor/github.com/DataDog/datadog-agent/pkg/version/version.go new file mode 100644 index 00000000..58a233fd --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/version/version.go @@ -0,0 +1,95 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package version + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// Version holds SemVer infos for the agent and friends +type Version struct { + Major int64 + Minor int64 + Patch int64 + Pre string + Meta string + Commit string +} + +var versionRx = regexp.MustCompile(`(\d+\.\d+\.\d+)(\-[^\+]+)*(\+.+)*`) + +// Agent returns the Datadog Agent version. +func Agent() (Version, error) { + return New(AgentVersion, Commit) +} + +// New parses a version string like `0.0.0` and a commit identifier and returns a Version instance +func New(version, commit string) (Version, error) { + toks := versionRx.FindStringSubmatch(version) + if len(toks) == 0 || toks[0] != version { + // if regex didn't match or partially matched, raise an error + return Version{}, fmt.Errorf("Version string has wrong format") + } + + // split version info (group 1 in regexp) + parts := strings.Split(toks[1], ".") + major, _ := strconv.ParseInt(parts[0], 10, 64) + minor, _ := strconv.ParseInt(parts[1], 10, 64) + patch, _ := strconv.ParseInt(parts[2], 10, 64) + + // save Pre infos after removing leading `-` + pre := strings.Replace(toks[2], "-", "", 1) + + // save Meta infos after removing leading `+` + meta := strings.Replace(toks[3], "+", "", 1) + + av := Version{ + Major: major, + Minor: minor, + Patch: patch, + Pre: pre, + Meta: meta, + Commit: commit, + } + + return av, nil +} + +func (v *Version) String() string { + ver := v.GetNumber() + if v.Pre != "" { + ver = fmt.Sprintf("%s-%s", ver, v.Pre) + } + if v.Meta != "" { + ver = fmt.Sprintf("%s+%s", ver, v.Meta) + } + if v.Commit != "" { + if v.Meta != "" { + ver = fmt.Sprintf("%s.commit.%s", ver, v.Commit) + } else { + ver = fmt.Sprintf("%s+commit.%s", ver, v.Commit) + } + } + + return ver +} + +// GetNumber returns a string containing version numbers only, e.g. `0.0.0` +func (v *Version) GetNumber() string { + return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) +} + +// GetNumberAndPre returns a string containing version number and the pre only, e.g. 
`0.0.0-beta.1` +func (v *Version) GetNumberAndPre() string { + version := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) + if v.Pre != "" { + version = fmt.Sprintf("%s-%s", version, v.Pre) + } + return version +} diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/container_linux.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/container_linux.go index ad74f7ab..12513234 100644 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/container_linux.go +++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/container_linux.go @@ -35,9 +35,9 @@ const ( // ContainerRegexpStr defines the regexp used to match container IDs // ([0-9a-f]{64}) is standard container id used pretty much everywhere - // ([0-9a-f]{32}-[0-9]{10}) is container id used by AWS ECS + // ([0-9a-f]{32}-\d+) is container id used by AWS ECS // ([0-9a-f]{8}(-[0-9a-f]{4}){4}$) is container id used by Garden - containerRegexpStr = "([0-9a-f]{64})|([0-9a-f]{32}-[0-9]{10})|([0-9a-f]{8}(-[0-9a-f]{4}){4}$)" + containerRegexpStr = "([0-9a-f]{64})|([0-9a-f]{32}-\\d+)|([0-9a-f]{8}(-[0-9a-f]{4}){4}$)" // cIDRegexpStr defines the regexp used to match container IDs in /proc/self/mountinfo cIDRegexpStr = `.*/([^\s/]+)/(` + containerRegexpStr + `)/[\S]*hostname` @@ -184,23 +184,36 @@ func inodeForPath(path string) string { // internalInitContainerID initializes the container ID. // It can either be provided by the user or read from cgroups. -func internalInitContainerID(userProvidedID string, cgroupFallback bool) { +func internalInitContainerID(userProvidedID string, cgroupFallback, isHostCgroupNs bool) { initOnce.Do(func() { - if userProvidedID != "" { - containerID = userProvidedID + readCIDOrInode(userProvidedID, cgroupPath, selfMountInfoPath, defaultCgroupMountPath, cgroupFallback, isHostCgroupNs) + }) +} + +// readCIDOrInode reads the container ID from the user provided ID, cgroups or mountinfo. +func readCIDOrInode(userProvidedID, cgroupPath, selfMountInfoPath, defaultCgroupMountPath string, cgroupFallback, isHostCgroupNs bool) { + if userProvidedID != "" { + containerID = userProvidedID + return + } + + if cgroupFallback { + containerID = readContainerID(cgroupPath) + if containerID != "" { + return + } + + containerID = readMountinfo(selfMountInfoPath) + if containerID != "" { return } - if cgroupFallback { - isHostCgroupNs := isHostCgroupNamespace() - if isHostCgroupNs { - containerID = readContainerID(cgroupPath) - return - } - containerID = readMountinfo(selfMountInfoPath) - if containerID != "" { - containerID = getCgroupInode(defaultCgroupMountPath, cgroupPath) - } + // If we're in the host cgroup namespace, the cid should be retrievable in /proc/self/cgroup + // In private cgroup namespace, we can retrieve the cgroup controller inode. 
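For orientation, the version package vendored above parses a SemVer-style string with a single regexp and re-serializes it, attaching the commit as `+commit.<sha>` or `.commit.<sha>` depending on whether build metadata is already present. A minimal usage sketch, assuming only the API visible in this diff (the version string and commit hash below are illustrative):

    package main

    import (
        "fmt"

        "github.com/DataDog/datadog-agent/pkg/version"
    )

    func main() {
        // Illustrative inputs; any "<major>.<minor>.<patch>[-pre][+meta]" string parses.
        v, err := version.New("6.0.0-beta.1+meta", "abc1234")
        if err != nil {
            panic(err)
        }
        fmt.Println(v.GetNumber())       // 6.0.0
        fmt.Println(v.GetNumberAndPre()) // 6.0.0-beta.1
        fmt.Println(v.String())          // 6.0.0-beta.1+meta.commit.abc1234
    }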
+ if containerID == "" && isHostCgroupNs { + return } - }) + + containerID = getCgroupInode(defaultCgroupMountPath, cgroupPath) + } } diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/container_stub.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/container_stub.go index 5a143d19..29ab7f2c 100644 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/container_stub.go +++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/container_stub.go @@ -3,7 +3,11 @@ package statsd -var initContainerID = func(userProvidedID string, cgroupFallback bool) { +func isHostCgroupNamespace() bool { + return false +} + +var initContainerID = func(userProvidedID string, _, _ bool) { initOnce.Do(func() { if userProvidedID != "" { containerID = userProvidedID diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/options.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/options.go index 29e09800..e007505a 100644 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/options.go +++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/options.go @@ -373,9 +373,10 @@ func WithTelemetryAddr(addr string) Option { // WithoutOriginDetection disables the client origin detection. // When enabled, the client tries to discover its container ID and sends it to the Agent // to enrich the metrics with container tags. +// If the container id is not found and the client is running in a private cgroup namespace, the client +// sends the base cgroup controller inode. // Origin detection can also be disabled by configuring the environment variabe DD_ORIGIN_DETECTION_ENABLED=false // The client tries to read the container ID by parsing the file /proc/self/cgroup, this is not supported on Windows. -// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID. // // More on this here: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp func WithoutOriginDetection() Option { @@ -389,9 +390,9 @@ func WithoutOriginDetection() Option { // This feature requires Datadog Agent version >=6.35.0 && <7.0.0 or Agent versions >=7.35.0. // When enabled, the client tries to discover its container ID and sends it to the Agent // to enrich the metrics with container tags. -// Origin detection can be disabled by configuring the environment variabe DD_ORIGIN_DETECTION_ENABLED=false -// The client tries to read the container ID by parsing the file /proc/self/cgroup, this is not supported on Windows. -// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID. +// If the container id is not found and the client is running in a private cgroup namespace, the client +// sends the base cgroup controller inode. 
+// Origin detection can be disabled by configuring the environment variable DD_ORIGIN_DETECTION_ENABLED=false // // More on this here: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp func WithOriginDetection() Option { diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go index 33792a53..c0137b52 100644 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go +++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/statsd.go @@ -369,7 +369,6 @@ func parseAgentURL(agentURL string) string { } func createWriter(addr string, writeTimeout time.Duration, connectTimeout time.Duration) (Transport, string, error) { - addr = resolveAddr(addr) if addr == "" { return nil, "", errors.New("No address passed and autodetection from environment failed") } @@ -401,6 +400,7 @@ func New(addr string, options ...Option) (*Client, error) { return nil, err } + addr = resolveAddr(addr) w, writerType, err := createWriter(addr, o.writeTimeout, o.connectTimeout) if err != nil { return nil, err @@ -454,21 +454,14 @@ func newWithWriter(w Transport, o *Options, writerName string) (*Client, error) errorHandler: o.errorHandler, } - hasEntityID := false // Inject values of DD_* environment variables as global tags. for _, mapping := range ddEnvTagsMapping { if value := os.Getenv(mapping.envName); value != "" { - if mapping.envName == ddEntityID { - hasEntityID = true - } c.tags = append(c.tags, fmt.Sprintf("%s:%s", mapping.tagName, value)) } } - if !hasEntityID { - initContainerID(o.containerID, isOriginDetectionEnabled(o, hasEntityID)) - } - + initContainerID(o.containerID, isOriginDetectionEnabled(o), isHostCgroupNamespace()) isUDS := writerName == writerNameUDS if o.maxBytesPerPayload == 0 { @@ -888,16 +881,11 @@ func (c *Client) Close() error { // isOriginDetectionEnabled returns whether the clients should fill the container field. // -// If DD_ENTITY_ID is set, we don't send the container ID -// If a user-defined container ID is provided, we don't ignore origin detection -// as dd.internal.entity_id is prioritized over the container field for backward compatibility. -// If DD_ENTITY_ID is not set, we try to fill the container field automatically unless -// DD_ORIGIN_DETECTION_ENABLED is explicitly set to false. 
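Taken together, the options.go and statsd.go hunks drop the DD_ENTITY_ID special case: origin detection now stays on unless DD_ORIGIN_DETECTION_ENABLED=false is set, WithoutOriginDetection() is applied, or a container ID is passed explicitly, and address resolution moves out of createWriter into New. A hedged sketch of constructing a client under the new behavior (metric name and tags are illustrative):

    package main

    import (
        "log"

        "github.com/DataDog/datadog-go/v5/statsd"
    )

    func main() {
        // An empty address lets resolveAddr (now called from statsd.New) autodetect
        // the agent endpoint from the environment; "127.0.0.1:8125" would also work.
        client, err := statsd.New("", statsd.WithoutOriginDetection())
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        _ = client.Incr("webhook.received", []string{"provider:github"}, 1)
    }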
-func isOriginDetectionEnabled(o *Options, hasEntityID bool) bool { - if !o.originDetection || hasEntityID || o.containerID != "" { - // originDetection is explicitly disabled - // or DD_ENTITY_ID was found - // or a user-defined container ID was provided +// Disable origin detection only in one of the following cases: +// - DD_ORIGIN_DETECTION_ENABLED is explicitly set to false +// - o.originDetection is explicitly set to false, which is true by default +func isOriginDetectionEnabled(o *Options) bool { + if !o.originDetection || o.containerID != "" { return false } diff --git a/vendor/github.com/DataDog/datadog-go/v5/statsd/telemetry.go b/vendor/github.com/DataDog/datadog-go/v5/statsd/telemetry.go index 61025c37..feda764b 100644 --- a/vendor/github.com/DataDog/datadog-go/v5/statsd/telemetry.go +++ b/vendor/github.com/DataDog/datadog-go/v5/statsd/telemetry.go @@ -141,6 +141,7 @@ func newTelemetryClient(c *Client, aggregationEnabled bool) *telemetryClient { func newTelemetryClientWithCustomAddr(c *Client, telemetryAddr string, aggregationEnabled bool, pool *bufferPool, writeTimeout time.Duration, connectTimeout time.Duration, ) (*telemetryClient, error) { + telemetryAddr = resolveAddr(telemetryAddr) telemetryWriter, _, err := createWriter(telemetryAddr, writeTimeout, connectTimeout) if err != nil { return nil, fmt.Errorf("Could not resolve telemetry address: %v", err) diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/LICENSE b/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/LICENSE new file mode 100644 index 00000000..f760d366 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/LICENSE @@ -0,0 +1,234 @@ +## License + +This work is dual-licensed under Apache 2.0 or BSD3. +You may select, at your option, one of the above-listed licenses. + +`SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause` + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +--- + +Copyright (c) 2016-Present, Datadog +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Datadog nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/mux.go b/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/mux.go new file mode 100644 index 00000000..b440bc3b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/mux.go @@ -0,0 +1,147 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package mux provides tracing functions for tracing the gorilla/mux package (https://github.com/gorilla/mux). +package mux // import "github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2" + +import ( + "net/http" + + httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2" + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + "github.com/DataDog/dd-trace-go/v2/instrumentation" + instrhttptrace "github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace" + "github.com/DataDog/dd-trace-go/v2/instrumentation/options" + + "github.com/gorilla/mux" +) + +var instr *instrumentation.Instrumentation + +func init() { + instr = instrumentation.Load(instrumentation.PackageGorillaMux) +} + +// Router registers routes to be matched and dispatches a handler. +type Router struct { + *mux.Router + config *routerConfig +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will perform a redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for +// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed +// request will be made as a GET by most clients. Use middleware or client settings +// to modify this behaviour as needed. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. +func (r *Router) StrictSlash(value bool) *Router { + r.Router.StrictSlash(value) + return r +} + +// SkipClean defines the path cleaning behaviour for new routes. The initial +// value is false. Users should be careful about which routes are not cleaned +// +// When true, if the route path is "/path//to", it will remain with the double +// slash. 
This is helpful if you have a route like: /fetch/http://xkcd.com/534/ +// +// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will +// become /fetch/http/xkcd.com/534 +func (r *Router) SkipClean(value bool) *Router { + r.Router.SkipClean(value) + return r +} + +// UseEncodedPath tells the router to match the encoded original path +// to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". +// +// If not called, the router will match the unencoded path to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" +func (r *Router) UseEncodedPath() *Router { + r.Router.UseEncodedPath() + return r +} + +// NewRouter returns a new router instance traced with the global tracer. +func NewRouter(opts ...RouterOption) *Router { + return WrapRouter(mux.NewRouter(), opts...) +} + +// ServeHTTP dispatches the request to the handler +// whose pattern most closely matches the request URL. +// We only need to rewrite this function to be able to trace +// all the incoming requests to the underlying multiplexer +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if r.config.ignoreRequest(req) { + r.Router.ServeHTTP(w, req) + return + } + var ( + match mux.RouteMatch + route string + ) + spanopts := options.Expand(r.config.spanOpts, 0, 2) + // get the resource associated to this request + if r.Match(req, &match) && match.Route != nil { + if h, err := match.Route.GetHostTemplate(); err == nil { + spanopts = append(spanopts, tracer.Tag("mux.host", h)) + } + route, _ = match.Route.GetPathTemplate() + } + spanopts = append(spanopts, instrhttptrace.HeaderTagsFromRequest(req, r.config.headerTags)) + resource := r.config.resourceNamer(r, req) + httptrace.TraceAndServe(r.Router, w, req, &httptrace.ServeConfig{ + Framework: "github.com/gorilla/mux", + Service: r.config.serviceName, + Resource: resource, + FinishOpts: r.config.finishOpts, + SpanOpts: spanopts, + QueryParams: r.config.queryParams, + RouteParams: match.Vars, + Route: route, + IsStatusError: r.config.isStatusError, + }) +} + +// WrapRouter returns the given router wrapped with the tracing of the HTTP +// requests and responses served by the router. +func WrapRouter(router *mux.Router, opts ...RouterOption) *Router { + cfg := newConfig(opts) + cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.Component, instrumentation.PackageGorillaMux)) + cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.SpanKind, ext.SpanKindServer)) + instr.Logger().Debug("contrib/gorilla/mux: Configuring Router: %#v", cfg) + return &Router{ + Router: router, + config: cfg, + } +} + +// defaultResourceNamer attempts to quantize the resource for an HTTP request by +// retrieving the path template associated with the route from the request. 
+func defaultResourceNamer(router *Router, req *http.Request) string { + var match mux.RouteMatch + // get the resource associated with the given request + if router.Match(req, &match) && match.Route != nil { + if r, err := match.Route.GetPathTemplate(); err == nil { + return req.Method + " " + r + } + } + return req.Method + " unknown" +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/option.go b/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/option.go new file mode 100644 index 00000000..09c42357 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/option.go @@ -0,0 +1,149 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package mux + +import ( + "math" + "net/http" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + "github.com/DataDog/dd-trace-go/v2/instrumentation" +) + +type routerConfig struct { + serviceName string + spanOpts []tracer.StartSpanOption // additional span options to be applied + finishOpts []tracer.FinishOption // span finish options to be applied + analyticsRate float64 + resourceNamer func(*Router, *http.Request) string + ignoreRequest func(*http.Request) bool + queryParams bool + headerTags instrumentation.HeaderTags + isStatusError func(statusCode int) bool +} + +// RouterOption describes options for the Gorilla mux integration. +type RouterOption interface { + apply(config *routerConfig) +} + +// RouterOptionFn represents options applicable to NewRouter and WrapRouter. +type RouterOptionFn func(*routerConfig) + +func (fn RouterOptionFn) apply(cfg *routerConfig) { + fn(cfg) +} + +func newConfig(opts []RouterOption) *routerConfig { + cfg := new(routerConfig) + defaults(cfg) + for _, fn := range opts { + fn.apply(cfg) + } + if !math.IsNaN(cfg.analyticsRate) { + cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.EventSampleRate, cfg.analyticsRate)) + } + return cfg +} + +func defaults(cfg *routerConfig) { + cfg.analyticsRate = instr.AnalyticsRate(true) + cfg.headerTags = instr.HTTPHeadersAsTags() + cfg.serviceName = instr.ServiceName(instrumentation.ComponentServer, nil) + cfg.resourceNamer = defaultResourceNamer + cfg.ignoreRequest = func(_ *http.Request) bool { return false } +} + +// WithIgnoreRequest holds the function to use for determining if the +// incoming HTTP request tracing should be skipped. +func WithIgnoreRequest(f func(*http.Request) bool) RouterOptionFn { + return func(cfg *routerConfig) { + cfg.ignoreRequest = f + } +} + +// WithService sets the given service name for the router. +func WithService(name string) RouterOptionFn { + return func(cfg *routerConfig) { + cfg.serviceName = name + } +} + +// WithSpanOptions applies the given set of options to the spans started +// by the router. +func WithSpanOptions(opts ...tracer.StartSpanOption) RouterOptionFn { + return func(cfg *routerConfig) { + cfg.spanOpts = opts + } +} + +// NoDebugStack prevents stack traces from being attached to spans finishing +// with an error. This is useful in situations where errors are frequent and +// performance is critical. +func NoDebugStack() RouterOptionFn { + return func(cfg *routerConfig) { + cfg.finishOpts = append(cfg.finishOpts, tracer.NoDebugStack()) + } +} + +// WithAnalytics enables Trace Analytics for all started spans. 
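mux.go above traces every matched request through httptrace.TraceAndServe, naming the resource from the route's path template (e.g. `GET /hooks/{provider}`). A minimal sketch of serving through the traced router; the route and port are illustrative, and tracer.Start/Stop come from the core dd-trace-go v2 module rather than this excerpt:

    package main

    import (
        "fmt"
        "log"
        "net/http"

        muxtrace "github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2"
        "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
        "github.com/gorilla/mux"
    )

    func main() {
        // Core v2 tracer API, outside this diff.
        if err := tracer.Start(); err != nil {
            log.Fatal(err)
        }
        defer tracer.Stop()

        r := muxtrace.NewRouter() // embeds *mux.Router, so its methods are promoted
        r.HandleFunc("/hooks/{provider}", func(w http.ResponseWriter, req *http.Request) {
            fmt.Fprintf(w, "ok: %s", mux.Vars(req)["provider"])
        })
        _ = http.ListenAndServe(":8080", r)
    }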
+func WithAnalytics(on bool) RouterOptionFn { + return func(cfg *routerConfig) { + if on { + cfg.analyticsRate = 1.0 + } else { + cfg.analyticsRate = math.NaN() + } + } +} + +// WithAnalyticsRate sets the sampling rate for Trace Analytics events +// correlated to started spans. +func WithAnalyticsRate(rate float64) RouterOptionFn { + return func(cfg *routerConfig) { + if rate >= 0.0 && rate <= 1.0 { + cfg.analyticsRate = rate + } else { + cfg.analyticsRate = math.NaN() + } + } +} + +// WithResourceNamer specifies a quantizing function which will be used to +// obtain the resource name for a given request. +func WithResourceNamer(namer func(router *Router, req *http.Request) string) RouterOptionFn { + return func(cfg *routerConfig) { + cfg.resourceNamer = namer + } +} + +// WithHeaderTags enables the integration to attach HTTP request headers as span tags. +// Warning: +// Using this feature can risk exposing sensitive data such as authorization tokens to Datadog. +// Special headers can not be sub-selected. E.g., an entire Cookie header would be transmitted, without the ability to choose specific Cookies. +func WithHeaderTags(headers []string) RouterOptionFn { + return func(cfg *routerConfig) { + cfg.headerTags = instrumentation.NewHeaderTags(headers) + } +} + +// WithQueryParams specifies that the integration should attach request query parameters as APM tags. +// Warning: using this feature can risk exposing sensitive data such as authorization tokens +// to Datadog. +func WithQueryParams() RouterOptionFn { + return func(cfg *routerConfig) { + cfg.queryParams = true + } +} + +// WithStatusCheck specifies a function fn which reports whether the passed +// statusCode should be considered an error. +func WithStatusCheck(fn func(statusCode int) bool) RouterOptionFn { + return func(cfg *routerConfig) { + cfg.isStatusError = fn + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/orchestrion.yml b/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/orchestrion.yml new file mode 100644 index 00000000..723019c2 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2/orchestrion.yml @@ -0,0 +1,143 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the Apache License Version 2.0. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2023-present Datadog, Inc. +--- +# yaml-language-server: $schema=https://datadoghq.dev/orchestrion/schema.json +meta: + name: github.com/DataDog/dd-trace-go/contrib/gorilla/mux/v2 + description: Package gorilla/mux implements a request router and dispatcher for matching incoming requests to their respective handler. + +aspects: + # TODO(romain.marcadier): This is a temporary solution to instrument + # mux.Router without doing any refactor work in dd-trace-go at the moment. It + # contains a lot of code copied from the contrib that should be refactored so + # it can be re-used instead. + - id: Router.__dd_config + join-point: + struct-definition: github.com/gorilla/mux.Router + advice: + - inject-declarations: + imports: + http: net/http + instrumentation: github.com/DataDog/dd-trace-go/v2/instrumentation + tracer: github.com/DataDog/dd-trace-go/v2/ddtrace/tracer + lang: go1.18 # some parts of our codebase use generics, so ensure we can build if using old versions of gorilla/mux (e.g. if using a replace). 
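option.go above is the full RouterOption surface of the integration; a short sketch combining a few of those options when wrapping an existing router (the service name and health-check path are illustrative, and the imports match the previous sketch):

    // Assumes the same imports as the previous sketch, plus "net/http".
    func newTracedRouter() *muxtrace.Router {
        return muxtrace.WrapRouter(mux.NewRouter(),
            muxtrace.WithService("bitrise-webhooks"), // illustrative service name
            muxtrace.WithIgnoreRequest(func(r *http.Request) bool {
                return r.URL.Path == "/healthz" // skip tracing health checks
            }),
            muxtrace.WithStatusCheck(func(code int) bool {
                return code >= 500 // only 5xx responses mark the span as an error
            }),
        )
    }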
+ template: |- + var __dd_instr *instrumentation.Instrumentation + + func init() { + __dd_instr = instrumentation.Load(instrumentation.PackageGorillaMux) + } + + type ddRouterConfig struct { + ignoreRequest func(*http.Request) bool + headerTags instrumentation.HeaderTags + resourceNamer func(*Router, *http.Request) string + serviceName string + spanOpts []tracer.StartSpanOption + } + + func ddDefaultResourceNamer(router *Router, req *http.Request) string { + var ( + match RouteMatch + route = "unknown" + ) + if router.Match(req, &match) && match.Route != nil { + if r, err := match.Route.GetPathTemplate(); err == nil { + route = r + } + } + return fmt.Sprintf("%s %s", req.Method, route) + } + - add-struct-field: + name: __dd_config + type: ddRouterConfig + + - id: NewRouter + join-point: + all-of: + - import-path: github.com/gorilla/mux + - function-body: + function: + - name: NewRouter + advice: + - prepend-statements: + imports: + ext: github.com/DataDog/dd-trace-go/v2/ddtrace/ext + http: net/http + math: math + tracer: github.com/DataDog/dd-trace-go/v2/ddtrace/tracer + instrumentation: github.com/DataDog/dd-trace-go/v2/instrumentation + lang: go1.18 # some parts of our codebase use generics, so ensure we can build if using old versions of gorilla/mux (e.g. if using a replace). + template: |- + {{- $res := .Function.Result 0 -}} + defer func() { + analyticsRate := __dd_instr.AnalyticsRate(true) + {{ $res }}.__dd_config.headerTags = __dd_instr.HTTPHeadersAsTags() + {{ $res }}.__dd_config.serviceName = __dd_instr.ServiceName(instrumentation.ComponentServer, nil) + {{ $res }}.__dd_config.resourceNamer = ddDefaultResourceNamer + {{ $res }}.__dd_config.ignoreRequest = func(_ *http.Request) bool { return false } + + {{ $res }}.__dd_config.spanOpts = []tracer.StartSpanOption{ + tracer.Tag(ext.Component, instrumentation.PackageGorillaMux), + tracer.Tag(ext.SpanKind, ext.SpanKindServer), + } + if !math.IsNaN(analyticsRate) { + {{ $res }}.__dd_config.spanOpts = append( + {{ $res }}.__dd_config.spanOpts, + tracer.Tag(ext.EventSampleRate, analyticsRate), + ) + } + }() + + - id: Router.ServeHTTP + join-point: + function-body: + function: + - receiver: '*github.com/gorilla/mux.Router' + - name: ServeHTTP + advice: + - prepend-statements: + imports: + http: net/http + httptrace: github.com/DataDog/dd-trace-go/contrib/net/http/v2 + instrhttptrace: github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace + options: github.com/DataDog/dd-trace-go/v2/instrumentation/options + tracer: github.com/DataDog/dd-trace-go/v2/ddtrace/tracer + lang: go1.18 # some parts of our codebase use generics, so ensure we can build if using old versions of gorilla/mux (e.g. if using a replace). 
+ template: |- + {{- $r := .Function.Receiver -}} + {{- $w := .Function.Argument 0 -}} + {{- $req := .Function.Argument 1 -}} + if !{{ $r }}.__dd_config.ignoreRequest({{ $req }}) { + var ( + match RouteMatch + route string + spanOpts = options.Copy({{ $r }}.__dd_config.spanOpts) + ) + if {{ $r }}.Match({{ $req }}, &match) && match.Route != nil { + if h, err := match.Route.GetHostTemplate(); err == nil { + spanOpts = append(spanOpts, tracer.Tag("mux.host", h)) + } + route, _ = match.Route.GetPathTemplate() + } + spanOpts = append(spanOpts, instrhttptrace.HeaderTagsFromRequest({{ $req }}, {{ $r }}.__dd_config.headerTags)) + resource := {{ $r }}.__dd_config.resourceNamer({{ $r }}, {{ $req }}) + + // This is a temporary workaround/hack to prevent endless recursion via httptrace.TraceAndServe, which + // basically implies passing a shallow copy of this router that ignores all requests down to + // httptrace.TraceAndServe. + var rCopy Router + rCopy = *{{ $r }} + rCopy.__dd_config.ignoreRequest = func(*http.Request) bool { return true } + + httptrace.TraceAndServe(&rCopy, {{ $w }}, {{ $req }}, &httptrace.ServeConfig{ + Service: {{ $r }}.__dd_config.serviceName, + Resource: resource, + SpanOpts: spanOpts, + RouteParams: match.Vars, + Route: route, + }) + return + } diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/LICENSE b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/LICENSE new file mode 100644 index 00000000..f760d366 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/LICENSE @@ -0,0 +1,234 @@ +## License + +This work is dual-licensed under Apache 2.0 or BSD3. +You may select, at your option, one of the above-listed licenses. + +`SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause` + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +--- + +Copyright (c) 2016-Present, Datadog +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Datadog nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/client/client.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/client/client.go new file mode 100644 index 00000000..de34911c --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/client/client.go @@ -0,0 +1,51 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package client provides [context.Context]-aware alternatives to the +// short-hand request functions [http.Get], [http.Head], [http.Post], and +// [http.PostForm]. Using these functions allows for better control over the +// trace context propagation. +package client + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// Get is a [context.Context] aware version of [http.Get]. +func Get(ctx context.Context, url string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + return http.DefaultClient.Do(req) +} + +// Head is a [context.Context] aware version of [http.Head]. +func Head(ctx context.Context, url string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil) + if err != nil { + return nil, err + } + return http.DefaultClient.Do(req) +} + +// Post is a [context.Context] aware version of [http.Post]. +func Post(ctx context.Context, url string, contentType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", contentType) + return http.DefaultClient.Do(req) +} + +// PostForm is a [context.Context] aware version of [http.PostForm]. +func PostForm(ctx context.Context, url string, data url.Values) (resp *http.Response, err error) { + return Post(ctx, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/http.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/http.go new file mode 100644 index 00000000..17739767 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/http.go @@ -0,0 +1,27 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package http provides functions to trace the net/http package (https://golang.org/pkg/net/http). 
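The client package above only adds context awareness to the stdlib short-hands: each call builds its request with http.NewRequestWithContext and sends it through http.DefaultClient, so the caller's deadline, cancellation, and any trace context carried by ctx apply. A brief sketch (the URL is illustrative):

    package main

    import (
        "context"
        "fmt"
        "log"
        "time"

        httpclient "github.com/DataDog/dd-trace-go/contrib/net/http/v2/client"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()

        resp, err := httpclient.Get(ctx, "https://example.com/ping")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }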
+package http // import "github.com/DataDog/dd-trace-go/contrib/net/http/v2" + +import ( + "net/http" + + "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap" +) + +type ServeMux = wrap.ServeMux + +// NewServeMux allocates and returns an http.ServeMux augmented with the +// global tracer. +func NewServeMux(opts ...Option) *ServeMux { + return wrap.NewServeMux(opts...) +} + +// WrapHandler wraps an http.Handler with tracing using the given service and resource. +// If the WithResourceNamer option is provided as part of opts, it will take precedence over the resource argument. +func WrapHandler(h http.Handler, service, resource string, opts ...Option) http.Handler { + return wrap.Handler(h, service, resource, opts...) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/config.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/config.go new file mode 100644 index 00000000..176f1b55 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/config.go @@ -0,0 +1,116 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package config + +import ( + "math" + "net/http" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + "github.com/DataDog/dd-trace-go/v2/instrumentation" + "github.com/DataDog/dd-trace-go/v2/instrumentation/options" +) + +const ComponentName = instrumentation.PackageNetHTTP + +// Option describes options for http.ServeMux. +type Option interface { + apply(*Config) +} + +// OptionFn represents options applicable to NewServeMux and WrapHandler. +type OptionFn func(*CommonConfig) + +func (o OptionFn) apply(cfg *Config) { + o(&cfg.CommonConfig) +} + +func (o OptionFn) applyRoundTripper(cfg *RoundTripperConfig) { + o(&cfg.CommonConfig) +} + +type HandlerOptionFn func(*Config) + +func (o HandlerOptionFn) apply(cfg *Config) { + o(cfg) +} + +type CommonConfig struct { + AnalyticsRate float64 + IgnoreRequest func(*http.Request) bool + ServiceName string + ResourceNamer func(*http.Request) string + SpanOpts []tracer.StartSpanOption + IsStatusError func(int) bool +} + +type Config struct { + CommonConfig + FinishOpts []tracer.FinishOption + HeaderTags instrumentation.HeaderTags +} + +func (c *Config) ApplyOpts(opts ...Option) { + for _, fn := range opts { + fn.apply(c) + } +} + +func Default(instr *instrumentation.Instrumentation) *Config { + cfg := new(Config) + if options.GetBoolEnv("DD_TRACE_HTTP_ANALYTICS_ENABLED", false) { + cfg.AnalyticsRate = 1.0 + } else { + cfg.AnalyticsRate = instr.AnalyticsRate(true) + } + cfg.ServiceName = instr.ServiceName(instrumentation.ComponentServer, nil) + cfg.HeaderTags = instr.HTTPHeadersAsTags() + cfg.SpanOpts = []tracer.StartSpanOption{tracer.Measured()} + if !math.IsNaN(cfg.AnalyticsRate) { + cfg.SpanOpts = append(cfg.SpanOpts, tracer.Tag(ext.EventSampleRate, cfg.AnalyticsRate)) + } + cfg.IgnoreRequest = func(_ *http.Request) bool { return false } + cfg.ResourceNamer = func(_ *http.Request) string { return "" } + return cfg +} + +// A RoundTripperBeforeFunc can be used to modify a span before an http +// RoundTrip is made. +type RoundTripperBeforeFunc func(*http.Request, *tracer.Span) + +// A RoundTripperAfterFunc can be used to modify a span after an http +// RoundTrip is made. 
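http.go above re-exports the wrap package's ServeMux and handler wrapper as the two server-side entry points. A hedged sketch of both (service and resource names are illustrative, and ServeMux is assumed to promote the standard http.ServeMux registration methods):

    package main

    import (
        "net/http"

        httptrace "github.com/DataDog/dd-trace-go/contrib/net/http/v2"
    )

    func main() {
        mux := httptrace.NewServeMux() // traced drop-in for http.NewServeMux
        mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
            _, _ = w.Write([]byte("pong"))
        })

        // Wrapping a pre-existing handler with an explicit service/resource pair.
        static := httptrace.WrapHandler(http.FileServer(http.Dir("./public")),
            "bitrise-webhooks", "GET /static")
        mux.Handle("/static/", static)

        _ = http.ListenAndServe(":8080", mux)
    }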
It is possible for the http Response to be nil. +type RoundTripperAfterFunc func(*http.Response, *tracer.Span) + +type RoundTripperConfig struct { + CommonConfig + Before RoundTripperBeforeFunc + After RoundTripperAfterFunc + SpanNamer func(req *http.Request) string + Propagation bool + ErrCheck func(err error) bool + QueryString bool // reports whether the query string is included in the URL tag for http client spans + ClientTimings bool // reports whether httptrace.ClientTrace should be enabled for detailed timing +} + +func (c *RoundTripperConfig) ApplyOpts(opts ...RoundTripperOption) { + for _, fn := range opts { + fn.applyRoundTripper(c) + } +} + +// RoundTripperOption describes options for http.RoundTripper. +type RoundTripperOption interface { + applyRoundTripper(*RoundTripperConfig) +} + +// RoundTripperOptionFn represents options applicable to WrapClient and WrapRoundTripper. +type RoundTripperOptionFn func(*RoundTripperConfig) + +func (o RoundTripperOptionFn) applyRoundTripper(cfg *RoundTripperConfig) { + o(cfg) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/const.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/const.go new file mode 100644 index 00000000..07685b38 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/const.go @@ -0,0 +1,15 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package config + +const ( + // EnvClientQueryStringEnabled is the name of the env var used to specify whether query string collection is enabled for http client spans. + EnvClientQueryStringEnabled = "DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING" + // EnvClientErrorStatuses is the name of the env var that specifies error status codes on http client spans + EnvClientErrorStatuses = "DD_TRACE_HTTP_CLIENT_ERROR_STATUSES" + // EnvQueryStringRegexp is the name of the env var used to specify the regexp to use for query string obfuscation. + EnvQueryStringRegexp = "DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP" +) diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/instrumentation.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/instrumentation.go new file mode 100644 index 00000000..9f806bec --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/instrumentation.go @@ -0,0 +1,14 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
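For orientation, the env vars declared in const.go above are read when the integration builds its configuration. A sketch with illustrative values; the range syntax for error statuses is an assumption based on the httptrace.GetErrorCodesFromInput parser referenced later in option.go, and these would normally be set in the process environment rather than via os.Setenv:

```go
package main

import "os"

func main() {
	// Drop query strings from the http.url tag on client spans.
	os.Setenv("DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING", "false")
	// Widen which statuses mark client spans as errors (illustrative range).
	os.Setenv("DD_TRACE_HTTP_CLIENT_ERROR_STATUSES", "400-599")
}
```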
+ +package config + +import "github.com/DataDog/dd-trace-go/v2/instrumentation" + +var Instrumentation *instrumentation.Instrumentation + +func init() { + Instrumentation = instrumentation.Load(instrumentation.PackageNetHTTP) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/options.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/options.go new file mode 100644 index 00000000..9d0a1822 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config/options.go @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package config + +import ( + "net/http" +) + +// WithResourceNamer populates the name of a resource based on a custom function. +func WithResourceNamer(namer func(req *http.Request) string) OptionFn { + return func(cfg *CommonConfig) { + cfg.ResourceNamer = namer + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/orchestrion/roundtrip.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/orchestrion/roundtrip.go new file mode 100644 index 00000000..aa6fa4ad --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/orchestrion/roundtrip.go @@ -0,0 +1,68 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package orchestrion + +import ( + "fmt" + "net/http" + "sync" + + "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config" + "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap" + "github.com/DataDog/dd-trace-go/v2/instrumentation" + "github.com/DataDog/dd-trace-go/v2/instrumentation/env" + "github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace" + "github.com/DataDog/dd-trace-go/v2/instrumentation/options" +) + +func ObserveRoundTrip(req *http.Request) (*http.Request, wrap.AfterRoundTrip, error) { + return wrap.ObserveRoundTrip(defaultRoundTripperConfig(), req) +} + +var ( + cfg *config.RoundTripperConfig + cfgOnce sync.Once +) + +func defaultRoundTripperConfig() *config.RoundTripperConfig { + cfgOnce.Do(func() { + cfg = &config.RoundTripperConfig{ + CommonConfig: config.CommonConfig{ + AnalyticsRate: func() float64 { + if options.GetBoolEnv("DD_TRACE_HTTP_ANALYTICS_ENABLED", false) { + return 1.0 + } else { + return config.Instrumentation.AnalyticsRate(true) + } + }(), + IgnoreRequest: func(*http.Request) bool { return false }, + ResourceNamer: func() func(req *http.Request) string { + if options.GetBoolEnv("DD_TRACE_HTTP_CLIENT_RESOURCE_NAME_QUANTIZE", false) { + return func(req *http.Request) string { + return fmt.Sprintf("%s %s", req.Method, httptrace.QuantizeURL(req.URL.Path)) + } + } + + return func(req *http.Request) string { return fmt.Sprintf("%s %s", req.Method, req.URL.Path) } + }(), + IsStatusError: func() func(int) bool { + envVal := env.Get(config.EnvClientErrorStatuses) + if fn := httptrace.GetErrorCodesFromInput(envVal); fn != nil { + return fn + } + return func(statusCode int) bool { return statusCode >= 400 && statusCode < 500 } + }(), + ServiceName: config.Instrumentation.ServiceName(instrumentation.ComponentClient, nil), + }, + Propagation: true, + 
QueryString: options.GetBoolEnv(config.EnvClientQueryStringEnabled, true), + SpanNamer: func(*http.Request) string { + return config.Instrumentation.OperationName(instrumentation.ComponentClient, nil) + }, + } + }) + return cfg +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/orchestrion/wrap-handler.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/orchestrion/wrap-handler.go new file mode 100644 index 00000000..6472a4b3 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/orchestrion/wrap-handler.go @@ -0,0 +1,40 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package orchestrion + +import ( + "fmt" + "net/http" + + "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config" + "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap" + "github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace" + "github.com/DataDog/dd-trace-go/v2/instrumentation/options" +) + +func WrapHandler(handler http.Handler) http.Handler { + switch handler := handler.(type) { + case *wrap.ServeMux, wrap.WrappedHandler: + return handler + case *http.ServeMux: + tracedMux := wrap.NewServeMux() + tracedMux.ServeMux = handler + return tracedMux + default: + if options.GetBoolEnv("DD_TRACE_HTTP_HANDLER_RESOURCE_NAME_QUANTIZE", false) { + return wrap.Handler(handler, "", "", config.WithResourceNamer(quantizeResourceNamer)) + } + return wrap.Handler(handler, "", "", config.WithResourceNamer(resourceNamer)) + } +} + +func resourceNamer(r *http.Request) string { + return fmt.Sprintf("%s %s", r.Method, r.URL.Path) +} + +func quantizeResourceNamer(r *http.Request) string { + return fmt.Sprintf("%s %s", r.Method, httptrace.QuantizeURL(r.URL.Path)) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/pattern/pattern.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/pattern/pattern.go new file mode 100644 index 00000000..b58a0ab1 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/pattern/pattern.go @@ -0,0 +1,172 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package pattern + +import ( + "errors" + "fmt" + "net/http" + "strings" + "unicode" + + internal "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config" + "github.com/puzpuzpuz/xsync/v3" +) + +// Route returns the route part of a go1.22 style ServeMux pattern. I.e. +// it returns "/foo" for the pattern "/foo" as well as the pattern "GET /foo". +func Route(s string) string { + // Support go1.22 serve mux patterns: [METHOD ][HOST]/[PATH] + // Consider any text before a space or tab to be the method of the pattern. + // See net/http.parsePattern and the link below for more information. + // https://pkg.go.dev/net/http#hdr-Patterns-ServeMux + if i := strings.IndexAny(s, " \t"); i > 0 && len(s) >= i+1 { + return strings.TrimLeft(s[i+1:], " \t") + } + return s +} + +// PathParameters returns the path parameter names and values from the request.
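Since pattern is an internal package, here is a standalone sketch that mirrors Route's logic above, to make the go1.22 pattern handling concrete:

```go
package main

import (
	"fmt"
	"strings"
)

// route mirrors pattern.Route: strip the optional "[METHOD] " prefix
// from a go1.22 ServeMux pattern, keeping [HOST]/[PATH].
func route(s string) string {
	if i := strings.IndexAny(s, " \t"); i > 0 && len(s) >= i+1 {
		return strings.TrimLeft(s[i+1:], " \t")
	}
	return s
}

func main() {
	fmt.Println(route("/foo"))                 // "/foo"
	fmt.Println(route("GET /foo"))             // "/foo"
	fmt.Println(route("GET example.com/{id}")) // "example.com/{id}"
}
```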
+func PathParameters(pattern string, request *http.Request) map[string]string { + if pattern == "" { + return nil + } + names := patternNames(pattern) + res := make(map[string]string, len(names)) + for _, name := range names { + res[name] = request.PathValue(name) + } + return res +} + +var patternSegmentsCache = xsync.NewMapOf[string, []string]() + +func patternNames(pattern string) []string { + v, _ := patternSegmentsCache.LoadOrCompute(pattern, func() []string { + segments, err := parsePatternNames(pattern) + if err != nil { + // Ignore the error: Something has gone wrong, but we are not eager to find out why. + // We just log it as a telemetry log warning (and as a Debug message in the user-facing log). + internal.Instrumentation.Logger().Warn("instrumentation/net/http/pattern: failed to parse mux path pattern %q: %s", pattern, err.Error()) + // Here we fall through instead of returning, so a nil value is loaded into the cache and the pattern is not reparsed. + } + return segments + }) + return v +} + +// parsePatternNames returns the names of the wildcards in the pattern. +// Based on https://cs.opensource.google/go/go/+/refs/tags/go1.23.4:src/net/http/pattern.go;l=84 +// but very simplified as we know that the pattern returned must be valid or `net/http` would have panicked earlier. +// +// The pattern string's syntax is +// +// [METHOD] [HOST]/[PATH] +// +// where: +// - METHOD is an HTTP method +// - HOST is a hostname +// - PATH consists of slash-separated segments, where each segment is either +// a literal or a wildcard of the form "{name}", "{name...}", or "{$}". +// +// METHOD, HOST and PATH are all optional; that is, the string can be "/". +// If METHOD is present, it must be followed by at least one space or tab. +// Wildcard names must be valid Go identifiers. +// The "{$}" and "{name...}" wildcard must occur at the end of PATH. +// PATH may end with a '/'. +// Wildcard names in a path must be distinct. +// +// Some examples could be: +// - "/foo/{bar}" returns ["bar"] +// - "/foo/{bar}/{baz}" returns ["bar", "baz"] +// - "/foo" returns [] +func parsePatternNames(pattern string) ([]string, error) { + if len(pattern) == 0 { + return nil, errors.New("empty pattern") + } + method, rest, found := pattern, "", false + if i := strings.IndexAny(pattern, " \t"); i >= 0 { + method, rest, found = pattern[:i], strings.TrimLeft(pattern[i+1:], " \t"), true + } + if !found { + rest = method + method = "" + } + + i := strings.IndexByte(rest, '/') + if i < 0 { + return nil, errors.New("host/path missing /") + } + host := rest[:i] + rest = rest[i:] + if j := strings.IndexByte(host, '{'); j >= 0 { + return nil, errors.New("host contains '{' (missing initial '/'?)") + } + + // At this point, rest is the path. + var names []string + seenNames := make(map[string]bool) + for len(rest) > 0 { + // Invariant: rest[0] == '/'. + rest = rest[1:] + if len(rest) == 0 { + // Trailing slash. + break + } + i := strings.IndexByte(rest, '/') + if i < 0 { + i = len(rest) + } + var seg string + seg, rest = rest[:i], rest[i:] + if i := strings.IndexByte(seg, '{'); i >= 0 { + // Wildcard.
+ if i != 0 { + return nil, errors.New("bad wildcard segment (must start with '{')") + } + if seg[len(seg)-1] != '}' { + return nil, errors.New("bad wildcard segment (must end with '}')") + } + name := seg[1 : len(seg)-1] + if name == "$" { + if len(rest) != 0 { + return nil, errors.New("{$} not at end") + } + break + } + name, multi := strings.CutSuffix(name, "...") + if multi && len(rest) != 0 { + return nil, errors.New("{...} wildcard not at end") + } + if name == "" { + return nil, errors.New("empty wildcard name") + } + if !isValidWildcardName(name) { + return nil, fmt.Errorf("bad wildcard name %q", name) + } + if seenNames[name] { + return nil, fmt.Errorf("duplicate wildcard name %q", name) + } + seenNames[name] = true + names = append(names, name) + } + } + + return names, nil +} + +func isValidWildcardName(s string) bool { + if s == "" { + return false + } + // Valid Go identifier. + for i, c := range s { + if !unicode.IsLetter(c) && c != '_' && (i == 0 || !unicode.IsDigit(c)) { + return false + } + } + return true +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/handler.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/handler.go new file mode 100644 index 00000000..1a4e9942 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/handler.go @@ -0,0 +1,59 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package wrap + +import ( + "net/http" + + internal "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config" + "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/pattern" + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + "github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace" +) + +type WrappedHandler struct { + http.HandlerFunc +} + +// Handler wraps an [http.Handler] with tracing using the given service and resource. +// If the WithResourceNamer option is provided as part of opts, it will take precedence over the resource argument. +func Handler(h http.Handler, service, resource string, opts ...internal.Option) http.Handler { + instr := internal.Instrumentation + cfg := internal.Default(instr) + cfg.ApplyOpts(opts...) 
+ cfg.SpanOpts = append(cfg.SpanOpts, tracer.Tag(ext.SpanKind, ext.SpanKindServer)) + cfg.SpanOpts = append(cfg.SpanOpts, tracer.Tag(ext.Component, internal.ComponentName)) + instr.Logger().Debug("contrib/net/http: Wrapping Handler: Service: %s, Resource: %s, %#v", service, resource, cfg) + // if the service provided from parameters is empty, + // use the one from the config (which should default to DD_SERVICE / "http.router") + if service == "" { + service = cfg.ServiceName + } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if cfg.IgnoreRequest(req) { + h.ServeHTTP(w, req) + return + } + resc := resource + if r := cfg.ResourceNamer(req); r != "" { + resc = r + } + so := make([]tracer.StartSpanOption, len(cfg.SpanOpts), len(cfg.SpanOpts)+1) + copy(so, cfg.SpanOpts) + so = append(so, httptrace.HeaderTagsFromRequest(req, cfg.HeaderTags)) + TraceAndServe(h, w, req, &httptrace.ServeConfig{ + Framework: "net/http", + Service: service, + Resource: resc, + FinishOpts: cfg.FinishOpts, + SpanOpts: so, + IsStatusError: cfg.IsStatusError, + Route: pattern.Route(req.Pattern), + RouteParams: pattern.PathParameters(req.Pattern, req), + }) + }) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/mux.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/mux.go new file mode 100644 index 00000000..6d03a265 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/mux.go @@ -0,0 +1,92 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package wrap + +import ( + "net/http" + + internal "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config" + "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/pattern" + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec" + "github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace" +) + +// ServeMux is an HTTP request multiplexer that traces all the incoming requests. +type ServeMux struct { + *http.ServeMux + cfg *internal.Config +} + +// NewServeMux allocates and returns an http.ServeMux augmented with the +// global tracer. +func NewServeMux(opts ...internal.Option) *ServeMux { + instr := internal.Instrumentation + cfg := internal.Default(instr) + cfg.ApplyOpts(opts...) + cfg.SpanOpts = append(cfg.SpanOpts, tracer.Tag(ext.SpanKind, ext.SpanKindServer)) + cfg.SpanOpts = append(cfg.SpanOpts, tracer.Tag(ext.Component, internal.ComponentName)) + instr.Logger().Debug("contrib/net/http: Configuring ServeMux: %#v", cfg) + return &ServeMux{ + ServeMux: http.NewServeMux(), + cfg: cfg, + } +} + +// Handle registers the handler for the given pattern. +func (mux *ServeMux) Handle(pttrn string, inner http.Handler) { + handlerFunc := inner + if internal.Instrumentation.AppSecEnabled() { + // Calling TraceAndServe before `http.ServeMux.ServeHTTP` does not give enough information + // about routing for AppSec to work properly when using the ServeMux tracing wrapper. 
+ // Therefore, we need to wrap the handlerFunc with a handler that finishes the job here, + // after pattern data and matches are available. + // This also means the handler is prevented from being called if security rules disallow it. + handlerFunc = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if httpsec.RouteMatched(r.Context(), pattern.Route(r.Pattern), pattern.PathParameters(r.Pattern, r)) != nil { + return + } + inner.ServeHTTP(w, r) + }) + } + + mux.ServeMux.Handle(pttrn, handlerFunc) +} + +// HandleFunc registers the handler function for the given pattern. +func (mux *ServeMux) HandleFunc(pttrn string, handlerFunc func(http.ResponseWriter, *http.Request)) { + mux.Handle(pttrn, http.HandlerFunc(handlerFunc)) +} + +// ServeHTTP dispatches the request to the handler +// whose pattern most closely matches the request URL. +// We only need to rewrite this function to be able to trace +// all the incoming requests to the underlying multiplexer. +func (mux *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if mux.cfg.IgnoreRequest(r) { + mux.ServeMux.ServeHTTP(w, r) + return + } + // get the resource associated with this request + _, pttrn := mux.Handler(r) + route := pattern.Route(pttrn) + resource := mux.cfg.ResourceNamer(r) + if resource == "" { + resource = r.Method + " " + route + } + so := make([]tracer.StartSpanOption, len(mux.cfg.SpanOpts), len(mux.cfg.SpanOpts)+1) + copy(so, mux.cfg.SpanOpts) + so = append(so, httptrace.HeaderTagsFromRequest(r, mux.cfg.HeaderTags)) + TraceAndServe(mux.ServeMux, w, r, &httptrace.ServeConfig{ + Framework: "net/http", + Service: mux.cfg.ServiceName, + Resource: resource, + SpanOpts: so, + Route: route, + IsStatusError: mux.cfg.IsStatusError, + }) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/roundtrip.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/roundtrip.go new file mode 100644 index 00000000..4113e3c4 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/roundtrip.go @@ -0,0 +1,196 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc.
+ +package wrap + +import ( + "crypto/tls" + "fmt" + "math" + "net/http" + "net/http/httptrace" + "os" + "strconv" + "time" + + "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config" + "github.com/DataDog/dd-trace-go/v2/appsec/events" + "github.com/DataDog/dd-trace-go/v2/ddtrace/baggage" + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec" + instrumentationhttptrace "github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace" +) + +type AfterRoundTrip = func(*http.Response, error) (*http.Response, error) + +// httpTraceTimings captures key timing events from httptrace.ClientTrace +type httpTraceTimings struct { + dnsStart, dnsEnd time.Time + connectStart, connectEnd time.Time + tlsStart, tlsEnd time.Time + getConnStart, gotConn time.Time + wroteHeaders, gotFirstByte time.Time + connectErr error + tlsErr error +} + +// addDurationTag adds a timing tag to the span if both timestamps are valid +func (t *httpTraceTimings) addDurationTag(span *tracer.Span, tagName string, start, end time.Time) { + if !start.IsZero() && !end.IsZero() { + duration := float64(end.Sub(start).Nanoseconds()) / 1e6 + span.SetTag(tagName, duration) + } +} + +// addTimingTags adds all timing information to the span +func (t *httpTraceTimings) addTimingTags(span *tracer.Span) { + t.addDurationTag(span, "http.dns.duration_ms", t.dnsStart, t.dnsEnd) + t.addDurationTag(span, "http.connect.duration_ms", t.connectStart, t.connectEnd) + t.addDurationTag(span, "http.tls.duration_ms", t.tlsStart, t.tlsEnd) + t.addDurationTag(span, "http.get_conn.duration_ms", t.getConnStart, t.gotConn) + t.addDurationTag(span, "http.first_byte.duration_ms", t.wroteHeaders, t.gotFirstByte) + + // Add error information if present + if t.connectErr != nil { + span.SetTag("http.connect.error", t.connectErr.Error()) + } + if t.tlsErr != nil { + span.SetTag("http.tls.error", t.tlsErr.Error()) + } +} + +// newClientTrace creates a ClientTrace that captures timing information +func newClientTrace(timings *httpTraceTimings) *httptrace.ClientTrace { + return &httptrace.ClientTrace{ + DNSStart: func(httptrace.DNSStartInfo) { timings.dnsStart = time.Now() }, + DNSDone: func(httptrace.DNSDoneInfo) { timings.dnsEnd = time.Now() }, + ConnectStart: func(network, addr string) { timings.connectStart = time.Now() }, + ConnectDone: func(network, addr string, err error) { timings.connectEnd = time.Now(); timings.connectErr = err }, + TLSHandshakeStart: func() { timings.tlsStart = time.Now() }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { timings.tlsEnd = time.Now(); timings.tlsErr = err }, + GetConn: func(hostPort string) { timings.getConnStart = time.Now() }, + GotConn: func(httptrace.GotConnInfo) { timings.gotConn = time.Now() }, + WroteHeaders: func() { timings.wroteHeaders = time.Now() }, + GotFirstResponseByte: func() { timings.gotFirstByte = time.Now() }, + } +} + +// ObserveRoundTrip performs actions before the base [http.RoundTripper.RoundTrip] using the +// provided [*config.RoundTripperConfig] (which cannot be nil). It returns the possibly modified +// [*http.Request] and a function to be called after the base [http.RoundTripper.RoundTrip] function +// is executed, and before returning control to the caller. 
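The timing capture in this file is built on the standard net/http/httptrace hooks. A standalone sketch (the URL is illustrative) of the same mechanism used to derive tags such as http.dns.duration_ms:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
	"time"
)

func main() {
	var dnsStart, dnsEnd time.Time
	trace := &httptrace.ClientTrace{
		DNSStart: func(httptrace.DNSStartInfo) { dnsStart = time.Now() },
		DNSDone:  func(httptrace.DNSDoneInfo) { dnsEnd = time.Now() },
	}
	req, _ := http.NewRequest(http.MethodGet, "https://example.com/", nil)
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	if resp, err := http.DefaultTransport.RoundTrip(req); err == nil {
		resp.Body.Close()
	}
	// Same millisecond computation as httpTraceTimings.addDurationTag above.
	if !dnsStart.IsZero() && !dnsEnd.IsZero() {
		fmt.Printf("dns: %.3f ms\n", float64(dnsEnd.Sub(dnsStart).Nanoseconds())/1e6)
	}
}
```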
+// +// If RASP features are enabled, an error will be returned if the request should be blocked, in +// which case the caller must immediately abort the [http.RoundTripper.RoundTrip] and forward the +// error as-is. An error is never returned if RASP features are not enabled. +func ObserveRoundTrip(cfg *config.RoundTripperConfig, req *http.Request) (*http.Request, AfterRoundTrip, error) { + if cfg.IgnoreRequest(req) { + return req, identityAfterRoundTrip, nil + } + + resourceName := cfg.ResourceNamer(req) + spanName := cfg.SpanNamer(req) + // Make a copy of the URL so we don't modify the outgoing request + url := *req.URL + url.User = nil // Do not include userinfo in the HTTPURL tag. + opts := []tracer.StartSpanOption{ + tracer.SpanType(ext.SpanTypeHTTP), + tracer.ResourceName(resourceName), + tracer.Tag(ext.HTTPMethod, req.Method), + tracer.Tag(ext.HTTPURL, instrumentationhttptrace.URLFromRequest(req, cfg.QueryString)), + tracer.Tag(ext.Component, config.ComponentName), + tracer.Tag(ext.SpanKind, ext.SpanKindClient), + tracer.Tag(ext.NetworkDestinationName, url.Hostname()), + } + if !math.IsNaN(cfg.AnalyticsRate) { + opts = append(opts, tracer.Tag(ext.EventSampleRate, cfg.AnalyticsRate)) + } + if cfg.ServiceName != "" { + opts = append(opts, tracer.ServiceName(cfg.ServiceName)) + } + if port, err := strconv.Atoi(url.Port()); err == nil { + opts = append(opts, tracer.Tag(ext.NetworkDestinationPort, port)) + } + if len(cfg.SpanOpts) > 0 { + opts = append(opts, cfg.SpanOpts...) + } + + // Start a new span + span, ctx := tracer.StartSpanFromContext(req.Context(), spanName, opts...) + + // Apply the before hook, if any + if cfg.Before != nil { + cfg.Before(req, span) + } + + // Set up ClientTrace for detailed timing if enabled + var timings *httpTraceTimings + if cfg.ClientTimings { + timings = &httpTraceTimings{} + ctx = httptrace.WithClientTrace(ctx, newClientTrace(timings)) + } + + // Clone the request so we can modify it without causing visible side-effects to the caller... + req = req.Clone(ctx) + for k, v := range baggage.All(ctx) { + span.SetBaggageItem(k, v) + } + if cfg.Propagation { + // inject the span context into the http request copy + err := tracer.Inject(span.Context(), tracer.HTTPHeadersCarrier(req.Header)) + if err != nil { + // this should never happen + fmt.Fprintf(os.Stderr, "contrib/net/http.Roundtrip: failed to inject http headers: %s\n", err.Error()) + } + } + + // if RASP is enabled, check whether the request is supposed to be blocked. + if config.Instrumentation.AppSecRASPEnabled() { + if err := httpsec.ProtectRoundTrip(ctx, req.URL.String()); err != nil { + span.Finish() // Finish the span as we're blocking the request... + return nil, nil, err + } + } + + after := func(resp *http.Response, err error) (*http.Response, error) { + // Register http errors and observe the status code...
+ if err != nil { + span.SetTag("http.errors", err.Error()) + if cfg.ErrCheck == nil || cfg.ErrCheck(err) { + span.SetTag(ext.Error, err) + } + } else { + span.SetTag(ext.HTTPCode, strconv.Itoa(resp.StatusCode)) + if cfg.IsStatusError(resp.StatusCode) { + span.SetTag("http.errors", resp.Status) + span.SetTag(ext.Error, fmt.Errorf("%d: %s", resp.StatusCode, http.StatusText(resp.StatusCode))) + } + } + + if cfg.ClientTimings && timings != nil { + timings.addTimingTags(span) + } + + // Run the after hooks & finish the span + if cfg.After != nil { + cfg.After(resp, span) + } + if !events.IsSecurityError(err) && (cfg.ErrCheck == nil || cfg.ErrCheck(err)) { + span.Finish(tracer.WithError(err)) + } else { + span.Finish() + } + + // Finally, forward the response and error back to the caller + return resp, err + } + + return req, after, nil +} + +func identityAfterRoundTrip(resp *http.Response, err error) (*http.Response, error) { + return resp, err +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/trace.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/trace.go new file mode 100644 index 00000000..1f35e9c0 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap/trace.go @@ -0,0 +1,24 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package wrap + +import ( + "net/http" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace" +) + +// TraceAndServe serves the handler h using the given ResponseWriter and Request, applying tracing +// according to the specified config. +func TraceAndServe(h http.Handler, w http.ResponseWriter, r *http.Request, cfg *httptrace.ServeConfig) { + tw, tr, afterHandle, handled := httptrace.BeforeHandle(cfg, w, r) + defer afterHandle() + + if handled { + return + } + h.ServeHTTP(tw, tr) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/option.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/option.go new file mode 100644 index 00000000..4f69e767 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/option.go @@ -0,0 +1,209 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package http + +import ( + "math" + "net/http" + + internal "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config" + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + "github.com/DataDog/dd-trace-go/v2/instrumentation" + "github.com/DataDog/dd-trace-go/v2/instrumentation/env" + "github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace" + "github.com/DataDog/dd-trace-go/v2/instrumentation/options" +) + +// Option describes options for http.ServeMux. +type Option = internal.Option + +// OptionFn represents options applicable to NewServeMux and WrapHandler. +type OptionFn = internal.OptionFn + +// HandlerOptionFn represents options applicable to NewServeMux and WrapHandler. +type HandlerOptionFn = internal.HandlerOptionFn + +// WithIgnoreRequest holds the function to use for determining if the +// incoming HTTP request should not be traced. 
+func WithIgnoreRequest(f func(*http.Request) bool) OptionFn { + return func(cfg *internal.CommonConfig) { + cfg.IgnoreRequest = f + } +} + +// WithService sets the given service name for the returned ServeMux. +func WithService(name string) OptionFn { + return func(cfg *internal.CommonConfig) { + cfg.ServiceName = name + } +} + +// WithHeaderTags enables the integration to attach HTTP request headers as span tags. +// Warning: +// Using this feature can risk exposing sensitive data such as authorization tokens to Datadog. +// Special headers can not be sub-selected. E.g., an entire Cookie header would be transmitted, without the ability to choose specific Cookies. +func WithHeaderTags(headers []string) HandlerOptionFn { + return func(cfg *internal.Config) { + cfg.HeaderTags = instrumentation.NewHeaderTags(headers) + } +} + +// WithStatusCheck sets a span to be an error if the passed function +// returns true for a given status code. +func WithStatusCheck(fn func(statusCode int) bool) OptionFn { + return func(cfg *internal.CommonConfig) { + cfg.IsStatusError = fn + } +} + +// WithAnalytics enables Trace Analytics for all started spans. +func WithAnalytics(on bool) OptionFn { + return func(cfg *internal.CommonConfig) { + if on { + cfg.AnalyticsRate = 1.0 + cfg.SpanOpts = append(cfg.SpanOpts, tracer.Tag(ext.EventSampleRate, cfg.AnalyticsRate)) + } else { + cfg.AnalyticsRate = math.NaN() + } + } +} + +// WithAnalyticsRate sets the sampling rate for Trace Analytics events +// correlated to started spans. +func WithAnalyticsRate(rate float64) OptionFn { + return func(cfg *internal.CommonConfig) { + if rate >= 0.0 && rate <= 1.0 { + cfg.AnalyticsRate = rate + cfg.SpanOpts = append(cfg.SpanOpts, tracer.Tag(ext.EventSampleRate, cfg.AnalyticsRate)) + } else { + cfg.AnalyticsRate = math.NaN() + } + } +} + +// WithSpanOptions defines a set of additional tracer.StartSpanOption to be added +// to spans started by the integration. +func WithSpanOptions(opts ...tracer.StartSpanOption) OptionFn { + return func(cfg *internal.CommonConfig) { + cfg.SpanOpts = append(cfg.SpanOpts, opts...) + } +} + +// WithResourceNamer populates the name of a resource based on a custom function. +func WithResourceNamer(namer func(req *http.Request) string) OptionFn { + return internal.WithResourceNamer(namer) +} + +// NoDebugStack prevents stack traces from being attached to spans finishing +// with an error. This is useful in situations where errors are frequent and +// performance is critical. +func NoDebugStack() HandlerOptionFn { + return func(cfg *internal.Config) { + cfg.FinishOpts = append(cfg.FinishOpts, tracer.NoDebugStack()) + } +} + +// RoundTripperOption describes options for http.RoundTripper. +type RoundTripperOption = internal.RoundTripperOption + +// RoundTripperOptionFn represents options applicable to WrapClient and WrapRoundTripper. 
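A combined sketch of the server-side options defined above, WithService, WithIgnoreRequest, and WithStatusCheck (the service name, routes, port, and /healthz path are illustrative):

```go
package main

import (
	"net/http"

	ddhttp "github.com/DataDog/dd-trace-go/contrib/net/http/v2"
)

func main() {
	mux := ddhttp.NewServeMux(
		ddhttp.WithService("bitrise-webhooks"), // illustrative service name
		ddhttp.WithIgnoreRequest(func(r *http.Request) bool {
			return r.URL.Path == "/healthz" // don't trace health checks
		}),
		ddhttp.WithStatusCheck(func(status int) bool {
			return status >= 500 // flag 5xx responses as span errors
		}),
	)
	mux.HandleFunc("POST /hooks", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusAccepted)
	})
	_ = http.ListenAndServe(":8080", mux)
}
```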
+type RoundTripperOptionFn = internal.RoundTripperOptionFn + +func newRoundTripperConfig() *internal.RoundTripperConfig { + defaultResourceNamer := func(_ *http.Request) string { + return "http.request" + } + instr := internal.Instrumentation + spanName := instr.OperationName(instrumentation.ComponentClient, nil) + defaultSpanNamer := func(_ *http.Request) string { + return spanName + } + sharedCfg := internal.CommonConfig{ + ServiceName: instr.ServiceName(instrumentation.ComponentClient, nil), + AnalyticsRate: instr.GlobalAnalyticsRate(), + ResourceNamer: defaultResourceNamer, + IgnoreRequest: func(_ *http.Request) bool { return false }, + IsStatusError: isClientError, + } + + v := env.Get(internal.EnvClientErrorStatuses) + if fn := httptrace.GetErrorCodesFromInput(v); fn != nil { + sharedCfg.IsStatusError = fn + } + + rtConfig := internal.RoundTripperConfig{ + CommonConfig: sharedCfg, + Propagation: true, + SpanNamer: defaultSpanNamer, + QueryString: options.GetBoolEnv(internal.EnvClientQueryStringEnabled, true), + } + + return &rtConfig +} + +// A RoundTripperBeforeFunc can be used to modify a span before an http +// RoundTrip is made. +type RoundTripperBeforeFunc = internal.RoundTripperBeforeFunc + +// A RoundTripperAfterFunc can be used to modify a span after an http +// RoundTrip is made. It is possible for the http Response to be nil. +type RoundTripperAfterFunc = internal.RoundTripperAfterFunc + +// WithBefore adds a RoundTripperBeforeFunc to the RoundTripper +// config. +func WithBefore(f RoundTripperBeforeFunc) RoundTripperOptionFn { + return func(cfg *internal.RoundTripperConfig) { + cfg.Before = f + } +} + +// WithAfter adds a RoundTripperAfterFunc to the RoundTripper +// config. +func WithAfter(f RoundTripperAfterFunc) RoundTripperOptionFn { + return func(cfg *internal.RoundTripperConfig) { + cfg.After = f + } +} + +// WithSpanNamer specifies a function which will be used to +// obtain the span operation name for a given request. +func WithSpanNamer(namer func(req *http.Request) string) RoundTripperOptionFn { + return func(cfg *internal.RoundTripperConfig) { + cfg.SpanNamer = namer + } +} + +// WithPropagation enables/disables propagation for tracing headers. +// Disabling propagation will disconnect this trace from any downstream traces. +func WithPropagation(propagation bool) RoundTripperOptionFn { + return func(cfg *internal.RoundTripperConfig) { + cfg.Propagation = propagation + } +} + +// WithErrorCheck specifies a function fn which determines whether the passed +// error should be marked as an error. The fn is called whenever an http operation +// finishes with an error +func WithErrorCheck(fn func(err error) bool) RoundTripperOptionFn { + return func(cfg *internal.RoundTripperConfig) { + cfg.ErrCheck = fn + } +} + +// WithClientTimings enables detailed HTTP request tracing using httptrace.ClientTrace. +// When enabled, the integration will add timing information for DNS lookups, +// connection establishment, TLS handshakes, and other HTTP request events as span tags. +// This feature is disabled by default and adds minimal overhead when enabled. 
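And a client-side counterpart, combining the RoundTripper options above with WrapClient from roundtripper.go further down (the error-check policy and URL are illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"

	ddhttp "github.com/DataDog/dd-trace-go/contrib/net/http/v2"
)

func main() {
	client := ddhttp.WrapClient(&http.Client{},
		// Don't mark context cancellations as span errors.
		ddhttp.WithErrorCheck(func(err error) bool {
			return !errors.Is(err, context.Canceled)
		}),
		// Record DNS/connect/TLS timings as span tags.
		ddhttp.WithClientTimings(true),
	)
	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	resp.Body.Close()
}
```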
+func WithClientTimings(enabled bool) RoundTripperOptionFn { + return func(cfg *internal.RoundTripperConfig) { + cfg.ClientTimings = enabled + } +} + +func isClientError(statusCode int) bool { + return statusCode >= 400 && statusCode < 500 +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.client.yml b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.client.yml new file mode 100644 index 00000000..6bf7d034 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.client.yml @@ -0,0 +1,118 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the Apache License Version 2.0. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2023-present Datadog, Inc. +--- +# yaml-language-server: $schema=https://datadoghq.dev/orchestrion/schema.json +meta: + name: github.com/DataDog/dd-trace-go/contrib/net/http/v2.Client + description: HTTP client implementation. + +aspects: + # Add tracing to the default http.RoundTripper implementation. + - id: Transport.DD__tracer_internal + join-point: + struct-definition: net/http.Transport + advice: + - add-struct-field: + name: DD__tracer_internal + type: bool + # In tracer internals, set the DD__tracer_internal field to true so that we do not end up + # instrumenting the tracer's internal HTTP clients (this would be a span bomb!) + - id: Transport.DD__tracer_internal=true + tracer-internal: true + join-point: + all-of: + - one-of: + - import-path: github.com/DataDog/dd-trace-go/v2/ddtrace/tracer + - import-path: github.com/DataDog/dd-trace-go/v2/internal/hostname/httputils + - import-path: github.com/DataDog/dd-trace-go/v2/internal/remoteconfig + - import-path: github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal + - import-path: github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/net + - import-path: github.com/DataDog/dd-trace-go/v2/profiler + - struct-literal: + type: net/http.Transport + advice: + - wrap-expression: + template: |- + {{- .AST.Type -}}{ + DD__tracer_internal: true, + {{ range .AST.Elts }}{{ . }}, + {{ end }} + } + - id: Transport.RoundTrip + join-point: + function-body: + function: + - name: RoundTrip + - receiver: '*net/http.Transport' + advice: + - inject-declarations: + # We need to use go:linkname to refer to these declarations in order to avoid creating + # circular dependencies, as these features have transitive dependencies on `net/http`...
+ links: + - github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/orchestrion + template: |- + type ddAfterRoundTrip = func(*Response, error) (*Response, error) + + //go:linkname __dd_httptrace_ObserveRoundTrip github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/orchestrion.ObserveRoundTrip + func __dd_httptrace_ObserveRoundTrip(*Request) (*Request, ddAfterRoundTrip, error) + - prepend-statements: + template: |- + {{- $t := .Function.Receiver -}} + {{- $req := .Function.Argument 0 -}} + {{- $res := .Function.Result 0 -}} + {{- $err := .Function.Result 1 -}} + if !{{ $t }}.DD__tracer_internal { + var ( + __after__ ddAfterRoundTrip + __err__ error + ) + {{ $req }}, __after__, __err__ = __dd_httptrace_ObserveRoundTrip({{ $req }}) + if __err__ != nil { + return nil, __err__ + } + defer func(){ + {{ $res }}, {{ $err }} = __after__({{ $res }}, {{ $err }}) + }() + } + + # Replace the http.Get, http.Head, http.Post, and http.PostForm short-hands with the longer forms if + # there is a context available from the surroundings. + - id: Get|Head|Post|PostForm + join-point: + all-of: + - not: + # We don't want to instrument in net/http, it'd create a circular dependency! + import-path: net/http + - one-of: + - function-call: net/http.Get + - function-call: net/http.Head + - function-call: net/http.Post + - function-call: net/http.PostForm + advice: + # Wire the context that is found to the handlers... + - wrap-expression: + imports: + # Temporarily add a namespaced alias to ensure we don't have symbol name collisions. + # The root issue is solved at https://github.com/DataDog/orchestrion/pull/678 but this should + # ensure dd-trace-go users using older Orchestrion versions don't have build errors. + __ddtrace_client: github.com/DataDog/dd-trace-go/contrib/net/http/v2/client + template: |- + {{- $ctx := .Function.ArgumentOfType "context.Context" -}} + {{- $req := .Function.ArgumentOfType "*net/http.Request" }} + {{- if $ctx -}} + __ddtrace_client.{{ .AST.Fun.Name }}( + {{ $ctx }}, + {{ range .AST.Args }}{{ . }}, + {{ end }} + ) + {{- else if $req -}} + __ddtrace_client.{{ .AST.Fun.Name }}( + {{ $req }}.Context(), + {{ range .AST.Args }}{{ . }}, + {{ end }} + ) + {{- else -}} + {{ . }} + {{- end -}} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.go new file mode 100644 index 00000000..7acd5ca7 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.go @@ -0,0 +1,15 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +//nolint:revive +package http + +// Import "./internal/orchestrion" and "./client" so that they're present in the +// dependency closure when compile-time instrumentation is used. This is +// necessary for the `orchestrion.server.yml` configuration to be valid.
+import ( + _ "github.com/DataDog/dd-trace-go/contrib/net/http/v2/client" + _ "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/orchestrion" +) diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.server.yml b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.server.yml new file mode 100644 index 00000000..c8ae66b5 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.server.yml @@ -0,0 +1,34 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the Apache License Version 2.0. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2023-present Datadog, Inc. +--- +# yaml-language-server: $schema=https://datadoghq.dev/orchestrion/schema.json +meta: + name: github.com/DataDog/dd-trace-go/contrib/net/http/v2.Server + description: HTTP server implementation. + +aspects: + - id: Server.Serve + join-point: + function-body: + function: + - receiver: '*net/http.Server' + - name: Serve + advice: + - inject-declarations: + # We need to use go:linkname to refer to a number of declarations in order to avoid creating + # circular dependencies, as these features have transitive dependencies on `net/http`... + links: + - github.com/DataDog/dd-trace-go/contrib/net/http/v2 + template: |- + //go:linkname __dd_contrib_net_http_internal_orchestrion_WrapHandler github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/orchestrion.WrapHandler + func __dd_contrib_net_http_internal_orchestrion_WrapHandler(Handler) Handler + - prepend-statements: + template: |- + {{- $srv := .Function.Receiver -}} + if {{ $srv }}.Handler == nil { + {{ $srv }}.Handler = __dd_contrib_net_http_internal_orchestrion_WrapHandler(DefaultServeMux) + } else { + {{ $srv }}.Handler = __dd_contrib_net_http_internal_orchestrion_WrapHandler({{ $srv }}.Handler) + } diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.yml b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.yml new file mode 100644 index 00000000..0f1a38f1 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/orchestrion.yml @@ -0,0 +1,13 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the Apache License Version 2.0. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2023-present Datadog, Inc. +--- +# yaml-language-server: $schema=https://datadoghq.dev/orchestrion/schema.json +meta: + name: github.com/DataDog/dd-trace-go/contrib/net/http/v2 + description: HTTP stack implementation. + +extends: + - ./orchestrion.client.yml + - ./orchestrion.server.yml diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/roundtripper.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/roundtripper.go new file mode 100644 index 00000000..a6819631 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/roundtripper.go @@ -0,0 +1,58 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
+ +package http + +import ( + "net/http" + + internal "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/config" + "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap" +) + +type roundTripper struct { + base http.RoundTripper + cfg *internal.RoundTripperConfig +} + +func (rt *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req, after, err := wrap.ObserveRoundTrip(rt.cfg, req) + if err != nil { + return nil, err + } + resp, err := rt.base.RoundTrip(req) + return after(resp, err) +} + +// Unwrap returns the original http.RoundTripper. +func (rt *roundTripper) Unwrap() http.RoundTripper { + return rt.base +} + +// WrapRoundTripper returns a new RoundTripper which traces all requests sent +// over the transport. +func WrapRoundTripper(rt http.RoundTripper, opts ...RoundTripperOption) http.RoundTripper { + if rt == nil { + rt = http.DefaultTransport + } + cfg := newRoundTripperConfig() + cfg.ApplyOpts(opts...) + if wrapped, ok := rt.(*roundTripper); ok { + rt = wrapped.base + } + return &roundTripper{ + base: rt, + cfg: cfg, + } +} + +// WrapClient modifies the given client's transport to augment it with tracing and returns it. +func WrapClient(c *http.Client, opts ...RoundTripperOption) *http.Client { + if c.Transport == nil { + c.Transport = http.DefaultTransport + } + c.Transport = WrapRoundTripper(c.Transport, opts...) + return c +} diff --git a/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/trace.go b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/trace.go new file mode 100644 index 00000000..ec197e5d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/contrib/net/http/v2/trace.go @@ -0,0 +1,22 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package http // import "github.com/DataDog/dd-trace-go/contrib/net/http/v2" + +import ( + "net/http" + + "github.com/DataDog/dd-trace-go/contrib/net/http/v2/internal/wrap" + "github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace" +) + +// ServeConfig specifies the tracing configuration when using TraceAndServe. +type ServeConfig = httptrace.ServeConfig + +// TraceAndServe serves the handler h using the given ResponseWriter and Request, applying tracing +// according to the specified config. +func TraceAndServe(h http.Handler, w http.ResponseWriter, r *http.Request, cfg *ServeConfig) { + wrap.TraceAndServe(h, w, r, cfg) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE b/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE new file mode 100644 index 00000000..f760d366 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE @@ -0,0 +1,234 @@ +## License + +This work is dual-licensed under Apache 2.0 or BSD3. +You may select, at your option, one of the above-listed licenses. + +`SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause` + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +--- + +Copyright (c) 2016-Present, Datadog +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Datadog nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE-3rdparty.csv b/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE-3rdparty.csv new file mode 100644 index 00000000..1b6a22fa --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE-3rdparty.csv @@ -0,0 +1,4 @@ +Component,Origin,License,Copyright +import,io.opentracing,Apache-2.0,Copyright 2016-2017 The OpenTracing Authors +appsec,https://github.com/DataDog/libddwaf,Apache-2.0 OR BSD-3-Clause,Copyright (c) 2021 Datadog +golang,https://go.googlesource.com/go,BSD-3-Clause,Copyright (c) 2009 The Go Authors diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE-APACHE b/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE-APACHE new file mode 100644 index 00000000..bff56b54 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE-APACHE @@ -0,0 +1,200 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE-BSD3 b/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE-BSD3 new file mode 100644 index 00000000..92373209 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/LICENSE-BSD3 @@ -0,0 +1,24 @@ +Copyright (c) 2016-Present, Datadog +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Datadog nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/NOTICE b/vendor/github.com/DataDog/dd-trace-go/v2/NOTICE new file mode 100644 index 00000000..a53b8ade --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/NOTICE @@ -0,0 +1,4 @@ +Datadog dd-trace-go +Copyright 2016-Present Datadog, Inc. + +This product includes software developed at Datadog, Inc. (https://www.datadoghq.com/). diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/appsec/events/block.go b/vendor/github.com/DataDog/dd-trace-go/v2/appsec/events/block.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/appsec/events/block.go rename to vendor/github.com/DataDog/dd-trace-go/v2/appsec/events/block.go diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/datastreams/options/options.go b/vendor/github.com/DataDog/dd-trace-go/v2/datastreams/options/options.go new file mode 100644 index 00000000..066477f7 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/datastreams/options/options.go @@ -0,0 +1,11 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package options + +type CheckpointParams struct { + PayloadSize int64 + ServiceOverride string +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/baggage/baggage.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/baggage/baggage.go new file mode 100644 index 00000000..a3a97889 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/baggage/baggage.go @@ -0,0 +1,83 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package baggage + +import ( + "context" + "maps" +) + +// baggageKey is an unexported type used as a context key. It is used to store baggage in the context. +// We use a struct{} so it won't conflict with keys from other packages. +type baggageKey struct{} + +// baggageMap returns the baggage map from the given context and a bool indicating +// whether the baggage exists or not. If the bool is false, the returned map is nil. +func baggageMap(ctx context.Context) (map[string]string, bool) { + val := ctx.Value(baggageKey{}) + bm, ok := val.(map[string]string) + if !ok { + // val was nil or not a map[string]string + return nil, false + } + return bm, true +} + +// withBaggage returns a new context with the given baggage map set. 
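+// The exported helpers below build on withBaggage and baggageMap, cloning the
+// map on every mutation so a derived context never aliases its parent's
+// baggage. Illustrative use (a sketch, not part of this file):
+//
+//	ctx = baggage.Set(ctx, "user.id", "1234")
+//	v, ok := baggage.Get(ctx, "user.id") // "1234", true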
+func withBaggage(ctx context.Context, baggage map[string]string) context.Context {
+	return context.WithValue(ctx, baggageKey{}, baggage)
+}
+
+// Set sets or updates a single baggage key/value pair in the context.
+// If the key already exists, this function overwrites the existing value.
+func Set(ctx context.Context, key, value string) context.Context {
+	bm, ok := baggageMap(ctx)
+	if !ok || bm == nil {
+		// If there's no baggage map yet, or it's nil, create one
+		bm = make(map[string]string)
+	} else {
+		bm = maps.Clone(bm)
+	}
+	bm[key] = value
+	return withBaggage(ctx, bm)
+}
+
+// Get retrieves the value associated with a baggage key.
+// If the key isn't found, it returns an empty string and false.
+func Get(ctx context.Context, key string) (string, bool) {
+	bm, ok := baggageMap(ctx)
+	if !ok {
+		return "", false
+	}
+	value, ok := bm[key]
+	return value, ok
+}
+
+// Remove removes the specified key from the baggage (if present).
+func Remove(ctx context.Context, key string) context.Context {
+	bm, ok := baggageMap(ctx)
+	if !ok || bm == nil {
+		// nothing to remove
+		return ctx
+	}
+	bmCopy := maps.Clone(bm)
+	delete(bmCopy, key)
+	return withBaggage(ctx, bmCopy)
+}
+
+// All returns a copy of all baggage items in the context.
+func All(ctx context.Context) map[string]string {
+	bm, ok := baggageMap(ctx)
+	if !ok {
+		return nil
+	}
+	return maps.Clone(bm)
+}
+
+// Clear completely removes all baggage items from the context.
+func Clear(ctx context.Context) context.Context {
+	return withBaggage(ctx, nil)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ddtrace.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ddtrace.go
new file mode 100644
index 00000000..50995204
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ddtrace.go
@@ -0,0 +1,36 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package ddtrace contains the interfaces that specify the implementations of Datadog's
+// tracing library, as well as a set of sub-packages containing various implementations:
+// our native implementation ("tracer") and a mock tracer to be used for testing ("mocktracer").
+// Additionally, package "ext" provides a set of tag names and values specific to Datadog's APM product.
+//
+// To get started, visit the documentation for any of the packages you'd like to begin
+// with by accessing the subdirectories of this package: https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/ddtrace#pkg-subdirectories.
+package ddtrace // import "github.com/DataDog/dd-trace-go/v2/ddtrace"
+
+// SpanContext represents a span state that can propagate to descendant spans
+// and across process boundaries. It contains all the information needed to
+// spawn a direct descendant of the span that it belongs to. It can be used
+// to create distributed tracing by propagating it using the provided interfaces.
+type SpanContext interface {
+	// SpanID returns the span ID that this context is carrying.
+	SpanID() uint64
+
+	// TraceID returns the trace ID that this context is carrying.
+	TraceID() string
+
+	// TraceIDBytes returns the raw bytes of the 128-bit trace ID that this context is carrying.
+	TraceIDBytes() [16]byte
+
+	// TraceIDLower returns the lower part of the trace ID that this context is carrying.
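+	// (That is, the low-order 64 bits of the 128-bit ID, which match the
+	// legacy 64-bit trace ID.)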
+	TraceIDLower() uint64
+
+	// ForeachBaggageItem provides an iterator over the key/value pairs set as
+	// baggage within this context. Iteration stops when the handler returns
+	// false.
+	ForeachBaggageItem(handler func(k, v string) bool)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/app_types.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/app_types.go
new file mode 100644
index 00000000..5561b63e
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/app_types.go
@@ -0,0 +1,82 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext // import "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+
+// App types determine how to categorize a trace in the Datadog application.
+// For more fine-grained behaviour, use the SpanType* constants.
+const (
+	// AppTypeDB specifies the DB span type and can be used as a tag value
+	// for a span's SpanType tag. If possible, use one of the SpanType*
+	// constants for a more accurate indication.
+	AppTypeDB = "db"
+
+	// AppTypeCache specifies the Cache span type and can be used as a tag value
+	// for a span's SpanType tag. If possible, consider using SpanTypeRedis or
+	// SpanTypeMemcached.
+	AppTypeCache = "cache"
+
+	// AppTypeRPC specifies the RPC span type and can be used as a tag value
+	// for a span's SpanType tag.
+	AppTypeRPC = "rpc"
+)
+
+// Span types have similar behaviour to "app types" and help categorize
+// traces in the Datadog application. They can also help fine-tune agent-level
+// behaviours such as obfuscation and quantization, when these are
+// enabled in the agent's configuration.
+const (
+	// SpanTypeWeb marks a span as an HTTP server request.
+	SpanTypeWeb = "web"
+
+	// SpanTypeHTTP marks a span as an HTTP client request.
+	SpanTypeHTTP = "http"
+
+	// SpanTypeSQL marks a span as an SQL operation. These spans may
+	// have an "sql.command" tag.
+	SpanTypeSQL = "sql"
+
+	// SpanTypeCassandra marks a span as a Cassandra operation. These
+	// spans may have an "sql.command" tag.
+	SpanTypeCassandra = "cassandra"
+
+	// SpanTypeRedis marks a span as a Redis operation. These spans may
+	// also have a "redis.raw_command" tag.
+	SpanTypeRedis = "redis"
+
+	// SpanTypeValkey marks a span as a Valkey operation.
+	SpanTypeValkey = "valkey"
+
+	// SpanTypeMemcached marks a span as a memcached operation.
+	SpanTypeMemcached = "memcached"
+
+	// SpanTypeMongoDB marks a span as a MongoDB operation.
+	SpanTypeMongoDB = "mongodb"
+
+	// SpanTypeElasticSearch marks a span as an ElasticSearch operation.
+	// These spans may also have an "elasticsearch.body" tag.
+	SpanTypeElasticSearch = "elasticsearch"
+
+	// SpanTypeLevelDB marks a span as a leveldb operation.
+	SpanTypeLevelDB = "leveldb"
+
+	// SpanTypeDNS marks a span as a DNS operation.
+	SpanTypeDNS = "dns"
+
+	// SpanTypeMessageConsumer marks a span as a queue consumer operation.
+	SpanTypeMessageConsumer = "queue"
+
+	// SpanTypeMessageProducer marks a span as a queue producer operation.
+	SpanTypeMessageProducer = "queue"
+
+	// SpanTypeConsul marks a span as a Consul operation.
+	SpanTypeConsul = "consul"
+
+	// SpanTypeGraphQL marks a span as a graphql operation.
+	SpanTypeGraphQL = "graphql"
+
+	// SpanTypeLLM marks a span as an LLM operation.
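+	// Like the other span types in this block, span.type is normally set when
+	// the span is started, e.g. (an illustrative sketch, not part of this file):
+	//
+	//	span := tracer.StartSpan("users.query", tracer.Tag(ext.SpanType, ext.SpanTypeSQL))
+	//	defer span.Finish()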
+ SpanTypeLLM = "llm" +) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/aws.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/aws.go new file mode 100644 index 00000000..5665b079 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/aws.go @@ -0,0 +1,34 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package ext + +// Tags specific to AWS. +const ( + AWSServiceLegacy = "aws.service" + AWSRegionLegacy = "aws.region" + + AWSAgent = "aws.agent" + AWSService = "aws_service" + AWSOperation = "aws.operation" + AWSRegion = "region" + AWSRequestID = "aws.request_id" + AWSRetryCount = "aws.retry_count" + + SQSQueueName = "queuename" + + SNSTargetName = "targetname" + SNSTopicName = "topicname" + + DynamoDBTableName = "tablename" + + KinesisStreamName = "streamname" + + EventBridgeRuleName = "rulename" + + SFNStateMachineName = "statemachinename" + + S3BucketName = "bucketname" +) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/db.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/db.go new file mode 100644 index 00000000..e4c442a4 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/db.go @@ -0,0 +1,119 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package ext + +const ( + // DBApplication indicates the application using the database. + DBApplication = "db.application" + // DBName indicates the database name. + DBName = "db.name" + // DBType indicates the type of Database. + DBType = "db.type" + // DBInstance indicates the instance name of Database. + DBInstance = "db.instance" + // DBUser indicates the user name of Database, e.g. "readonly_user" or "reporting_user". + DBUser = "db.user" + // DBStatement records a database statement for the given database type. + DBStatement = "db.statement" + // DBSystem indicates the database management system (DBMS) product being used. + DBSystem = "db.system" +) + +// Available values for db.system. +const ( + DBSystemMemcached = "memcached" + DBSystemMySQL = "mysql" + DBSystemPostgreSQL = "postgresql" + DBSystemMicrosoftSQLServer = "mssql" + // DBSystemOtherSQL is used for other SQL databases not listed above. + DBSystemOtherSQL = "other_sql" + DBSystemElasticsearch = "elasticsearch" + DBSystemRedis = "redis" + DBSystemValkey = "valkey" + DBSystemMongoDB = "mongodb" + DBSystemCassandra = "cassandra" + DBSystemConsulKV = "consul" + DBSystemLevelDB = "leveldb" + DBSystemBuntDB = "buntdb" +) + +// MicrosoftSQLServer tags. +const ( + // MicrosoftSQLServerInstanceName indicates the Microsoft SQL Server instance name connecting to. + MicrosoftSQLServerInstanceName = "db.mssql.instance_name" +) + +// MongoDB tags. +const ( + // MongoDBCollection indicates the collection being accessed. + MongoDBCollection = "db.mongodb.collection" +) + +// Redis tags. +const ( + // RedisDatabaseIndex indicates the Redis database index connected to. + RedisDatabaseIndex = "db.redis.database_index" + + // RedisRawCommand allows to set the raw command for tags. + RedisRawCommand = "redis.raw_command" + + // RedisClientCacheHit is the remaining TTL in seconds of client side cache. 
+	RedisClientCacheHit = "db.redis.client.cache.hit"
+
+	// RedisClientCacheTTL captures the Time-To-Live (TTL) of a cached entry in the client.
+	RedisClientCacheTTL = "db.redis.client.cache.ttl"
+
+	// RedisClientCachePTTL is the remaining PTTL in seconds of client side cache.
+	RedisClientCachePTTL = "db.redis.client.cache.pttl"
+
+	// RedisClientCachePXAT is the remaining PXAT in seconds of client side cache.
+	RedisClientCachePXAT = "db.redis.client.cache.pxat"
+)
+
+// Valkey tags.
+const (
+	// ValkeyRawCommand allows to set the raw command for tags.
+	ValkeyRawCommand = "valkey.raw_command"
+
+	// ValkeyClientCacheHit is the remaining TTL in seconds of client side cache.
+	ValkeyClientCacheHit = "db.valkey.client.cache.hit"
+
+	// ValkeyClientCacheTTL captures the Time-To-Live (TTL) of a cached entry in the client.
+	ValkeyClientCacheTTL = "db.valkey.client.cache.ttl"
+
+	// ValkeyClientCachePTTL is the remaining PTTL in seconds of client side cache.
+	ValkeyClientCachePTTL = "db.valkey.client.cache.pttl"
+
+	// ValkeyClientCachePXAT is the remaining PXAT in seconds of client side cache.
+	ValkeyClientCachePXAT = "db.valkey.client.cache.pxat"
+)
+
+// Cassandra tags.
+const (
+	// CassandraConsistencyLevel is the tag name to set for consistency level.
+	CassandraConsistencyLevel = "cassandra.consistency_level"
+
+	// CassandraCluster specifies the tag name that is used to set the cluster.
+	CassandraCluster = "cassandra.cluster"
+
+	// CassandraDatacenter specifies the tag name that is used to set the datacenter.
+	CassandraDatacenter = "cassandra.datacenter"
+
+	// CassandraRowCount specifies the tag name to use when setting the row count.
+	CassandraRowCount = "cassandra.row_count"
+
+	// CassandraKeyspace is used as tag name for setting the key space.
+	CassandraKeyspace = "cassandra.keyspace"
+
+	// CassandraPaginated specifies the tag name for paginated queries.
+	CassandraPaginated = "cassandra.paginated"
+
+	// CassandraContactPoints holds the list of cassandra initial seed nodes used to discover the cluster.
+	CassandraContactPoints = "db.cassandra.contact.points"
+
+	// CassandraHostID represents the host ID for this operation.
+	CassandraHostID = "db.cassandra.host.id"
+)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/graphql.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/graphql.go
new file mode 100644
index 00000000..df03bd89
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/graphql.go
@@ -0,0 +1,10 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package ext
+
+const (
+	GraphqlQueryErrorEvent = "dd.graphql.query.error"
+)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/log_key.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/log_key.go
new file mode 100644
index 00000000..b17e098f
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/log_key.go
@@ -0,0 +1,13 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext
+
+const (
+	// LogKeyTraceID is used by log integrations to correlate logs with a given trace.
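+	// For example, a zap-based integration might attach both keys to each log
+	// record (an illustrative sketch; zap comes from the host application here):
+	//
+	//	logger.Info("handling request",
+	//		zap.String(ext.LogKeyTraceID, span.Context().TraceID()),
+	//		zap.Uint64(ext.LogKeySpanID, span.Context().SpanID()),
+	//	)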
+	LogKeyTraceID = "dd.trace_id"
+	// LogKeySpanID is used by log integrations to correlate logs with a given span.
+	LogKeySpanID = "dd.span_id"
+)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/messaging.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/messaging.go
new file mode 100644
index 00000000..31af602d
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/messaging.go
@@ -0,0 +1,28 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package ext
+
+const (
+	// MessagingSystem identifies which messaging system created this span (kafka, rabbitmq, amazonsqs, googlepubsub...)
+	MessagingSystem = "messaging.system"
+	// MessagingDestinationName identifies the message destination name.
+	MessagingDestinationName = "messaging.destination.name"
+)
+
+// Available values for messaging.system.
+const (
+	MessagingSystemGCPPubsub = "googlepubsub"
+	MessagingSystemKafka     = "kafka"
+	MessagingSystemSQS       = "amazonsqs"
+)
+
+// Kafka tags.
+const (
+	// MessagingKafkaPartition defines the Kafka partition the trace is associated with.
+	MessagingKafkaPartition = "messaging.kafka.partition"
+	// KafkaBootstrapServers holds a comma separated list of bootstrap servers as defined in producer or consumer config.
+	KafkaBootstrapServers = "messaging.kafka.bootstrap.servers"
+)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/peer.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/peer.go
new file mode 100644
index 00000000..3bca040b
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/peer.go
@@ -0,0 +1,21 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext
+
+const (
+	// PeerHostIPV4 records the IPv4 host address of the peer.
+	PeerHostIPV4 = "peer.ipv4"
+	// PeerHostIPV6 records the IPv6 host address of the peer.
+	PeerHostIPV6 = "peer.ipv6"
+	// PeerService records the service name of the peer service.
+	PeerService = "peer.service"
+	// PeerHostname records the host name of the peer.
+	// Legacy: Kept for backwards compatibility. Use NetworkDestinationName for hostname
+	// and NetworkDestinationIP for IP addresses.
+	PeerHostname = "peer.hostname"
+	// PeerPort records the port number of the peer.
+	PeerPort = "peer.port"
+)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/priority.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/priority.go
new file mode 100644
index 00000000..8a5c0fc4
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/priority.go
@@ -0,0 +1,27 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext
+
+// Priority is a hint given to the backend so that it knows which traces to reject or keep.
+// In a distributed context, it should be set before any context propagation (fork, RPC calls) to be effective.
+
+const (
+	// PriorityUserReject informs the backend that a trace should be rejected and not stored.
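+	// (In practice this value is usually applied by setting the ManualDrop tag
+	// on a span rather than by writing the numeric priority directly.)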
+	// This should be used by user code or configuration overriding the default priority.
+	PriorityUserReject = -1
+
+	// PriorityAutoReject informs the backend that a trace should be rejected and not stored.
+	// This is used by the builtin sampler.
+	PriorityAutoReject = 0
+
+	// PriorityAutoKeep informs the backend that a trace should be kept and stored.
+	// This is used by the builtin sampler.
+	PriorityAutoKeep = 1
+
+	// PriorityUserKeep informs the backend that a trace should be kept and stored.
+	// This should be used by user code or configuration overriding the default priority.
+	PriorityUserKeep = 2
+)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/rpc.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/rpc.go
new file mode 100644
index 00000000..e7c43082
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/rpc.go
@@ -0,0 +1,34 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package ext
+
+const (
+	// RPCSystem identifies the RPC remoting system.
+	RPCSystem = "rpc.system"
+	// RPCService represents the full (logical) name of the service being called, including its package name,
+	// if applicable. Note this is the logical name of the service from the RPC interface perspective,
+	// which can be different from the name of any implementing class.
+	RPCService = "rpc.service"
+	// RPCMethod represents the name of the (logical) method being called. Note this is the logical name of the
+	// method from the RPC interface perspective, which can be different from the name of
+	// any implementing method/function.
+	RPCMethod = "rpc.method"
+)
+
+// Well-known identifiers for rpc.system.
+const (
+	// RPCSystemGRPC identifies gRPC.
+	RPCSystemGRPC = "grpc"
+	// RPCSystemTwirp identifies Twirp.
+	RPCSystemTwirp = "twirp"
+)
+
+// gRPC specific tags.
+const (
+	// GRPCFullMethod represents the full name of the logical method being called, following the
+	// format: /$package.$service/$method
+	GRPCFullMethod = "rpc.grpc.full_method"
+)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/span_kind.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/span_kind.go
new file mode 100644
index 00000000..71a3ce50
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/span_kind.go
@@ -0,0 +1,32 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext
+
+// span.kind values are set per span following the OpenTelemetry standard and
+// fall under one of the values client, server, producer, consumer, or internal.
+const (
+
+	// SpanKindServer indicates that the span covers server-side handling of a synchronous RPC or other remote request.
+	// This span should not have any local parents but can have other distributed parents.
+	SpanKindServer = "server"
+
+	// SpanKindClient indicates that the span describes a request to some remote service.
+	// This span should not have any local children but can have other distributed children.
+	SpanKindClient = "client"
+
+	// SpanKindConsumer indicates that the span describes a child of an asynchronous producer request.
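+	// (For example, a Kafka message handler span; the matching producer span is
+	// its distributed parent.)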
+	// This span should not have any local parents but can have other distributed parents.
+	SpanKindConsumer = "consumer"
+
+	// SpanKindProducer indicates that the span describes the initiator of an asynchronous request.
+	// This span should not have any local children but can have other distributed children.
+	SpanKindProducer = "producer"
+
+	// SpanKindInternal indicates that the span represents an internal operation within an application,
+	// as opposed to an operation with remote parents or children.
+	// This is the default value and is not explicitly set, to save memory.
+	SpanKindInternal = "internal"
+)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/system.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/system.go
new file mode 100644
index 00000000..163720a4
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/system.go
@@ -0,0 +1,12 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package ext
+
+// Standard system metadata names.
+const (
+	// Pid is the pid of the traced process.
+	Pid = "process_id"
+)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/tags.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/tags.go
new file mode 100644
index 00000000..54505b2a
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/ext/tags.go
@@ -0,0 +1,147 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package ext contains a set of Datadog-specific constants. Most of them are used
+// for setting span metadata.
+package ext
+
+const (
+	// TargetHost sets the target host address.
+	// Legacy: Kept for backwards compatibility. Use NetworkDestinationName for hostname
+	// and NetworkDestinationIP for IP addresses.
+	TargetHost = "out.host"
+
+	// NetworkDestinationName is the remote hostname or similar where the outbound connection is being made to.
+	NetworkDestinationName = "network.destination.name"
+
+	// NetworkDestinationIP is the remote address where the outbound connection is being made to.
+	NetworkDestinationIP = "network.destination.ip"
+
+	// NetworkClientIP is the client IP address.
+	NetworkClientIP = "network.client.ip"
+
+	// TargetPort sets the target host port.
+	// Legacy: Kept for backwards compatibility. Use NetworkDestinationPort instead.
+	TargetPort = "out.port"
+
+	// TargetDB sets the target db.
+	TargetDB = "out.db"
+
+	// NetworkDestinationPort is the remote port number of the outbound connection.
+	NetworkDestinationPort = "network.destination.port"
+
+	// SQLType sets the sql type tag.
+	SQLType = "sql"
+
+	// SQLQuery sets the sql query tag on a span.
+	SQLQuery = "sql.query"
+
+	// HTTPMethod specifies the HTTP method used in a span.
+	HTTPMethod = "http.method"
+
+	// HTTPCode sets the HTTP status code as a tag.
+	HTTPCode = "http.status_code"
+
+	// HTTPRoute is the route value of the HTTP request.
+	HTTPRoute = "http.route"
+
+	// HTTPURL sets the HTTP URL for a span.
+	HTTPURL = "http.url"
+
+	// HTTPUserAgent is the user agent header value of the HTTP request.
+	HTTPUserAgent = "http.useragent"
+
+	// HTTPClientIP sets the HTTP client IP tag.
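+	// (Typically resolved from the request's remote address or forwarded-for
+	// style headers; exactly how is integration-specific.)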
+	HTTPClientIP = "http.client_ip"
+
+	// HTTPRequestHeaders sets the HTTP request headers partial tag.
+	// This tag is meant to be composed, i.e. http.request.headers.headerX, http.request.headers.headerY, etc...
+	// See https://docs.datadoghq.com/tracing/trace_collection/tracing_naming_convention/#http-requests
+	HTTPRequestHeaders = "http.request.headers"
+
+	// SpanName is a pseudo-key for setting a span's operation name by means of
+	// a tag. It is mostly here to facilitate vendor-agnostic frameworks like OpenTracing
+	// and OpenCensus.
+	SpanName = "span.name"
+
+	// SpanType defines the Span type (web, db, cache).
+	SpanType = "span.type"
+
+	// ServiceName defines the Service name for this Span.
+	ServiceName = "service.name"
+
+	// Version is a tag that specifies the current application version.
+	Version = "version"
+
+	// ResourceName defines the Resource name for the Span.
+	ResourceName = "resource.name"
+
+	// Error specifies the error tag. Its value is usually of type "error".
+	Error = "error"
+
+	// ErrorMsg specifies the error message.
+	ErrorMsg = "error.message"
+
+	// ErrorType specifies the error type.
+	ErrorType = "error.type"
+
+	// ErrorStack specifies the stack dump.
+	ErrorStack = "error.stack"
+
+	// ErrorDetails holds details about an error which implements a formatter.
+	ErrorDetails = "error.details"
+
+	// Environment specifies the environment to use with a trace.
+	Environment = "env"
+
+	// EventSampleRate specifies the rate at which this span will be sampled
+	// as an APM event.
+	EventSampleRate = "_dd1.sr.eausr"
+
+	// AnalyticsEvent specifies whether the span should be recorded as a Trace
+	// Search & Analytics event.
+	AnalyticsEvent = "analytics.event"
+
+	// ManualKeep is a tag which specifies that the trace to which this span
+	// belongs should be kept when set to true.
+	ManualKeep = "manual.keep"
+
+	// ManualDrop is a tag which specifies that the trace to which this span
+	// belongs should be dropped when set to true.
+	ManualDrop = "manual.drop"
+
+	// RuntimeID is a tag that contains a unique id for this process.
+	RuntimeID = "runtime-id"
+
+	// Component defines the library integration the span originated from.
+	Component = "component"
+
+	// SpanKind defines the kind of span based on OTel requirements (client, server, producer, consumer).
+	SpanKind = "span.kind"
+
+	// MapSpanStart is used by Span.AsMap to store the span start.
+	MapSpanStart = "_ddtrace.span_start"
+
+	// MapSpanDuration is used by Span.AsMap to store the span duration.
+	MapSpanDuration = "_ddtrace.span_duration"
+
+	// MapSpanID is used by Span.AsMap to store the span id.
+	MapSpanID = "_ddtrace.span_id"
+
+	// MapSpanTraceID is used by Span.AsMap to store the span trace id.
+	MapSpanTraceID = "_ddtrace.span_traceid"
+
+	// MapSpanParentID is used by Span.AsMap to store the span parent id.
+	MapSpanParentID = "_ddtrace.span_parentid"
+
+	// MapSpanError is used by Span.AsMap to store the span error value.
+	MapSpanError = "_ddtrace.span_error"
+
+	// MapSpanEvents is used by Span.AsMap to store the spanEvents value.
+	MapSpanEvents = "_ddtrace.span_events"
+
+	// CloudResourceID is the cloud provider resource identifier.
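+	// (On AWS this is typically an ARN, e.g.
+	// arn:aws:lambda:us-east-1:123456789012:function:my-function; the value
+	// here is illustrative.)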
+	CloudResourceID = "cloud.resource_id"
+)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/internal/globaltracer.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/internal/globaltracer.go
new file mode 100644
index 00000000..cb8c1cc1
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/internal/globaltracer.go
@@ -0,0 +1,64 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package internal
+
+import "sync/atomic"
+
+var (
+	// globalTracer stores the current tracer as *ddtrace/tracer.Tracer (pointer to interface). The
+	// atomic.Value type requires stored values to be of a consistent type, which requires using the
+	// same type for the stored value.
+	globalTracer atomic.Value
+)
+
+// tracerLike is an interface to restrict the types that can be stored in `globalTracer`.
+// This interface doesn't leak to the users. We are leveraging the type system to generate
+// the functions below for `tracer.Tracer` without creating an import cycle.
+type tracerLike interface {
+	Flush()
+	Stop()
+}
+
+// SetGlobalTracer sets the global tracer to t.
+// It is the responsibility of the caller to ensure that the value is `tracer.Tracer`.
+func SetGlobalTracer[T tracerLike](t T) {
+	if (tracerLike)(t) == nil {
+		panic("ddtrace/internal: SetGlobalTracer called with nil")
+	}
+	old := globalTracer.Swap(&t)
+	if old == nil {
+		return
+	}
+	oldTracer := *old.(*T)
+	oldTracer.Stop()
+}
+
+// GetGlobalTracer returns the current global tracer.
+// It is the responsibility of the caller to ensure that calling code uses `tracer.Tracer`
+// as the generic type.
+func GetGlobalTracer[T tracerLike]() T {
+	return *globalTracer.Load().(*T)
+}
+
+// mockTracerLike is an interface to restrict the types that can be stored in `globalTracer`.
+// It represents the mock tracer type used in tests and prevents calling StoreGlobalTracer
+// with a normal tracer.Tracer.
+type mockTracerLike interface {
+	tracerLike
+	Reset()
+}
+
+// StoreGlobalTracer is a helper function to set the global tracer internally without stopping the old one.
+// WARNING: this is used by the civisibilitymocktracer working as a wrapper around the global tracer, hence we don't stop the tracer.
+// DO NOT USE THIS FUNCTION ON A NORMAL tracer.Tracer.
+func StoreGlobalTracer[M mockTracerLike, T tracerLike](m M) {
+	if (mockTracerLike)(m) == nil {
+		panic("ddtrace/internal: StoreGlobalTracer called with nil")
+	}
+	// convert the mock tracer-like value to the actual tracer-like type (avoids a panic from storing different types in the atomic.Value)
+	t := (tracerLike)(m).(T)
+	globalTracer.Store(&t)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/internal/tracerstats/stats.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/internal/tracerstats/stats.go
new file mode 100644
index 00000000..8b5d8402
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/internal/tracerstats/stats.go
@@ -0,0 +1,91 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracerstats
+
+import "sync/atomic"
+
+// Events are things that happen in the tracer such as a trace being dropped or
+// a span being started.
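+// Each Signal call increments an atomic counter for the given event, and
+// Count atomically reads and resets that counter.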
+// The counts are submitted as metrics.
+type Event int
+
+const (
+	SpanStarted Event = iota
+	SpansFinished
+	TracesDropped
+	DroppedP0Traces
+	DroppedP0Spans
+	PartialTraces
+
+	// Read-only. We duplicate some of the stats so that we can send them to the
+	// agent in headers as well as counting them with statsd.
+	AgentDroppedP0Traces
+	AgentDroppedP0Spans
+)
+
+// These integers track metrics about spans and traces as they are started,
+// finished, and dropped.
+var spansStarted, spansFinished, tracesDropped uint32
+
+// Records the number of dropped P0 traces and spans.
+var droppedP0Traces, droppedP0Spans uint32
+
+// partialTraces tracks the number of partially dropped traces.
+var partialTraces uint32
+
+// Copies of the stats to be sent to the agent.
+var agentDroppedP0Traces, agentDroppedP0Spans uint32
+
+func Signal(e Event, count uint32) {
+	switch e {
+	case SpanStarted:
+		atomic.AddUint32(&spansStarted, count)
+	case SpansFinished:
+		atomic.AddUint32(&spansFinished, count)
+	case TracesDropped:
+		atomic.AddUint32(&tracesDropped, count)
+	case DroppedP0Traces:
+		atomic.AddUint32(&droppedP0Traces, count)
+		atomic.AddUint32(&agentDroppedP0Traces, count)
+	case DroppedP0Spans:
+		atomic.AddUint32(&droppedP0Spans, count)
+		atomic.AddUint32(&agentDroppedP0Spans, count)
+	case PartialTraces:
+		atomic.AddUint32(&partialTraces, count)
+	}
+}
+
+func Count(e Event) uint32 {
+	switch e {
+	case SpanStarted:
+		return atomic.SwapUint32(&spansStarted, 0)
+	case SpansFinished:
+		return atomic.SwapUint32(&spansFinished, 0)
+	case TracesDropped:
+		return atomic.SwapUint32(&tracesDropped, 0)
+	case DroppedP0Traces:
+		return atomic.SwapUint32(&droppedP0Traces, 0)
+	case DroppedP0Spans:
+		return atomic.SwapUint32(&droppedP0Spans, 0)
+	case PartialTraces:
+		return atomic.SwapUint32(&partialTraces, 0)
+	case AgentDroppedP0Traces:
+		return atomic.SwapUint32(&agentDroppedP0Traces, 0)
+	case AgentDroppedP0Spans:
+		return atomic.SwapUint32(&agentDroppedP0Spans, 0)
+	}
+	return 0
+}
+
+func Reset() {
+	atomic.StoreUint32(&spansStarted, 0)
+	atomic.StoreUint32(&spansFinished, 0)
+	atomic.StoreUint32(&tracesDropped, 0)
+	atomic.StoreUint32(&droppedP0Traces, 0)
+	atomic.StoreUint32(&droppedP0Spans, 0)
+	atomic.StoreUint32(&partialTraces, 0)
+	atomic.StoreUint32(&agentDroppedP0Traces, 0)
+	atomic.StoreUint32(&agentDroppedP0Spans, 0)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/civisibilitymocktracer.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/civisibilitymocktracer.go
new file mode 100644
index 00000000..ded077ab
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/civisibilitymocktracer.go
@@ -0,0 +1,174 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
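+
+// Illustrative use from a test (a sketch; Start returns the public mock tracer
+// interface, which this type implements when CI Visibility is active):
+//
+//	mt := mocktracer.Start()
+//	defer mt.Stop()
+//	tracer.StartSpan("web.request").Finish()
+//	spans := mt.FinishedSpans() // captured by the mock; CI Visibility spans are excluded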
+
+package mocktracer
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+	"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+	"github.com/DataDog/dd-trace-go/v2/internal/civisibility"
+	"github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants"
+	"github.com/DataDog/dd-trace-go/v2/internal/datastreams"
+)
+
+type civisibilitymocktracer struct {
+	mock   *mocktracer   // mock tracer
+	real   tracer.Tracer // real tracer (for the test optimization/CI Visibility spans)
+	isnoop atomic.Bool
+}
+
+var (
+	_ tracer.Tracer = (*civisibilitymocktracer)(nil)
+	_ Tracer        = (*civisibilitymocktracer)(nil)
+
+	realSpans      = make(map[*tracer.Span]bool)
+	realSpansMutex sync.Mutex
+)
+
+// newCIVisibilityMockTracer creates a tracer that uses the mock tracer for all
+// spans except the CI Visibility spans.
+func newCIVisibilityMockTracer() *civisibilitymocktracer {
+	currentTracer := getGlobalTracer()
+	// let's check if the current tracer is already a civisibilitymocktracer
+	// if so, we need to get the real tracer from it
+	if currentCIVisibilityMockTracer, ok := currentTracer.(*civisibilitymocktracer); ok && currentCIVisibilityMockTracer != nil {
+		currentTracer = currentCIVisibilityMockTracer.real
+	}
+	return &civisibilitymocktracer{
+		mock: newMockTracer(),
+		real: currentTracer,
+	}
+}
+
+// SentDSMBacklogs returns the Data Streams Monitoring backlogs that have been sent by the mock tracer.
+// If the tracer is in noop mode, it returns nil. Otherwise, it flushes the processor and returns
+// all captured backlogs from the mock transport.
+func (t *civisibilitymocktracer) SentDSMBacklogs() []datastreams.Backlog {
+	if t.isnoop.Load() {
+		return nil
+	}
+	t.mock.dsmProcessor.Flush()
+	return t.mock.dsmTransport.backlogs
+}
+
+// Stop deactivates the CI Visibility mock tracer by setting it to noop mode and stopping
+// the Data Streams Monitoring processor. This should be called when testing has finished.
+func (t *civisibilitymocktracer) Stop() {
+	t.isnoop.Store(true)
+	t.mock.dsmProcessor.Stop()
+	if civisibility.GetState() == civisibility.StateExiting {
+		t.real.Stop()
+		t.real = &tracer.NoopTracer{}
+	}
+}
+
+// StartSpan creates a new span with the given operation name and options. If the span type
+// indicates it's a CI Visibility span (like a test session, module, suite, or individual test),
+// it uses the real tracer to create the span. For all other spans, it uses the mock tracer.
+// If the tracer is in noop mode, it returns nil.
+func (t *civisibilitymocktracer) StartSpan(operationName string, opts ...tracer.StartSpanOption) *tracer.Span {
+	if t.real != nil {
+		var cfg tracer.StartSpanConfig
+		for _, fn := range opts {
+			fn(&cfg)
+		}
+
+		if spanType, ok := cfg.Tags[ext.SpanType]; ok &&
+			(spanType == constants.SpanTypeTestSession || spanType == constants.SpanTypeTestModule ||
+				spanType == constants.SpanTypeTestSuite || spanType == constants.SpanTypeTest) {
+			// If the span is a CI Visibility span, use the real tracer to create it.
+			realSpan := t.real.StartSpan(operationName, opts...)
+			realSpansMutex.Lock()
+			defer realSpansMutex.Unlock()
+			realSpans[realSpan] = true
+			return realSpan
+		}
+	}
+
+	if t.isnoop.Load() {
+		return nil
+	}
+
+	// Otherwise, use the mock tracer to create it.
+	return t.mock.StartSpan(operationName, opts...)
+}
+
+// FinishSpan marks the given span as finished in the mock tracer. This is called by spans
+// when they finish, adding them to the list of finished spans for later inspection.
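+// Spans that were created by the real tracer for CI Visibility are recognized
+// here and skipped, so they are not recorded by the mock.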
+func (t *civisibilitymocktracer) FinishSpan(s *tracer.Span) {
+	realSpansMutex.Lock()
+	defer realSpansMutex.Unlock()
+	// Check if the span is a real span (i.e., created by the real tracer).
+	if _, isRealSpan := realSpans[s]; isRealSpan {
+		delete(realSpans, s)
+		return
+	}
+	if t.isnoop.Load() {
+		return
+	}
+	t.mock.FinishSpan(s)
+}
+
+// GetDataStreamsProcessor returns the Data Streams Monitoring processor used by the mock tracer.
+// If the tracer is in noop mode, it returns nil. This processor is used to monitor
+// and record data stream metrics.
+func (t *civisibilitymocktracer) GetDataStreamsProcessor() *datastreams.Processor {
+	if t.isnoop.Load() {
+		return nil
+	}
+	return t.mock.dsmProcessor
+}
+
+// OpenSpans returns the set of started spans that have not been finished yet.
+// This is useful for verifying spans are properly finished in tests.
+func (t *civisibilitymocktracer) OpenSpans() []*Span {
+	return t.mock.OpenSpans()
+}
+
+// FinishedSpans returns the set of spans that have been finished.
+// This allows inspection of spans after they've completed for testing and verification.
+func (t *civisibilitymocktracer) FinishedSpans() []*Span {
+	return t.mock.FinishedSpans()
+}
+
+// Reset clears all spans (both open and finished) from the mock tracer.
+// This is especially useful when running tests in a loop, where a clean state
+// is desired between test iterations.
+func (t *civisibilitymocktracer) Reset() {
+	t.mock.Reset()
+}
+
+// Extract retrieves a SpanContext from the carrier using the mock tracer's propagator.
+// If the tracer is in noop mode, it returns nil. This is used for distributed tracing
+// to continue traces across process boundaries.
+func (t *civisibilitymocktracer) Extract(carrier interface{}) (*tracer.SpanContext, error) {
+	if t.isnoop.Load() {
+		return nil, nil
+	}
+	return t.mock.Extract(carrier)
+}
+
+// Inject injects the SpanContext into the carrier using the mock tracer's propagator.
+// If the tracer is in noop mode, it returns nil. This is used for distributed tracing
+// to propagate trace information across process boundaries.
+func (t *civisibilitymocktracer) Inject(context *tracer.SpanContext, carrier interface{}) error {
+	if t.isnoop.Load() {
+		return nil
+	}
+	return t.mock.Inject(context, carrier)
+}
+
+// TracerConf returns the configuration of the underlying real tracer.
+func (t *civisibilitymocktracer) TracerConf() tracer.TracerConf {
+	return t.real.TracerConf()
+}
+
+// Flush forces a flush of both the mock tracer and the real tracer.
+// This ensures that all buffered spans are processed and ready for inspection.
+func (t *civisibilitymocktracer) Flush() {
+	t.mock.Flush()
+	t.real.Flush()
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/data_streams.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/data_streams.go
new file mode 100644
index 00000000..100cc2b2
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/data_streams.go
@@ -0,0 +1,45 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package mocktracer
+
+import (
+	"compress/gzip"
+	"net/http"
+
+	"github.com/tinylib/msgp/msgp"
+
+	"github.com/DataDog/dd-trace-go/v2/internal/datastreams"
+)
+
+type mockDSMTransport struct {
+	backlogs []datastreams.Backlog
+}
+
+// RoundTrip decodes the gzip-compressed DSM stats payload carried by the
+// request, records its backlogs, and returns a dummy response.
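+// It implements http.RoundTripper so that, in tests, this mock transport can
+// stand in for the HTTP transport that would normally reach the Datadog agent.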
+func (t *mockDSMTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	gzipReader, err := gzip.NewReader(req.Body)
+	if err != nil {
+		return nil, err
+	}
+	var p datastreams.StatsPayload
+	err = msgp.Decode(gzipReader, &p)
+	if err != nil {
+		return nil, err
+	}
+	for _, bucket := range p.Stats {
+		t.backlogs = append(t.backlogs, bucket.Backlogs...)
+	}
+	return &http.Response{
+		StatusCode:    200,
+		Proto:         "HTTP/1.1",
+		ProtoMajor:    1,
+		ProtoMinor:    1,
+		Request:       req,
+		ContentLength: -1,
+		Body:          http.NoBody,
+	}, nil
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/mockspan.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/mockspan.go
new file mode 100644
index 00000000..07b532a0
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/mockspan.go
@@ -0,0 +1,269 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package mocktracer // import "github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer"
+
+import (
+	"encoding/json"
+	"fmt"
+	"testing"
+	"time"
+	_ "unsafe" // Needed for go:linkname directive.
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+	"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+)
+
+//go:linkname spanStart github.com/DataDog/dd-trace-go/v2/ddtrace/tracer.spanStart
+func spanStart(operationName string, options ...tracer.StartSpanOption) *tracer.Span
+
+func newSpan(operationName string, cfg *tracer.StartSpanConfig) *tracer.Span {
+	return spanStart(operationName, func(c *tracer.StartSpanConfig) {
+		*c = *cfg
+	})
+}
+
+type Span struct {
+	sp    *tracer.Span
+	m     map[string]interface{}
+	links []tracer.SpanLink
+}
+
+func MockSpan(s *tracer.Span) *Span {
+	if s == nil {
+		return nil
+	}
+	return &Span{sp: s, m: s.AsMap()}
+}
+
+func (s *Span) OperationName() string {
+	if s == nil {
+		return ""
+	}
+	return s.m[ext.SpanName].(string)
+}
+
+func (s *Span) SetTag(k string, v interface{}) {
+	if s == nil {
+		return
+	}
+	s.m[k] = v
+	s.sp.SetTag(k, v)
+}
+
+func (s *Span) Tag(k string) interface{} {
+	if s == nil {
+		return nil
+	}
+	// It's possible that a tag wasn't set through mocktracer.Span.SetTag,
+	// in which case we need to retrieve it from the underlying tracer.Span.
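+	// Prefer the live value from the underlying span so tags set there
+	// directly are still visible through the mock span.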
+	v := s.sp.AsMap()[k]
+	if v != nil {
+		return v
+	}
+	v, ok := s.m[k]
+	if ok {
+		return v
+	}
+	return nil
+}
+
+func (s *Span) Tags() map[string]interface{} {
+	if s == nil {
+		return make(map[string]interface{})
+	}
+	tm := s.sp.AsMap()
+	m := make(map[string]interface{}, len(s.m)+len(tm))
+	extractTags(s.m, m)
+	extractTags(tm, m)
+	return m
+}
+
+func extractTags(src, m map[string]interface{}) {
+	for k, v := range src {
+		switch k {
+		case ext.MapSpanStart, ext.MapSpanDuration, ext.MapSpanID,
+			ext.MapSpanTraceID, ext.MapSpanParentID, ext.MapSpanError,
+			ext.MapSpanEvents:
+			// Skip AsMap's internal bookkeeping entries; only real tags are copied.
+			continue
+		}
+		m[k] = v
+	}
+}
+
+func (s *Span) String() string {
+	if s == nil {
+		return ""
+	}
+	sc := s.sp.Context()
+	baggage := make(map[string]string)
+	sc.ForeachBaggageItem(func(k, v string) bool {
+		baggage[k] = v
+		return true
+	})
+
+	return fmt.Sprintf(`
+name: %s
+tags: %#v
+start: %s
+duration: %s
+id: %d
+parent: %d
+trace: %v
+baggage: %#v
+`, s.OperationName(), s.Tags(), s.StartTime(), s.Duration(), sc.SpanID(), s.ParentID(), sc.TraceID(), baggage)
+}
+
+func (s *Span) ParentID() uint64 {
+	if s == nil {
+		return 0
+	}
+	return s.m[ext.MapSpanParentID].(uint64)
+}
+
+// Context returns the SpanContext of this Span.
+func (s *Span) Context() *tracer.SpanContext { return s.sp.Context() }
+
+// SetUser associates user information to the current trace which the
+// provided span belongs to. The options can be used to tune which user
+// bit of information gets monitored. This mockup only sets the user
+// information as span tags of the root span of the current trace.
+func (s *Span) SetUser(id string, opts ...tracer.UserMonitoringOption) {
+	root := s.sp.Root()
+	if root == nil {
+		return
+	}
+
+	cfg := tracer.UserMonitoringConfig{
+		Metadata: make(map[string]string),
+	}
+	for _, fn := range opts {
+		fn(&cfg)
+	}
+
+	root.SetTag("usr.id", id)
+	root.SetTag("usr.login", cfg.Login)
+	root.SetTag("usr.org", cfg.Org)
+	root.SetTag("usr.email", cfg.Email)
+	root.SetTag("usr.name", cfg.Name)
+	root.SetTag("usr.role", cfg.Role)
+	root.SetTag("usr.scope", cfg.Scope)
+	root.SetTag("usr.session_id", cfg.SessionID)
+
+	for k, v := range cfg.Metadata {
+		root.SetTag(fmt.Sprintf("usr.%s", k), v)
+	}
+}
+
+func (s *Span) SpanID() uint64 {
+	if s == nil {
+		return 0
+	}
+	return s.m[ext.MapSpanID].(uint64)
+}
+
+func (s *Span) TraceID() uint64 {
+	if s == nil {
+		return 0
+	}
+	return s.m[ext.MapSpanTraceID].(uint64)
+}
+
+func (s *Span) StartTime() time.Time {
+	if s == nil {
+		return time.Unix(0, 0)
+	}
+	return time.Unix(0, s.m[ext.MapSpanStart].(int64))
+}
+
+func (s *Span) Duration() time.Duration {
+	if s == nil {
+		return time.Duration(0)
+	}
+	return time.Duration(s.m[ext.MapSpanDuration].(int64))
+}
+
+func (s *Span) FinishTime() time.Time {
+	if s == nil {
+		return time.Unix(0, 0)
+	}
+	return s.StartTime().Add(s.Duration())
+}
+
+func (s *Span) Unwrap() *tracer.Span {
+	if s == nil {
+		return nil
+	}
+	return s.sp
+}
+
+// Links returns the span's span links.
+func (s *Span) Links() []tracer.SpanLink {
+	payload, ok := s.Tag("_dd.span_links").(string)
+	if !ok {
+		return nil
+	}
+	// Unmarshal the JSON payload into the SpanLink slice.
+	var links []tracer.SpanLink
+	if err := json.Unmarshal([]byte(payload), &links); err != nil {
+		log.Error("mocktracer: failed to unmarshal span links: %s", err.Error())
+		return nil
+	}
+	return links
+}
+
+// SpanEvent represents a span event from a mockspan.
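+// Its fields are decoded from the JSON-encoded events produced by span.AsMap
+// (see Events below).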
+type SpanEvent struct { + Name string `json:"name"` + TimeUnixNano uint64 `json:"time_unix_nano"` + Attributes map[string]any `json:"attributes"` +} + +// AssertAttributes compares the given attributes with the current event ones. +// The comparison is made against the JSON representation of both, since the data comes from +// the span.AsMap() function which provides the JSON representation of the events, and some types +// could have changed (e.g. 1 could be transformed to 1.0 after marshal/unmarshal). +func (s SpanEvent) AssertAttributes(t *testing.T, wantAttrs map[string]any) { + t.Helper() + want, err := json.Marshal(wantAttrs) + require.NoError(t, err) + got, err := json.Marshal(s.Attributes) + require.NoError(t, err) + assert.Equal(t, string(want), string(got)) +} + +// Events returns the current span events. +func (s *Span) Events() []SpanEvent { + if s == nil { + return nil + } + eventsJSON, ok := s.m[ext.MapSpanEvents].(string) + if !ok { + return nil + } + + var events []SpanEvent + if err := json.Unmarshal([]byte(eventsJSON), &events); err != nil { + log.Error("mocktracer: failed to unmarshal span events: %s", err.Error()) + return nil + } + return events +} + +// Integration returns the component from which the mockspan was created. +func (s *Span) Integration() string { + return s.Tag(ext.Component).(string) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/mockspancontext.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/mockspancontext.go new file mode 100644 index 00000000..981693e4 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/mockspancontext.go @@ -0,0 +1,84 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
+ +package mocktracer + +import ( + "strconv" + "sync" + "sync/atomic" + + "github.com/DataDog/dd-trace-go/v2/ddtrace" + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" +) + +var _ ddtrace.SpanContext = (*spanContext)(nil) + +type spanContext struct { + sync.RWMutex // guards below fields + baggage map[string]string + priority int + hasPriority bool + + spanID uint64 + traceID uint64 + span *tracer.Span +} + +func (sc *spanContext) TraceID() string { return strconv.FormatUint(sc.traceID, 10) } + +func (sc *spanContext) TraceIDBytes() [16]byte { return [16]byte{} } + +func (sc *spanContext) TraceIDLower() uint64 { return sc.traceID } + +func (sc *spanContext) SpanID() uint64 { return sc.spanID } + +func (sc *spanContext) ForeachBaggageItem(handler func(k, v string) bool) { + sc.RLock() + defer sc.RUnlock() + for k, v := range sc.baggage { + if !handler(k, v) { + break + } + } +} + +func (sc *spanContext) setBaggageItem(k, v string) { + sc.Lock() + defer sc.Unlock() + if sc.baggage == nil { + sc.baggage = make(map[string]string, 1) + } + sc.baggage[k] = v +} + +func (sc *spanContext) baggageItem(k string) string { + sc.RLock() + defer sc.RUnlock() + return sc.baggage[k] +} + +func (sc *spanContext) setSamplingPriority(p int) { + sc.Lock() + defer sc.Unlock() + sc.priority = p + sc.hasPriority = true +} + +func (sc *spanContext) hasSamplingPriority() bool { + sc.RLock() + defer sc.RUnlock() + return sc.hasPriority +} + +func (sc *spanContext) samplingPriority() int { + sc.RLock() + defer sc.RUnlock() + return sc.priority +} + +var mockIDSource uint64 = 123 + +func nextID() uint64 { return atomic.AddUint64(&mockIDSource, 1) } diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/mocktracer.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/mocktracer.go new file mode 100644 index 00000000..dbbb9dec --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/mocktracer/mocktracer.go @@ -0,0 +1,216 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package mocktracer provides a mock implementation of the tracer used in testing. It +// allows querying spans generated at runtime, without having them actually be sent to +// an agent. It provides a simple way to test that instrumentation is running correctly +// in your application. +// +// Simply call "Start" at the beginning of your tests to start and obtain an instance +// of the mock tracer. +package mocktracer + +import ( + "net/http" + "net/url" + "sync" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/internal" + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + utils "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/civisibility" + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants" + "github.com/DataDog/dd-trace-go/v2/internal/datastreams" + + "github.com/DataDog/datadog-go/v5/statsd" +) + +var _ tracer.Tracer = (*mocktracer)(nil) +var _ Tracer = (*mocktracer)(nil) + +// DSMBacklog is an alias to datastreams.Backlog +type DSMBacklog = datastreams.Backlog + +// Tracer exposes an interface for querying the currently running mock tracer. +type Tracer interface { + tracer.Tracer + + // OpenSpans returns the set of started spans that have not been finished yet. 
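+	// The returned values wrap the underlying tracer.Span; use Unwrap to
+	// access the live span underneath.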
+ OpenSpans() []*Span + + FinishSpan(*tracer.Span) + // FinishedSpans returns the set of finished spans. + FinishedSpans() []*Span + + SentDSMBacklogs() []DSMBacklog + + // Reset resets the spans and services recorded in the tracer. This is + // especially useful when running tests in a loop, where a clean start + // is desired for FinishedSpans calls. + Reset() + + // Stop deactivates the mock tracer and allows a normal tracer to take over. + // It should always be called when testing has finished. + Stop() +} + +// Start sets the internal tracer to a mock and returns an interface +// which allows querying it. Call Start at the beginning of your tests +// to activate the mock tracer. When your test runs, use the returned +// interface to query the tracer's state. +func Start() Tracer { + if utils.BoolEnv(constants.CIVisibilityEnabledEnvironmentVariable, false) && !civisibility.IsTestMode() { + // If CI Visibility is enabled (and we are not in a CI Visibility testing mode), we need to use the CIVisibilityMockTracer + // to bypass the CI Visibility spans from the mocktracer. + // This supports the scenario where the mocktracer is used in a test (we need to keep reporting test spans) + t := newCIVisibilityMockTracer() + // Set the global tracer to the mock tracer without stopping the old one (inside the mock tracer) + internal.StoreGlobalTracer[Tracer, tracer.Tracer](t) + return t + } + + var t tracer.Tracer = newMockTracer() + internal.SetGlobalTracer(t) + return t.(Tracer) +} + +func getGlobalTracer() tracer.Tracer { + return internal.GetGlobalTracer[tracer.Tracer]() +} + +type mocktracer struct { + sync.RWMutex // guards below spans + finishedSpans []*Span + openSpans map[uint64]*Span + dsmTransport *mockDSMTransport + dsmProcessor *datastreams.Processor +} + +func (t *mocktracer) SentDSMBacklogs() []DSMBacklog { + t.dsmProcessor.Flush() + return t.dsmTransport.backlogs +} + +func newMockTracer() *mocktracer { + var t mocktracer + t.openSpans = make(map[uint64]*Span) + t.dsmTransport = &mockDSMTransport{} + client := &http.Client{ + Transport: t.dsmTransport, + } + t.dsmProcessor = datastreams.NewProcessor(&statsd.NoOpClientDirect{}, "env", "service", "v1", &url.URL{Scheme: "http", Host: "agent-address"}, client) + t.dsmProcessor.Start() + t.dsmProcessor.Flush() + return &t +} + +// This is called by the spans when they finish +func (t *mocktracer) FinishSpan(s *tracer.Span) { + t.addFinishedSpan(s) +} + +// Stop deactivates the mock tracer and sets the active tracer to a no-op. +func (t *mocktracer) Stop() { + // N.b.: The main reason for this call is to make TestTracerStop pass. 
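+	// Restoring a NoopTracer also means a later tracer.Start call can install
+	// a real tracer again once the mock is stopped.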
+ internal.SetGlobalTracer(tracer.Tracer(&tracer.NoopTracer{})) + t.dsmProcessor.Stop() +} + +func (t *mocktracer) StartSpan(operationName string, opts ...tracer.StartSpanOption) *tracer.Span { + var cfg tracer.StartSpanConfig + for _, fn := range opts { + fn(&cfg) + } + span := newSpan(operationName, &cfg) + + t.Lock() + t.openSpans[span.Context().SpanID()] = MockSpan(span) + t.Unlock() + + return span +} + +func (t *mocktracer) GetDataStreamsProcessor() *datastreams.Processor { + return t.dsmProcessor +} + +func UnwrapSlice(ss []*Span) []*tracer.Span { + ret := make([]*tracer.Span, len(ss)) + for i, sp := range ss { + ret[i] = sp.Unwrap() + } + return ret +} + +func (t *mocktracer) OpenSpans() []*Span { + t.RLock() + defer t.RUnlock() + spans := make([]*Span, 0, len(t.openSpans)) + for _, s := range t.openSpans { + spans = append(spans, s) + } + return spans +} + +func (t *mocktracer) FinishedSpans() []*Span { + t.RLock() + defer t.RUnlock() + return t.finishedSpans +} + +func (t *mocktracer) Reset() { + t.Lock() + defer t.Unlock() + for k := range t.openSpans { + delete(t.openSpans, k) + } + t.finishedSpans = nil +} + +func (t *mocktracer) addFinishedSpan(s *tracer.Span) { + t.Lock() + defer t.Unlock() + // If the span is not in the open spans, we may be finishing a span that was started + // before the mock tracer was started. In this case, we don't want to add it to the + // finished spans. + if _, ok := t.openSpans[s.Context().SpanID()]; !ok { + return + } + delete(t.openSpans, s.Context().SpanID()) + if t.finishedSpans == nil { + t.finishedSpans = make([]*Span, 0, 1) + } + t.finishedSpans = append(t.finishedSpans, MockSpan(s)) +} + +const ( + traceHeader = tracer.DefaultTraceIDHeader + spanHeader = tracer.DefaultParentIDHeader + priorityHeader = tracer.DefaultPriorityHeader + baggagePrefix = tracer.DefaultBaggageHeaderPrefix +) + +func (t *mocktracer) Extract(carrier interface{}) (*tracer.SpanContext, error) { + return tracer.NewPropagator(&tracer.PropagatorConfig{ + MaxTagsHeaderLen: 512, + }).Extract(carrier) +} + +func (t *mocktracer) Inject(context *tracer.SpanContext, carrier interface{}) error { + return tracer.NewPropagator(&tracer.PropagatorConfig{ + MaxTagsHeaderLen: 512, + }).Inject(context, carrier) +} + +func (t *mocktracer) TracerConf() tracer.TracerConf { + return tracer.TracerConf{} +} + +func (t *mocktracer) Flush() { + t.dsmProcessor.Flush() + for _, s := range t.OpenSpans() { + t.addFinishedSpan(s.sp) + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/README.md b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/README.md new file mode 100644 index 00000000..15303060 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/README.md @@ -0,0 +1,33 @@ +# Datadog Tracer for Go + +This package provides the Datadog APM tracer for Go. + +## API Stability + +The public API of this package is tracked in `api.txt`. This file is automatically generated and checked in CI to ensure API stability. If you make changes to the public API, you must update this file by running: + +```bash +go run ./scripts/apiextractor/api_extractor.go ./ddtrace/tracer > ./ddtrace/tracer/api.txt +``` + +The CI will fail if you make changes to the public API without updating `api.txt`. This helps us: + +1. Track API changes explicitly +2. Maintain backward compatibility +3. Make intentional API changes with clear visibility + +### What constitutes an API change? 
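+
+Any change to the package's exported surface counts, for example: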
+ +- Adding or removing exported functions, types, methods, or fields +- Changing function signatures +- Changing type definitions +- Changing interface definitions + +### Checking API changes locally + +You can check if your changes affect the public API by running: + +```bash +go run ./scripts/apiextractor/api_extractor.go ./ddtrace/tracer > current_api.txt +diff -u ./ddtrace/tracer/api.txt current_api.txt +``` diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/abandonedspans.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/abandonedspans.go similarity index 87% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/abandonedspans.go rename to vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/abandonedspans.go index defad418..7acb5c99 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/abandonedspans.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/abandonedspans.go @@ -14,7 +14,8 @@ import ( "sync/atomic" "time" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/internal/log" ) var ( @@ -46,16 +47,17 @@ func (b *bucket[K, T]) add(k K, v T) { b.index[k] = e } -func (b *bucket[K, T]) get(k K) (T, bool) { - e, ok := b.index[k] - if !ok { - // Compiler trick to return any zero value in generic code. - // https://stackoverflow.com/a/70589302 - var zero T - return zero, ok - } - return e.Value.(T), ok -} +// This function is currently not used. We can add it back if it is needed +// func (b *bucket[K, T]) get(k K) (T, bool) { +// e, ok := b.index[k] +// if !ok { +// // Compiler trick to return any zero value in generic code. +// // https://stackoverflow.com/a/70589302 +// var zero T +// return zero, ok +// } +// return e.Value.(T), ok +// } func (b *bucket[K, T]) remove(k K) { e, ok := b.index[k] @@ -77,27 +79,36 @@ type abandonedSpanCandidate struct { TraceID, SpanID uint64 Start int64 Finished bool + Integration string } -func newAbandonedSpanCandidate(s *span, finished bool) *abandonedSpanCandidate { +func newAbandonedSpanCandidate(s *Span, finished bool) *abandonedSpanCandidate { + var component string + if v, ok := s.meta[ext.Component]; ok { + component = v + } else { + component = "manual" + } // finished is explicit instead of implicit as s.finished may be not set // at the moment of calling this method. // Also, locking is not required as it's called while the span is already locked or it's // being initialized. - return &abandonedSpanCandidate{ - Name: s.Name, - TraceID: s.TraceID, - SpanID: s.SpanID, - Start: s.Start, - Finished: finished, + c := &abandonedSpanCandidate{ + Name: s.name, + TraceID: s.traceID, + SpanID: s.spanID, + Start: s.start, + Finished: finished, + Integration: component, } + return c } // String takes a span and returns a human-readable string representing that span. func (s *abandonedSpanCandidate) String() string { age := now() - s.Start a := fmt.Sprintf("%d sec", age/1e9) - return fmt.Sprintf("[name: %s, span_id: %d, trace_id: %d, age: %s],", s.Name, s.SpanID, s.TraceID, a) + return fmt.Sprintf("[name: %s, integration: %s, span_id: %d, trace_id: %d, age: %s],", s.Name, s.Integration, s.SpanID, s.TraceID, a) } type abandonedSpansDebugger struct { @@ -273,7 +284,7 @@ func (d *abandonedSpansDebugger) log(interval *time.Duration) { log.Warn("Too many abandoned spans. 
Truncating message.") sb.WriteString("...") } - log.Warn(sb.String()) + log.Warn("%s", sb.String()) } // formatAbandonedSpans takes a bucket and returns a human-readable string representing @@ -292,6 +303,9 @@ func formatAbandonedSpans(b *bucket[uint64, *abandonedSpanCandidate], interval * if interval != nil && curTime-s.Start < interval.Nanoseconds() { continue } + if t, ok := getGlobalTracer().(*tracer); ok { + t.statsd.Incr("datadog.tracer.abandoned_spans", []string{"name:" + s.Name, "integration:" + s.Integration}, 1) + } spanCount++ msg := s.String() sb.WriteString(msg) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/api.txt b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/api.txt new file mode 100644 index 00000000..afb404dd --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/api.txt @@ -0,0 +1,382 @@ +// API Stability Report +// Package: github.com/DataDog/dd-trace-go/v2/ddtrace/tracer +// Module: github.com/DataDog/dd-trace-go/v2 + +// File: context.go + +// Package Functions +func ContextWithSpan(context.Context, *Span) (context.Context) +func SpanFromContext(context.Context) (*Span, bool) +func StartSpanFromContext(context.Context, string, ...StartSpanOption) (*Span, context.Context) + +// File: data_streams.go + +// Package Functions +func SetDataStreamsCheckpoint(context.Context, ...string) (context.Context, bool) +func SetDataStreamsCheckpointWithParams(context.Context, options.CheckpointParams, ...string) (context.Context, bool) +func TrackKafkaCommitOffset(string, int32, int64) +func TrackKafkaHighWatermarkOffset(string, string, int32, int64) +func TrackKafkaProduceOffset(string, int32, int64) + +// File: logger.go + +// Package Functions +func AdaptLogger(func(LogLevel, string, ...any)()) (Logger) +func UseLogger(Logger) + +// Types +type LogLevel log.Level + +type Logger interface { + func Log(string) +} + +// File: noop.go + +// Types +type NoopTracer struct {} + +func (NoopTracer) Extract(interface{}) (*SpanContext, error) +func (NoopTracer) Flush() +func (NoopTracer) Inject(*SpanContext, interface{}) (error) +func (NoopTracer) SetServiceInfo(string) +func (NoopTracer) StartSpan(string, ...StartSpanOption) (*Span) +func (NoopTracer) Stop() +func (NoopTracer) TracerConf() (TracerConf) + +// File: option.go + +// Package Functions +func AnalyticsRate(float64) (StartSpanOption) +func ChildOf(*SpanContext) (StartSpanOption) +func MarkIntegrationImported(string) (bool) +func Measured() (StartSpanOption) +func ResourceName(string) (StartSpanOption) +func ServiceName(string) (StartSpanOption) +func SpanType(string) (StartSpanOption) +func StartTime(time.Time) (StartSpanOption) +func Tag(string, interface{}) (StartSpanOption) +func WithAgentAddr(string) (StartOption) +func WithAgentTimeout(int) (StartOption) +func WithAgentURL(string) (StartOption) +func WithAnalytics(bool) (StartOption) +func WithAnalyticsRate(float64) (StartOption) +func WithAppSecEnabled(bool) (StartOption) +func WithDebugMode(bool) (StartOption) +func WithDebugSpansMode(time.Duration) (StartOption) +func WithDebugStack(bool) (StartOption) +func WithDogstatsdAddr(string) (StartOption) +func WithEnv(string) (StartOption) +func WithFeatureFlags(...string) (StartOption) +func WithGlobalServiceName(bool) (StartOption) +func WithGlobalTag(string, interface{}) (StartOption) +func WithHTTPClient(*http.Client) (StartOption) +func WithHeaderTags([]string) (StartOption) +func WithHostname(string) (StartOption) +func WithLLMObsAgentlessEnabled(bool) (StartOption) 
+func WithLLMObsEnabled(bool) (StartOption) +func WithLLMObsMLApp(string) (StartOption) +func WithLLMObsProjectName(string) (StartOption) +func WithLambdaMode(bool) (StartOption) +func WithLogStartup(bool) (StartOption) +func WithLogger(Logger) (StartOption) +func WithPartialFlushing(int) (StartOption) +func WithPeerServiceDefaults(bool) (StartOption) +func WithPeerServiceMapping(string) (StartOption) +func WithProfilerCodeHotspots(bool) (StartOption) +func WithProfilerEndpoints(bool) (StartOption) +func WithPropagation() (UserMonitoringOption) +func WithPropagator(Propagator) (StartOption) +func WithRetryInterval(int) (StartOption) +func WithRuntimeMetrics() (StartOption) +func WithSampler(Sampler) (StartOption) +func WithSamplerRate(float64) (StartOption) +func WithSamplingRules([]SamplingRule) (StartOption) +func WithSendRetries(int) (StartOption) +func WithService(string) (StartOption) +func WithServiceMapping(string) (StartOption) +func WithServiceVersion(string) (StartOption) +func WithSpanID(uint64) (StartSpanOption) +func WithSpanLinks([]SpanLink) (StartSpanOption) +func WithStartSpanConfig(*StartSpanConfig) (StartSpanOption) +func WithStatsComputation(bool) (StartOption) +func WithTestDefaults(any) (StartOption) +func WithTraceEnabled(bool) (StartOption) +func WithUDS(string) (StartOption) +func WithUniversalVersion(string) (StartOption) +func WithUserEmail(string) (UserMonitoringOption) +func WithUserLogin(string) (UserMonitoringOption) +func WithUserMetadata(string) (UserMonitoringOption) +func WithUserName(string) (UserMonitoringOption) +func WithUserOrg(string) (UserMonitoringOption) +func WithUserRole(string) (UserMonitoringOption) +func WithUserScope(string) (UserMonitoringOption) +func WithUserSessionID(string) (UserMonitoringOption) + +// Types +type StartOption func(*config)() + +type UserMonitoringConfig struct { + Email string + Login string + Metadata map[string]string + Name string + Org string + PropagateID bool + Role string + Scope string + SessionID string +} + +type UserMonitoringOption func(*UserMonitoringConfig)() + +// File: propagator.go + +// Types +type Propagator interface { + func Extract(interface{}) (*SpanContext, error) + func Inject(*SpanContext, interface{}) (error) +} + +type TextMapReader interface { + func ForeachKey(func(string)(error)) (error) +} + +type TextMapWriter interface { + func Set(string) +} + +// File: rules_sampler.go + +// Package Functions +func EqualsFalseNegative([]SamplingRule) (bool) +func SpanSamplingRules(...Rule) ([]SamplingRule) +func TraceSamplingRules(...Rule) ([]SamplingRule) + +// Types +type Rule struct { + MaxPerSecond float64 + NameGlob string + Rate float64 + ResourceGlob string + ServiceGlob string + Tags map[string]string +} + +type SamplingRule struct { + MaxPerSecond float64 + Name *regexp.Regexp + Provenance provenance + Rate float64 + Resource *regexp.Regexp + Service *regexp.Regexp + Tags map[string]*regexp.Regexp +} + +func (*SamplingRule) EqualsFalseNegative(*SamplingRule) (bool) +func (SamplingRule) MarshalJSON() ([]byte, error) +func (SamplingRule) String() (string) +func (*SamplingRule) UnmarshalJSON([]byte) (error) + +type SamplingRuleType int + +// File: sampler.go + +// Package Functions +func NewAllSampler() (RateSampler) +func NewRateSampler(float64) (RateSampler) + +// Types +type RateSampler interface { + func Rate() (float64) + func SetRate(float64) +} + +type Sampler interface { + func Sample(*Span) (bool) +} + +// File: span.go + +// Types +type Span struct {} + +func (*Span) AddEvent(string, 
...SpanEventOption) +func (*Span) AddLink(SpanLink) +func (*Span) AsMap() (map[string]interface{}) +func (*Span) BaggageItem(string) (string) +func (*Span) Context() (*SpanContext) +func (*Span) Finish(...FinishOption) +func (*Span) Format(fmt.State, rune) +func (*Span) Root() (*Span) +func (*Span) SetBaggageItem(string) +func (*Span) SetOperationName(string) +func (*Span) SetTag(string, interface{}) +func (*Span) SetUser(string, ...UserMonitoringOption) +func (*Span) StartChild(string, ...StartSpanOption) (*Span) +func (*Span) String() (string) + +// File: span_config.go + +// Package Functions +func FinishTime(time.Time) (FinishOption) +func NewFinishConfig(...FinishOption) (*FinishConfig) +func NewStartSpanConfig(...StartSpanOption) (*StartSpanConfig) +func NoDebugStack() (FinishOption) +func StackFrames(uint) (FinishOption) +func WithError(error) (FinishOption) +func WithFinishConfig(*FinishConfig) (FinishOption) + +// Types +type FinishConfig struct { + Error error + FinishTime time.Time + NoDebugStack bool + SkipStackFrames uint + StackFrames uint +} + +type FinishOption func(*FinishConfig)() + +type StartSpanConfig struct { + Context context.Context + Parent *SpanContext + SpanID uint64 + SpanLinks []SpanLink + StartTime time.Time + Tags map[string]interface{} +} + +type StartSpanOption func(*StartSpanConfig)() + +// File: span_event_config.go + +// Package Functions +func WithSpanEventAttributes(map[string]any) (SpanEventOption) +func WithSpanEventTimestamp(time.Time) (SpanEventOption) + +// Types +type SpanEventConfig struct { + Attributes map[string]any + Time time.Time +} + +type SpanEventOption func(*SpanEventConfig)() + +// File: spancontext.go + +// Package Functions +func FromGenericCtx(ddtrace.SpanContext) (*SpanContext) + +// Types +type SpanContext struct {} + +func (*SpanContext) ForeachBaggageItem(func(string)(bool)) +func (*SpanContext) SamplingPriority() (int, bool) +func (*SpanContext) SpanID() (uint64) +func (*SpanContext) SpanLinks() ([]SpanLink) +func (*SpanContext) TraceID() (string) +func (*SpanContext) TraceIDBytes() ([16]byte) +func (*SpanContext) TraceIDLower() (uint64) +func (*SpanContext) TraceIDUpper() (uint64) + +// File: spanlink.go + +// Types +type SpanLink struct { + Attributes map[string]string + Flags uint32 + SpanID uint64 + TraceID uint64 + TraceIDHigh uint64 + Tracestate string +} + +// File: sqlcomment.go + +// Types +type DBMPropagationMode string + +type SQLCommentCarrier struct { + DBServiceName string + Mode DBMPropagationMode + PeerDBHostname string + PeerDBName string + PeerService string + Query string + SpanID uint64 +} + +func (*SQLCommentCarrier) Extract() (*SpanContext, error) +func (*SQLCommentCarrier) Inject(*SpanContext) (error) + +// File: textmap.go + +// Package Functions +func NewPropagator(*PropagatorConfig, ...Propagator) (Propagator) + +// Types +type HTTPHeadersCarrier http.Header + +type PropagatorConfig struct { + B3 bool + BaggageHeader string + BaggagePrefix string + MaxTagsHeaderLen int + ParentHeader string + PriorityHeader string + TraceHeader string +} + +type TextMapCarrier map[string]string + +// File: tracer.go + +// Package Functions +func Extract(interface{}) (*SpanContext, error) +func Flush() +func Inject(*SpanContext, interface{}) (error) +func SetUser(*Span, string, ...UserMonitoringOption) +func Start(...StartOption) (error) +func StartSpan(string, ...StartSpanOption) (*Span) +func Stop() + +// Types +type Tracer interface { + func Extract(interface{}) (*SpanContext, error) + func Flush() + func 
Inject(*SpanContext, interface{}) (error) + func StartSpan(string, ...StartSpanOption) (*Span) + func Stop() + func TracerConf() (TracerConf) +} + +type TracerConf struct { + CanComputeStats bool + CanDropP0s bool + DebugAbandonedSpans bool + Disabled bool + EnvTag string + PartialFlush bool + PartialFlushMinSpans int + PeerServiceDefaults bool + PeerServiceMappings map[string]string + ServiceTag string + TracingAsTransport bool + VersionTag string +} + +// File: tracer_metadata.go + +// Types +type Metadata struct { + Hostname string + Language string + RuntimeID string + SchemaVersion uint8 + ServiceEnvironment string + ServiceName string + ServiceVersion string + Version string +} + diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_payload.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_payload.go new file mode 100644 index 00000000..b04ee29a --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_payload.go @@ -0,0 +1,191 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package tracer + +import ( + "bytes" + "time" + + "github.com/tinylib/msgp/msgp" + + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants" + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils" + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry" + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/version" +) + +// ciVisibilityPayload represents a payload specifically designed for CI Visibility events. +// It uses the generic payload interface and adds methods to handle CI Visibility specific data. +type ciVisibilityPayload struct { + payload payload + serializationTime time.Duration +} + +// push adds a new CI Visibility event to the payload buffer. +// It grows the buffer to accommodate the new event, encodes the event in MessagePack format, and updates the event count. +// +// Parameters: +// +// event - The CI Visibility event to be added to the payload. +// +// Returns: +// +// An error if encoding the event fails. +func (p *ciVisibilityPayload) push(event *ciVisibilityEvent) (size int, err error) { + p.payload.grow(event.Msgsize()) + startTime := time.Now() + defer func() { + p.serializationTime += time.Since(startTime) + }() + if err := msgp.Encode(p.payload, event); err != nil { + return 0, err + } + p.payload.recordItem() // This already calls updateHeader() internally. + return p.size(), nil +} + +// newCiVisibilityPayload creates a new instance of civisibilitypayload. +// +// Returns: +// +// A pointer to a newly initialized civisibilitypayload instance. +func newCiVisibilityPayload() *ciVisibilityPayload { + log.Debug("ciVisibilityPayload: creating payload instance") + return &ciVisibilityPayload{payload: newPayload(traceProtocolV04), serializationTime: 0} +} + +// getBuffer retrieves the complete body of the CI Visibility payload, including metadata. +// It reads the current payload buffer, adds metadata, and encodes the entire payload in MessagePack format. +// +// Parameters: +// +// config - A pointer to the config structure containing environment settings. +// +// Returns: +// +// A pointer to a bytes.Buffer containing the encoded CI Visibility payload. 
+// An error if reading from the buffer or encoding the payload fails. +func (p *ciVisibilityPayload) getBuffer(config *config) (*bytes.Buffer, error) { + startTime := time.Now() + log.Debug("ciVisibilityPayload: .getBuffer (count: %d)", p.payload.stats().itemCount) + + // Create a buffer to read the current payload + payloadBuf := new(bytes.Buffer) + if _, err := payloadBuf.ReadFrom(p.payload); err != nil { + return nil, err + } + + // Create the visibility payload + visibilityPayload := p.writeEnvelope(config.env, payloadBuf.Bytes()) + + // Create a new buffer to encode the visibility payload in MessagePack format + encodedBuf := new(bytes.Buffer) + if err := msgp.Encode(encodedBuf, visibilityPayload); err != nil { + return nil, err + } + + telemetry.EndpointPayloadEventsCount(telemetry.TestCycleEndpointType, float64(p.payload.stats().itemCount)) + telemetry.EndpointPayloadBytes(telemetry.TestCycleEndpointType, float64(encodedBuf.Len())) + telemetry.EndpointEventsSerializationMs(telemetry.TestCycleEndpointType, float64((p.serializationTime + time.Since(startTime)).Milliseconds())) + return encodedBuf, nil +} + +func (p *ciVisibilityPayload) writeEnvelope(env string, events []byte) *ciTestCyclePayload { + + /* + The Payload format in the CI Visibility protocol is like this: + { + "version": 1, + "metadata": { + "*": { + "runtime-id": "...", + "language": "...", + "library_version": "...", + "env": "..." + } + }, + "events": [ + // ... + ] + } + + The event format can be found in the `civisibility_tslv.go` file in the ciVisibilityEvent documentation + */ + + // Create the metadata map + allMetadata := map[string]string{ + "language": "go", + "runtime-id": globalconfig.RuntimeID(), + "library_version": version.Tag, + } + if env != "" { + allMetadata["env"] = env + } + + // Create the visibility payload + visibilityPayload := &ciTestCyclePayload{ + Version: 1, + Metadata: map[string]map[string]string{ + "*": allMetadata, + }, + Events: events, + } + + // Check for the test session name and append the tag at the metadata level + if testSessionName, ok := utils.GetCITags()[constants.TestSessionName]; ok { + testSessionMap := map[string]string{ + constants.TestSessionName: testSessionName, + } + visibilityPayload.Metadata["test_session_end"] = testSessionMap + visibilityPayload.Metadata["test_module_end"] = testSessionMap + visibilityPayload.Metadata["test_suite_end"] = testSessionMap + visibilityPayload.Metadata["test"] = testSessionMap + } + + return visibilityPayload +} + +// stats returns the current stats of the payload. +func (p *ciVisibilityPayload) stats() payloadStats { + return p.payload.stats() +} + +// size returns the payload size in bytes (for backward compatibility). +func (p *ciVisibilityPayload) size() int { + return p.payload.size() +} + +// itemCount returns the number of items available in the stream (for backward compatibility). +func (p *ciVisibilityPayload) itemCount() int { + return p.payload.itemCount() +} + +// protocol returns the protocol version of the payload. +func (p *ciVisibilityPayload) protocol() float64 { + return p.payload.protocol() +} + +// clear empties the payload buffers. +func (p *ciVisibilityPayload) clear() { + p.payload.clear() +} + +// reset sets up the payload to be read a second time. +func (p *ciVisibilityPayload) reset() { + p.payload.reset() +} + +// Read implements io.Reader by reading from the underlying payload. 
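+// Together with Close below, it lets the CI Visibility payload stand in
+// wherever the wrapped payload's io.ReadCloser is expected.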
+func (p *ciVisibilityPayload) Read(b []byte) (n int, err error) { + return p.payload.Read(b) +} + +// Close implements io.Closer by closing the underlying payload. +func (p *ciVisibilityPayload) Close() error { + return p.payload.Close() +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_transport.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_transport.go similarity index 76% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_transport.go rename to vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_transport.go index db64b5d7..ff438b71 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_transport.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_transport.go @@ -11,15 +11,19 @@ import ( "fmt" "io" "net/http" - "os" "runtime" - "strconv" "strings" + "time" - "gopkg.in/DataDog/dd-trace-go.v1/internal" - "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" - "gopkg.in/DataDog/dd-trace-go.v1/internal/version" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants" + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/urlsanitizer" + "github.com/DataDog/dd-trace-go/v2/internal/version" ) // Constants for CI Visibility API paths and subdomains. @@ -74,7 +78,7 @@ func newCiVisibilityTransport(config *config) *ciVisibilityTransport { testCycleURL := "" if agentlessEnabled { // Agentless mode is enabled. - APIKeyValue := os.Getenv(constants.APIKeyEnvironmentVariable) + APIKeyValue := env.Get(constants.APIKeyEnvironmentVariable) if APIKeyValue == "" { log.Error("An API key is required for agentless mode. Use the DD_API_KEY env variable to set it") } @@ -83,14 +87,14 @@ func newCiVisibilityTransport(config *config) *ciVisibilityTransport { // Check for a custom agentless URL. agentlessURL := "" - if v := os.Getenv(constants.CIVisibilityAgentlessURLEnvironmentVariable); v != "" { + if v := env.Get(constants.CIVisibilityAgentlessURLEnvironmentVariable); v != "" { agentlessURL = v } if agentlessURL == "" { // Use the standard agentless URL format. site := "datadoghq.com" - if v := os.Getenv("DD_SITE"); v != "" { + if v := env.Get("DD_SITE"); v != "" { site = v } @@ -104,6 +108,7 @@ func newCiVisibilityTransport(config *config) *ciVisibilityTransport { defaultHeaders["X-Datadog-EVP-Subdomain"] = TestCycleSubdomain testCycleURL = fmt.Sprintf("%s/%s/%s", config.agentURL.String(), EvpProxyPath, TestCyclePath) } + log.Debug("ciVisibilityTransport: creating transport instance [agentless: %t, testcycleurl: %s]", agentlessEnabled, urlsanitizer.SanitizeURL(testCycleURL)) return &ciVisibilityTransport{ config: config, @@ -123,8 +128,8 @@ func newCiVisibilityTransport(config *config) *ciVisibilityTransport { // Returns: // // An io.ReadCloser for reading the response body, and an error if the operation fails. 
-func (t *ciVisibilityTransport) send(p *payload) (body io.ReadCloser, err error) { - ciVisibilityPayload := &ciVisibilityPayload{p} +func (t *ciVisibilityTransport) send(p payload) (body io.ReadCloser, err error) { + ciVisibilityPayload := &ciVisibilityPayload{payload: p, serializationTime: 0} buffer, bufferErr := ciVisibilityPayload.getBuffer(t.config) if bufferErr != nil { return nil, fmt.Errorf("cannot create buffer payload: %v", bufferErr) @@ -136,28 +141,31 @@ func (t *ciVisibilityTransport) send(p *payload) (body io.ReadCloser, err error) gzipWriter := gzip.NewWriter(&gzipBuffer) _, err = io.Copy(gzipWriter, buffer) if err != nil { - return nil, fmt.Errorf("cannot compress request body: %v", err) + return nil, fmt.Errorf("cannot compress request body: %s", err.Error()) } err = gzipWriter.Close() if err != nil { - return nil, fmt.Errorf("cannot compress request body: %v", err) + return nil, fmt.Errorf("cannot compress request body: %s", err.Error()) } buffer = &gzipBuffer } req, err := http.NewRequest("POST", t.testCycleURLPath, buffer) if err != nil { - return nil, fmt.Errorf("cannot create http request: %v", err) + return nil, fmt.Errorf("cannot create http request: %s", err.Error()) } + req.ContentLength = int64(buffer.Len()) for header, value := range t.headers { req.Header.Set(header, value) } - req.Header.Set("Content-Length", strconv.Itoa(buffer.Len())) if t.agentless { req.Header.Set("Content-Encoding", "gzip") } + log.Debug("ciVisibilityTransport: sending transport request: %d bytes", buffer.Len()) + startTime := time.Now() response, err := t.config.httpClient.Do(req) + telemetry.EndpointPayloadRequestsMs(telemetry.TestCycleEndpointType, float64(time.Since(startTime).Milliseconds())) if err != nil { return nil, err } @@ -168,6 +176,7 @@ func (t *ciVisibilityTransport) send(p *payload) (body io.ReadCloser, err error) n, _ := response.Body.Read(msg) _ = response.Body.Close() txt := http.StatusText(code) + telemetry.EndpointPayloadRequestsErrors(telemetry.TestCycleEndpointType, telemetry.GetErrorTypeFromStatusCode(code)) if n > 0 { return nil, fmt.Errorf("%s (Status: %s)", msg[:n], txt) } @@ -185,7 +194,7 @@ func (t *ciVisibilityTransport) send(p *payload) (body io.ReadCloser, err error) // Returns: // // An error indicating that stats are not supported. -func (t *ciVisibilityTransport) sendStats(*statsPayload) error { +func (t *ciVisibilityTransport) sendStats(*pb.ClientStatsPayload, int) error { // Stats are not supported by CI Visibility agentless / EVP proxy. return nil } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_tslv.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_tslv.go similarity index 87% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_tslv.go rename to vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_tslv.go index 377f6d56..22b10877 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_tslv.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_tslv.go @@ -3,16 +3,16 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2024 Datadog, Inc. 
-//go:generate msgp -unexported -marshal=false -o=civisibility_tslv_msgp.go -tests=false +//go:generate go run github.com/tinylib/msgp -unexported -marshal=false -o=civisibility_tslv_msgp.go -tests=false package tracer import ( "strconv" + "github.com/DataDog/dd-trace-go/v2/ddtrace" + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants" "github.com/tinylib/msgp/msgp" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace" - "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants" ) type ( @@ -26,7 +26,6 @@ type ( // Ensure that ciVisibilityEvent and related types implement necessary interfaces. var ( - _ ddtrace.Span = (*ciVisibilityEvent)(nil) _ msgp.Encodable = (*ciVisibilityEvent)(nil) _ msgp.Decodable = (*ciVisibilityEvent)(nil) @@ -45,7 +44,6 @@ type ciTestCyclePayload struct { } // ciVisibilityEvent represents a CI visibility event, including type, version, and content. -// It implements the ddtrace.Span interface. // According to the CI Visibility event specification it has the following format for tests: // // { @@ -152,7 +150,7 @@ type ciVisibilityEvent struct { Version int32 `msg:"version"` // Version of the event type Content tslvSpan `msg:"content"` // Content of the event - span *span `msg:"-"` // Associated span (not marshaled) + span *Span `msg:"-"` // Associated span (not marshaled) } // SetTag sets a tag on the event's span and updates the content metadata and metrics. @@ -163,8 +161,8 @@ type ciVisibilityEvent struct { // value - The tag value. func (e *ciVisibilityEvent) SetTag(key string, value interface{}) { e.span.SetTag(key, value) - e.Content.Meta = e.span.Meta - e.Content.Metrics = e.span.Metrics + e.Content.Meta = e.span.meta + e.Content.Metrics = e.span.metrics } // SetOperationName sets the operation name of the event's span and updates the content name. @@ -174,7 +172,7 @@ func (e *ciVisibilityEvent) SetTag(key string, value interface{}) { // operationName - The new operation name. func (e *ciVisibilityEvent) SetOperationName(operationName string) { e.span.SetOperationName(operationName) - e.Content.Name = e.span.Name + e.Content.Name = e.span.name } // BaggageItem retrieves the baggage item associated with the given key from the event's span. @@ -205,7 +203,7 @@ func (e *ciVisibilityEvent) SetBaggageItem(key, val string) { // Parameters: // // opts - Optional finish options. -func (e *ciVisibilityEvent) Finish(opts ...ddtrace.FinishOption) { +func (e *ciVisibilityEvent) Finish(opts ...FinishOption) { e.span.Finish(opts...) } @@ -247,8 +245,8 @@ type tslvSpan struct { // Returns: // // A pointer to the created ciVisibilityEvent. -func getCiVisibilityEvent(span *span) *ciVisibilityEvent { - switch span.Type { +func getCiVisibilityEvent(span *Span) *ciVisibilityEvent { + switch span.spanType { case constants.SpanTypeTest: return createTestEventFromSpan(span) case constants.SpanTypeTestSuite: @@ -271,14 +269,15 @@ func getCiVisibilityEvent(span *span) *ciVisibilityEvent { // Returns: // // A pointer to the created ciVisibilityEvent. 
-func createTestEventFromSpan(span *span) *ciVisibilityEvent { +func createTestEventFromSpan(span *Span) *ciVisibilityEvent { tSpan := createTslvSpan(span) + tSpan.ParentID = 0 tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag) tSpan.ModuleID = getAndRemoveMetaToUInt64(span, constants.TestModuleIDTag) tSpan.SuiteID = getAndRemoveMetaToUInt64(span, constants.TestSuiteIDTag) tSpan.CorrelationID = getAndRemoveMeta(span, constants.ItrCorrelationIDTag) - tSpan.SpanID = span.SpanID - tSpan.TraceID = span.TraceID + tSpan.SpanID = span.spanID + tSpan.TraceID = span.traceID return &ciVisibilityEvent{ span: span, Type: constants.SpanTypeTest, @@ -296,8 +295,9 @@ func createTestEventFromSpan(span *span) *ciVisibilityEvent { // Returns: // // A pointer to the created ciVisibilityEvent. -func createTestSuiteEventFromSpan(span *span) *ciVisibilityEvent { +func createTestSuiteEventFromSpan(span *Span) *ciVisibilityEvent { tSpan := createTslvSpan(span) + tSpan.ParentID = 0 tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag) tSpan.ModuleID = getAndRemoveMetaToUInt64(span, constants.TestModuleIDTag) tSpan.SuiteID = getAndRemoveMetaToUInt64(span, constants.TestSuiteIDTag) @@ -318,8 +318,9 @@ func createTestSuiteEventFromSpan(span *span) *ciVisibilityEvent { // Returns: // // A pointer to the created ciVisibilityEvent. -func createTestModuleEventFromSpan(span *span) *ciVisibilityEvent { +func createTestModuleEventFromSpan(span *Span) *ciVisibilityEvent { tSpan := createTslvSpan(span) + tSpan.ParentID = 0 tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag) tSpan.ModuleID = getAndRemoveMetaToUInt64(span, constants.TestModuleIDTag) return &ciVisibilityEvent{ @@ -339,8 +340,9 @@ func createTestModuleEventFromSpan(span *span) *ciVisibilityEvent { // Returns: // // A pointer to the created ciVisibilityEvent. -func createTestSessionEventFromSpan(span *span) *ciVisibilityEvent { +func createTestSessionEventFromSpan(span *Span) *ciVisibilityEvent { tSpan := createTslvSpan(span) + tSpan.ParentID = 0 tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag) return &ciVisibilityEvent{ span: span, @@ -359,10 +361,10 @@ func createTestSessionEventFromSpan(span *span) *ciVisibilityEvent { // Returns: // // A pointer to the created ciVisibilityEvent. -func createSpanEventFromSpan(span *span) *ciVisibilityEvent { +func createSpanEventFromSpan(span *Span) *ciVisibilityEvent { tSpan := createTslvSpan(span) - tSpan.SpanID = span.SpanID - tSpan.TraceID = span.TraceID + tSpan.SpanID = span.spanID + tSpan.TraceID = span.traceID return &ciVisibilityEvent{ span: span, Type: constants.SpanTypeSpan, @@ -380,18 +382,18 @@ func createSpanEventFromSpan(span *span) *ciVisibilityEvent { // Returns: // // The created tslvSpan. -func createTslvSpan(span *span) tslvSpan { +func createTslvSpan(span *Span) tslvSpan { return tslvSpan{ - Name: span.Name, - Service: span.Service, - Resource: span.Resource, - Type: span.Type, - Start: span.Start, - Duration: span.Duration, - ParentID: span.ParentID, - Error: span.Error, - Meta: span.Meta, - Metrics: span.Metrics, + Name: span.name, + Service: span.service, + Resource: span.resource, + Type: span.spanType, + Start: span.start, + Duration: span.duration, + ParentID: span.parentID, + Error: span.error, + Meta: span.meta, + Metrics: span.metrics, } } @@ -405,16 +407,16 @@ func createTslvSpan(span *span) tslvSpan { // Returns: // // The retrieved metadata value. 
-func getAndRemoveMeta(span *span, key string) string { - span.Lock() - defer span.Unlock() - if span.Meta == nil { - span.Meta = make(map[string]string, 1) +func getAndRemoveMeta(span *Span, key string) string { + span.mu.Lock() + defer span.mu.Unlock() + if span.meta == nil { + span.meta = make(map[string]string, 1) } - if v, ok := span.Meta[key]; ok { - delete(span.Meta, key) - delete(span.Metrics, key) + if v, ok := span.meta[key]; ok { + delete(span.meta, key) + delete(span.metrics, key) return v } @@ -431,7 +433,7 @@ func getAndRemoveMeta(span *span, key string) string { // Returns: // // The retrieved and converted metadata value as a uint64. -func getAndRemoveMetaToUInt64(span *span, key string) uint64 { +func getAndRemoveMetaToUInt64(span *Span, key string) uint64 { strValue := getAndRemoveMeta(span, key) i, err := strconv.ParseUint(strValue, 10, 64) if err != nil { diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_tslv_msgp.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_tslv_msgp.go similarity index 78% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_tslv_msgp.go rename to vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_tslv_msgp.go index 63fa4b84..179ced68 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_tslv_msgp.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_tslv_msgp.go @@ -497,25 +497,25 @@ func (z *tslvSpan) DecodeMsg(dc *msgp.Reader) (err error) { case "test_session_id": z.SessionID, err = dc.ReadUint64() if err != nil { - err = msgp.WrapError(err, "SessionId") + err = msgp.WrapError(err, "SessionID") return } case "test_module_id": z.ModuleID, err = dc.ReadUint64() if err != nil { - err = msgp.WrapError(err, "ModuleId") + err = msgp.WrapError(err, "ModuleID") return } case "test_suite_id": z.SuiteID, err = dc.ReadUint64() if err != nil { - err = msgp.WrapError(err, "SuiteId") + err = msgp.WrapError(err, "SuiteID") return } case "itr_correlation_id": z.CorrelationID, err = dc.ReadString() if err != nil { - err = msgp.WrapError(err, "CorrelationId") + err = msgp.WrapError(err, "CorrelationID") return } case "name": @@ -651,7 +651,7 @@ func (z *tslvSpan) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *tslvSpan) EncodeMsg(en *msgp.Writer) (err error) { - // omitempty: check for empty values + // check for omitted fields zb0001Len := uint32(16) var zb0001Mask uint16 /* 16 bits */ _ = zb0001Mask @@ -696,209 +696,210 @@ func (z *tslvSpan) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - if zb0001Len == 0 { - return - } - if (zb0001Mask & 0x1) == 0 { // if not empty - // write "test_session_id" - err = en.Append(0xaf, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64) - if err != nil { - return + + // skip if no fields are to be emitted + if zb0001Len != 0 { + if (zb0001Mask & 0x1) == 0 { // if not omitted + // write "test_session_id" + err = en.Append(0xaf, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.SessionID) + if err != nil { + err = msgp.WrapError(err, "SessionID") + return + } } - err = en.WriteUint64(z.SessionID) - if err != nil { - err = msgp.WrapError(err, "SessionID") - return + if (zb0001Mask & 0x2) == 0 { // if not omitted + // write "test_module_id" + err = en.Append(0xae, 0x74, 0x65, 0x73, 0x74, 0x5f, 
0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.ModuleID) + if err != nil { + err = msgp.WrapError(err, "ModuleID") + return + } } - } - if (zb0001Mask & 0x2) == 0 { // if not empty - // write "test_module_id" - err = en.Append(0xae, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x69, 0x64) - if err != nil { - return + if (zb0001Mask & 0x4) == 0 { // if not omitted + // write "test_suite_id" + err = en.Append(0xad, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x69, 0x74, 0x65, 0x5f, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.SuiteID) + if err != nil { + err = msgp.WrapError(err, "SuiteID") + return + } } - err = en.WriteUint64(z.ModuleID) - if err != nil { - err = msgp.WrapError(err, "ModuleID") - return + if (zb0001Mask & 0x8) == 0 { // if not omitted + // write "itr_correlation_id" + err = en.Append(0xb2, 0x69, 0x74, 0x72, 0x5f, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteString(z.CorrelationID) + if err != nil { + err = msgp.WrapError(err, "CorrelationID") + return + } } - } - if (zb0001Mask & 0x4) == 0 { // if not empty - // write "test_suite_id" - err = en.Append(0xad, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x69, 0x74, 0x65, 0x5f, 0x69, 0x64) + // write "name" + err = en.Append(0xa4, 0x6e, 0x61, 0x6d, 0x65) if err != nil { return } - err = en.WriteUint64(z.SuiteID) + err = en.WriteString(z.Name) if err != nil { - err = msgp.WrapError(err, "SuiteID") + err = msgp.WrapError(err, "Name") return } - } - if (zb0001Mask & 0x8) == 0 { // if not empty - // write "itr_correlation_id" - err = en.Append(0xb2, 0x69, 0x74, 0x72, 0x5f, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64) + // write "service" + err = en.Append(0xa7, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) if err != nil { return } - err = en.WriteString(z.CorrelationID) + err = en.WriteString(z.Service) if err != nil { - err = msgp.WrapError(err, "CorrelationID") + err = msgp.WrapError(err, "Service") return } - } - // write "name" - err = en.Append(0xa4, 0x6e, 0x61, 0x6d, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Name) - if err != nil { - err = msgp.WrapError(err, "Name") - return - } - // write "service" - err = en.Append(0xa7, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Service) - if err != nil { - err = msgp.WrapError(err, "Service") - return - } - // write "resource" - err = en.Append(0xa8, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Resource) - if err != nil { - err = msgp.WrapError(err, "Resource") - return - } - // write "type" - err = en.Append(0xa4, 0x74, 0x79, 0x70, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Type) - if err != nil { - err = msgp.WrapError(err, "Type") - return - } - // write "start" - err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return - } - err = en.WriteInt64(z.Start) - if err != nil { - err = msgp.WrapError(err, "Start") - return - } - // write "duration" - err = en.Append(0xa8, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) - if err != nil { - return - } - err = en.WriteInt64(z.Duration) - if err != nil { - err = msgp.WrapError(err, "Duration") - return - } - if (zb0001Mask & 0x400) == 0 { // if not empty - // write "span_id" - err = en.Append(0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64) + // 
write "resource" + err = en.Append(0xa8, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) if err != nil { return } - err = en.WriteUint64(z.SpanID) + err = en.WriteString(z.Resource) if err != nil { - err = msgp.WrapError(err, "SpanID") + err = msgp.WrapError(err, "Resource") return } - } - if (zb0001Mask & 0x800) == 0 { // if not empty - // write "trace_id" - err = en.Append(0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64) + // write "type" + err = en.Append(0xa4, 0x74, 0x79, 0x70, 0x65) if err != nil { return } - err = en.WriteUint64(z.TraceID) + err = en.WriteString(z.Type) if err != nil { - err = msgp.WrapError(err, "TraceID") + err = msgp.WrapError(err, "Type") return } - } - if (zb0001Mask & 0x1000) == 0 { // if not empty - // write "parent_id" - err = en.Append(0xa9, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64) + // write "start" + err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) if err != nil { return } - err = en.WriteUint64(z.ParentID) + err = en.WriteInt64(z.Start) if err != nil { - err = msgp.WrapError(err, "ParentID") + err = msgp.WrapError(err, "Start") return } - } - // write "error" - err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72) - if err != nil { - return - } - err = en.WriteInt32(z.Error) - if err != nil { - err = msgp.WrapError(err, "Error") - return - } - if (zb0001Mask & 0x4000) == 0 { // if not empty - // write "meta" - err = en.Append(0xa4, 0x6d, 0x65, 0x74, 0x61) + // write "duration" + err = en.Append(0xa8, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) if err != nil { return } - err = en.WriteMapHeader(uint32(len(z.Meta))) + err = en.WriteInt64(z.Duration) if err != nil { - err = msgp.WrapError(err, "Meta") + err = msgp.WrapError(err, "Duration") return } - for za0001, za0002 := range z.Meta { - err = en.WriteString(za0001) + if (zb0001Mask & 0x400) == 0 { // if not omitted + // write "span_id" + err = en.Append(0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64) if err != nil { - err = msgp.WrapError(err, "Meta") return } - err = en.WriteString(za0002) + err = en.WriteUint64(z.SpanID) if err != nil { - err = msgp.WrapError(err, "Meta", za0001) + err = msgp.WrapError(err, "SpanID") return } } - } - if (zb0001Mask & 0x8000) == 0 { // if not empty - // write "metrics" - err = en.Append(0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73) + if (zb0001Mask & 0x800) == 0 { // if not omitted + // write "trace_id" + err = en.Append(0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.TraceID) + if err != nil { + err = msgp.WrapError(err, "TraceID") + return + } + } + if (zb0001Mask & 0x1000) == 0 { // if not omitted + // write "parent_id" + err = en.Append(0xa9, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.ParentID) + if err != nil { + err = msgp.WrapError(err, "ParentID") + return + } + } + // write "error" + err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72) if err != nil { return } - err = en.WriteMapHeader(uint32(len(z.Metrics))) + err = en.WriteInt32(z.Error) if err != nil { - err = msgp.WrapError(err, "Metrics") + err = msgp.WrapError(err, "Error") return } - for za0003, za0004 := range z.Metrics { - err = en.WriteString(za0003) + if (zb0001Mask & 0x4000) == 0 { // if not omitted + // write "meta" + err = en.Append(0xa4, 0x6d, 0x65, 0x74, 0x61) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.Meta))) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + for za0001, za0002 := range z.Meta { + err 
= en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Meta", za0001) + return + } + } + } + if (zb0001Mask & 0x8000) == 0 { // if not omitted + // write "metrics" + err = en.Append(0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73) if err != nil { - err = msgp.WrapError(err, "Metrics") return } - err = en.WriteFloat64(za0004) + err = en.WriteMapHeader(uint32(len(z.Metrics))) if err != nil { - err = msgp.WrapError(err, "Metrics", za0003) + err = msgp.WrapError(err, "Metrics") return } + for za0003, za0004 := range z.Metrics { + err = en.WriteString(za0003) + if err != nil { + err = msgp.WrapError(err, "Metrics") + return + } + err = en.WriteFloat64(za0004) + if err != nil { + err = msgp.WrapError(err, "Metrics", za0003) + return + } + } } } return diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_writer.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_writer.go new file mode 100644 index 00000000..038e8e60 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_writer.go @@ -0,0 +1,132 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package tracer + +import ( + "sync" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// Constants defining the payload size limits for agentless mode. +const ( + // agentlessPayloadMaxLimit is the maximum payload size allowed, indicating the + // maximum size of the package that the intake can receive. + agentlessPayloadMaxLimit = 5 * 1024 * 1024 // 5 MB + + // agentlessPayloadSizeLimit specifies the maximum allowed size of the payload before + // it triggers a flush to the transport. + agentlessPayloadSizeLimit = agentlessPayloadMaxLimit / 2 +) + +// Ensure that ciVisibilityTraceWriter implements the traceWriter interface. +var _ traceWriter = (*ciVisibilityTraceWriter)(nil) + +// ciVisibilityTraceWriter is responsible for buffering and sending CI visibility trace data +// to the Datadog backend. It manages the payload size and flushes the data when necessary. +type ciVisibilityTraceWriter struct { + config *config // Configuration for the tracer. + payload *ciVisibilityPayload // Encodes and buffers events in msgpack format. + climit chan struct{} // Limits the number of concurrent outgoing connections. + wg sync.WaitGroup // Waits for all uploads to finish. +} + +// newCiVisibilityTraceWriter creates a new instance of ciVisibilityTraceWriter. +// +// Parameters: +// +// c - The tracer configuration. +// +// Returns: +// +// A pointer to an initialized ciVisibilityTraceWriter. +func newCiVisibilityTraceWriter(c *config) *ciVisibilityTraceWriter { + log.Debug("ciVisibilityTraceWriter: creating trace writer instance") + return &ciVisibilityTraceWriter{ + config: c, + payload: newCiVisibilityPayload(), + climit: make(chan struct{}, concurrentConnectionLimit), + } +} + +// add adds a new trace to the payload. If the payload size exceeds the limit, +// it triggers a flush to send the data. +// +// Parameters: +// +// trace - A slice of spans representing the trace to be added. 
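+//
+// Each event is encoded into the msgpack payload as it is added; once the
+// buffered payload exceeds agentlessPayloadSizeLimit, it is flushed before
+// accepting further events.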
+func (w *ciVisibilityTraceWriter) add(trace []*Span) { + telemetry.EventsEnqueueForSerialization() + for _, s := range trace { + cvEvent := getCiVisibilityEvent(s) + size, err := w.payload.push(cvEvent) + if err != nil { + log.Error("ciVisibilityTraceWriter: Error encoding msgpack: %s", err.Error()) + } + if size > agentlessPayloadSizeLimit { + w.flush() + } + } +} + +// stop stops the trace writer, ensuring all data is flushed and all uploads are completed. +func (w *ciVisibilityTraceWriter) stop() { + w.flush() + w.wg.Wait() +} + +// flush sends the current payload to the transport. It ensures that the payload is reset +// and the resources are freed after the flush operation is completed. +func (w *ciVisibilityTraceWriter) flush() { + if w.payload.stats().itemCount == 0 { + return + } + + w.wg.Add(1) + w.climit <- struct{}{} + oldp := w.payload + w.payload = newCiVisibilityPayload() + + go func(p *ciVisibilityPayload) { + defer func(_ time.Time) { + // Once the payload has been used, clear the buffer for garbage + // collection to avoid a memory leak when references to this object + // may still be kept by faulty transport implementations or the + // standard library. See dd-trace-go#976 + p.clear() + + <-w.climit + w.wg.Done() + }(time.Now()) + + var count, size int + var err error + + requestCompressedType := telemetry.UncompressedRequestCompressedType + if ciTransport, ok := w.config.transport.(*ciVisibilityTransport); ok && ciTransport.agentless { + requestCompressedType = telemetry.CompressedRequestCompressedType + } + telemetry.EndpointPayloadRequests(telemetry.TestCycleEndpointType, requestCompressedType) + + for attempt := 0; attempt <= w.config.sendRetries; attempt++ { + stats := p.stats() + size, count = stats.size, stats.itemCount + log.Debug("ciVisibilityTraceWriter: sending payload: size: %d events: %d\n", size, count) + _, err = w.config.transport.send(p.payload) + if err == nil { + log.Debug("ciVisibilityTraceWriter: sent events after %d attempts", attempt+1) + return + } + log.Error("ciVisibilityTraceWriter: failure sending events (attempt %d of %d): %v", attempt+1, w.config.sendRetries+1, err.Error()) + p.reset() + time.Sleep(w.config.retryInterval) + } + log.Error("ciVisibilityTraceWriter: lost %d events: %v", count, err.Error()) + telemetry.EndpointPayloadDropped(telemetry.TestCycleEndpointType) + }(oldp) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/context.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/context.go new file mode 100644 index 00000000..4fad8bfc --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/context.go @@ -0,0 +1,95 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "context" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/options" + "github.com/DataDog/dd-trace-go/v2/internal" + illmobs "github.com/DataDog/dd-trace-go/v2/internal/llmobs" + "github.com/DataDog/dd-trace-go/v2/internal/orchestrion" +) + +// ContextWithSpan returns a copy of the given context which includes the span s. 
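+//
+// A typical use is threading a manually started span through request-scoped code:
+//
+//	span := tracer.StartSpan("web.request")
+//	ctx = tracer.ContextWithSpan(ctx, span)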
+func ContextWithSpan(ctx context.Context, s *Span) context.Context {
+	newCtx := orchestrion.CtxWithValue(ctx, internal.ActiveSpanKey, s)
+	return contextWithPropagatedLLMSpan(newCtx, s)
+}
+
+func contextWithPropagatedLLMSpan(ctx context.Context, s *Span) context.Context {
+	if s == nil {
+		return ctx
+	}
+	// If a propagated LLM span is already present, there is nothing to do.
+	if _, ok := illmobs.PropagatedLLMSpanFromContext(ctx); ok {
+		return ctx
+	}
+	newCtx := ctx
+
+	propagatedLLMObs := propagatedLLMSpanFromTags(s)
+	if propagatedLLMObs.SpanID == "" || propagatedLLMObs.TraceID == "" {
+		return newCtx
+	}
+	return illmobs.ContextWithPropagatedLLMSpan(newCtx, propagatedLLMObs)
+}
+
+// propagatedLLMSpanFromTags extracts LLMObs propagation information from the trace propagating tags.
+// This is used during distributed tracing to set the correct parent span for the current span.
+func propagatedLLMSpanFromTags(s *Span) *illmobs.PropagatedLLMSpan {
+	propagatedLLMObs := &illmobs.PropagatedLLMSpan{}
+	if s.context == nil || s.context.trace == nil {
+		return propagatedLLMObs
+	}
+	if parentID := s.context.trace.propagatingTag(keyPropagatedLLMObsParentID); parentID != "" {
+		propagatedLLMObs.SpanID = parentID
+	}
+	if mlApp := s.context.trace.propagatingTag(keyPropagatedLLMObsMLAPP); mlApp != "" {
+		propagatedLLMObs.MLApp = mlApp
+	}
+	if trID := s.context.trace.propagatingTag(keyPropagatedLLMObsTraceID); trID != "" {
+		propagatedLLMObs.TraceID = trID
+	}
+	return propagatedLLMObs
+}
+
+// SpanFromContext returns the span contained in the given context. A second return
+// value indicates whether a span was found in the context. If no span is found,
+// nil is returned.
+func SpanFromContext(ctx context.Context) (*Span, bool) {
+	if ctx == nil {
+		return nil, false
+	}
+	v := orchestrion.WrapContext(ctx).Value(internal.ActiveSpanKey)
+	if s, ok := v.(*Span); ok {
+		// We may have a nil *Span wrapped in an interface in the GLS context stack,
+		// in which case we need to act as if there was nothing (otherwise we would
+		// forcefully undo a [ChildOf] option if one was passed).
+		return s, s != nil
+	}
+	return nil, false
+}
+
+// StartSpanFromContext returns a new span with the given operation name and options. If a span
+// is found in the context, it will be used as the parent of the resulting span. If the ChildOf
+// option is passed, it will only be used as the parent if there is no span found in `ctx`.
+func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (*Span, context.Context) {
+	// Copy opts in case the caller reuses the slice in parallel;
+	// we will add at least 1, at most 2 items.
+	optsLocal := options.Expand(opts, 0, 2)
+	if ctx == nil {
+		// default to context.Background() to avoid panics on Go >= 1.15
+		ctx = context.Background()
+	} else if s, ok := SpanFromContext(ctx); ok {
+		optsLocal = append(optsLocal, ChildOf(s.Context()))
+	}
+	optsLocal = append(optsLocal, withContext(ctx))
+	s := StartSpan(operationName, optsLocal...)
+	if s != nil && s.pprofCtxActive != nil {
+		ctx = s.pprofCtxActive
+	}
+	return s, ContextWithSpan(ctx, s)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/data_streams.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/data_streams.go
new file mode 100644
index 00000000..c79f3091
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/data_streams.go
@@ -0,0 +1,73 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"context"
+
+	"github.com/DataDog/dd-trace-go/v2/datastreams/options"
+	idatastreams "github.com/DataDog/dd-trace-go/v2/internal/datastreams"
+)
+
+// dataStreamsContainer is an object that contains a data streams processor.
+type dataStreamsContainer interface {
+	GetDataStreamsProcessor() *idatastreams.Processor
+}
+
+// GetDataStreamsProcessor returns the processor tracking data streams stats.
+func (t *tracer) GetDataStreamsProcessor() *idatastreams.Processor {
+	return t.dataStreams
+}
+
+// SetDataStreamsCheckpoint sets a consume or produce checkpoint in a Data Streams pathway.
+// This enables tracking data flow and end-to-end latency.
+// To learn more about the data streams product, see: https://docs.datadoghq.com/data_streams/go/
+func SetDataStreamsCheckpoint(ctx context.Context, edgeTags ...string) (outCtx context.Context, ok bool) {
+	return SetDataStreamsCheckpointWithParams(ctx, options.CheckpointParams{}, edgeTags...)
+}
+
+// SetDataStreamsCheckpointWithParams sets a consume or produce checkpoint in a Data Streams pathway.
+// This enables tracking data flow and end-to-end latency.
+// To learn more about the data streams product, see: https://docs.datadoghq.com/data_streams/go/
+func SetDataStreamsCheckpointWithParams(ctx context.Context, params options.CheckpointParams, edgeTags ...string) (outCtx context.Context, ok bool) {
+	if t, ok := getGlobalTracer().(dataStreamsContainer); ok {
+		if processor := t.GetDataStreamsProcessor(); processor != nil {
+			outCtx = processor.SetCheckpointWithParams(ctx, params, edgeTags...)
+			return outCtx, true
+		}
+	}
+	return ctx, false
+}
+
+// TrackKafkaCommitOffset should be used in the consumer to track when it acknowledges an offset.
+// If used together with TrackKafkaProduceOffset it can generate a Kafka lag in seconds metric.
+func TrackKafkaCommitOffset(group, topic string, partition int32, offset int64) {
+	if t, ok := getGlobalTracer().(dataStreamsContainer); ok {
+		if p := t.GetDataStreamsProcessor(); p != nil {
+			p.TrackKafkaCommitOffset(group, topic, partition, offset)
+		}
+	}
+}
+
+// TrackKafkaProduceOffset should be used in the producer to track when it produces a message.
+// If used together with TrackKafkaCommitOffset it can generate a Kafka lag in seconds metric.
+func TrackKafkaProduceOffset(topic string, partition int32, offset int64) {
+	if t, ok := getGlobalTracer().(dataStreamsContainer); ok {
+		if p := t.GetDataStreamsProcessor(); p != nil {
+			p.TrackKafkaProduceOffset(topic, partition, offset)
+		}
+	}
+}
+
+// TrackKafkaHighWatermarkOffset should be used in the producer to track the high
+// watermark offset of a partition. If used together with TrackKafkaCommitOffset it
+// can generate a Kafka lag in seconds metric.
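+//
+// Usage sketch (cluster, topic, partition and offset values are illustrative):
+//
+//	tracer.TrackKafkaHighWatermarkOffset("cluster-1", "orders", 0, highWatermark)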
+func TrackKafkaHighWatermarkOffset(cluster string, topic string, partition int32, offset int64) { + if t, ok := getGlobalTracer().(dataStreamsContainer); ok { + if p := t.GetDataStreamsProcessor(); p != nil { + p.TrackKafkaHighWatermarkOffset(cluster, topic, partition, offset) + } + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/doc.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/doc.go new file mode 100644 index 00000000..6b68d7f4 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/doc.go @@ -0,0 +1,110 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package tracer contains Datadog's core tracing client. It is used to trace +// requests as they flow across web servers, databases and microservices, giving +// developers visibility into bottlenecks and troublesome requests. To start the +// tracer, simply call the start method along with an optional set of options. +// By default, the trace agent is considered to be found at "localhost:8126". In a +// setup where this would be different (let's say 127.0.0.1:1234), we could do: +// +// tracer.Start(tracer.WithAgentAddr("127.0.0.1:1234")) +// defer tracer.Stop() +// +// The tracing client can perform trace sampling. While the trace agent +// already samples traces to reduce bandwidth usage, client sampling reduces +// performance overhead. To make use of it, the package comes with a ready-to-use +// rate sampler that can be passed to the tracer. To use it and keep only 30% of the +// requests, one would do: +// +// s := tracer.NewRateSampler(0.3) +// tracer.Start(tracer.WithSampler(s)) +// +// More precise control of sampling rates can be configured using sampling rules. +// This can be applied based on span name, service or both, and is used to determine +// the sampling rate to apply. MaxPerSecond specifies max number of spans per second +// that can be sampled per the rule and applies only to sampling rules of type +// tracer.SamplingRuleSpan. If MaxPerSecond is not specified, the default is no limit. +// +// rules := []tracer.SamplingRule{ +// // sample 10% of traces with the span name "web.request" +// tracer.NameRule("web.request", 0.1), +// // sample 20% of traces for the service "test-service" +// tracer.ServiceRule("test-service", 0.2), +// // sample 30% of traces when the span name is "db.query" and the service +// // is "postgres.db" +// tracer.NameServiceRule("db.query", "postgres.db", 0.3), +// // sample 100% of traces when name and service match these regular expressions +// {Name: regexp.MustCompile("web\\..*"), Service: regexp.MustCompile("^test-"), Rate: 1.0}, +// // sample 50% of spans when service and name match these glob patterns with no limit on the number of spans +// tracer.SpanNameServiceRule("web.*", "test-*", 0.5), +// // sample 50% of spans when service and name match these glob patterns up to 100 spans per second +// tracer.SpanNameServiceMPSRule("web.*", "test-*", 0.5, 100), +// } +// tracer.Start(tracer.WithSamplingRules(rules)) +// defer tracer.Stop() +// +// Sampling rules can also be configured at runtime using the DD_TRACE_SAMPLING_RULES and +// DD_SPAN_SAMPLING_RULES environment variables. When set, it overrides rules set by tracer.WithSamplingRules. +// The value is a JSON array of objects. 
+// For trace sampling rules, the "sample_rate" field is required; the "name" and "service" fields are optional.
+// For span sampling rules, the "name" and "service", if specified, must be a valid glob pattern,
+// i.e. a string where "*" matches any contiguous substring, even an empty string,
+// and "?" matches exactly one character.
+// The "sample_rate" field is optional and, if not specified, defaults to "1.0", sampling 100% of the spans.
+// The "max_per_second" field is optional and, if not specified, defaults to 0, keeping all the previously sampled spans.
+//
+//	export DD_TRACE_SAMPLING_RULES='[{"name": "web.request", "sample_rate": 1.0}]'
+//	export DD_SPAN_SAMPLING_RULES='[{"service":"test.?","name": "web.*", "sample_rate": 1.0, "max_per_second":100}]'
+//
+// To create spans, use the functions StartSpan and StartSpanFromContext. Both accept
+// StartSpanOptions that can be used to configure the span. A span that is started
+// with no parent will begin a new trace. See the function documentation for details
+// on specific usage. Each trace has a hard limit of 100,000 spans, after which the
+// trace will be dropped and a diagnostic log message emitted. In practice users should
+// not approach this limit, as traces of this size are neither useful nor possible to
+// visualize.
+//
+// See the contrib package ( https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/contrib )
+// for integrating Datadog with various libraries, frameworks and clients.
+//
+// All spans created by the tracer contain a context, hereafter referred to as the span
+// context. Note that this is different from Go's context. The span context is used
+// to package essential information from a span, which is needed when creating child
+// spans that inherit from it. Thus, a child span is created from a span's span context.
+// The span context can originate from within the same process, but also a
+// different process or even a different machine in the case of distributed tracing.
+//
+// To make use of distributed tracing, a span's context may be injected via a carrier
+// into a transport (HTTP, RPC, etc.) to be extracted on the other end and used to
+// create spans that are direct descendants of it. A couple of carrier interfaces
+// which should cover most of the use-case scenarios are readily provided, such as
+// HTTPCarrier and TextMapCarrier. Users are free to create their own, which will work
+// with our propagation algorithm as long as they implement the TextMapReader and TextMapWriter
+// interfaces. An example alternate implementation is the MDCarrier in our gRPC integration.
+//
+// As an example, injecting a span's context into an HTTP request would look like this.
+// (See the net/http contrib package for more examples https://pkg.go.dev/github.com/DataDog/dd-trace-go/contrib/net/http/v2):
+//
+//	req, err := http.NewRequest("GET", "http://example.com", nil)
+//	// ...
+//	err = tracer.Inject(span.Context(), tracer.HTTPHeadersCarrier(req.Header))
+//	// ...
+//	http.DefaultClient.Do(req)
+//
+// Then, on the server side, to continue the trace one would do:
+//
+//	sctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(req.Header))
+//	// ...
+//	span := tracer.StartSpan("child.span", tracer.ChildOf(sctx))
+//
+// In the same manner, any means can be used as a carrier to inject a context into a transport. Go's
+// context can also be used as a means to transport spans within the same process.
The methods +// StartSpanFromContext, ContextWithSpan and SpanFromContext exist for this reason. +// +// Some libraries and frameworks are supported out-of-the-box by using one +// of our integrations. You can see a list of supported integrations here: +// https://pkg.go.dev/github.com/DataDog/dd-trace-go/v2/contrib +package tracer // import "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/dynamic_config.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/dynamic_config.go new file mode 100644 index 00000000..c2b67925 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/dynamic_config.go @@ -0,0 +1,119 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "sync" + + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" +) + +// dynamicConfig is a thread-safe generic data structure to represent configuration fields. +// It's designed to satisfy the dynamic configuration semantics (i.e reset, update, apply configuration changes). +// This structure will be extended to track the origin of configuration values as well (e.g remote_config, env_var). +type dynamicConfig[T any] struct { + sync.RWMutex + current T // holds the current configuration value + startup T // holds the startup configuration value + cfgName string // holds the name of the configuration, has to be compatible with telemetry.Configuration.Name + cfgOrigin telemetry.Origin // holds the origin of the current configuration value (currently only supports remote_config, empty otherwise) + apply func(T) bool // executes any config-specific operations to propagate the update properly, returns whether the update was applied + equal func(x, y T) bool // compares two configuration values, this is used to avoid unnecessary config and telemetry updates +} + +func newDynamicConfig[T any](name string, val T, apply func(T) bool, equal func(x, y T) bool) dynamicConfig[T] { + return dynamicConfig[T]{ + cfgName: name, + current: val, + startup: val, + cfgOrigin: telemetry.OriginDefault, + apply: apply, + equal: equal, + } +} + +// get returns the current configuration value +func (dc *dynamicConfig[T]) get() T { + dc.RLock() + defer dc.RUnlock() + return dc.current +} + +// update applies a new configuration value +func (dc *dynamicConfig[T]) update(val T, origin telemetry.Origin) bool { + dc.Lock() + defer dc.Unlock() + if dc.equal(dc.current, val) { + return false + } + dc.current = val + dc.cfgOrigin = origin + return dc.apply(val) +} + +// reset re-applies the startup configuration value +func (dc *dynamicConfig[T]) reset() bool { + dc.Lock() + defer dc.Unlock() + if dc.equal(dc.current, dc.startup) { + return false + } + dc.current = dc.startup + // TODO: set the origin to the startup value's origin + dc.cfgOrigin = telemetry.OriginDefault + return dc.apply(dc.startup) +} + +// handleRC processes a new configuration value from remote config +// Returns whether the configuration value has been updated or not +func (dc *dynamicConfig[T]) handleRC(val *T) bool { + if val != nil { + return dc.update(*val, telemetry.OriginRemoteConfig) + } + return dc.reset() +} + +// toTelemetry returns the current configuration value as telemetry.Configuration +func (dc *dynamicConfig[T]) toTelemetry() telemetry.Configuration { + 
dc.RLock() + defer dc.RUnlock() + return telemetry.Configuration{ + Name: dc.cfgName, + Value: dc.current, + Origin: dc.cfgOrigin, + } +} + +func equal[T comparable](x, y T) bool { + return x == y +} + +// equalSlice compares two slices of comparable values +// The comparison takes into account the order of the elements +func equalSlice[T comparable](x, y []T) bool { + if len(x) != len(y) { + return false + } + for i, v := range x { + if v != y[i] { + return false + } + } + return true +} + +// equalMap compares two maps of comparable keys and values +func equalMap[T comparable](x, y map[T]interface{}) bool { + if len(x) != len(y) { + return false + } + for k, v := range x { + if yv, ok := y[k]; !ok || yv != v { + return false + } + } + return true +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/globaltracer.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/globaltracer.go new file mode 100644 index 00000000..8e2d954b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/globaltracer.go @@ -0,0 +1,23 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import "github.com/DataDog/dd-trace-go/v2/ddtrace/internal" + +func init() { + var tracer Tracer = &NoopTracer{} + internal.SetGlobalTracer(tracer) +} + +// setGlobalTracer sets the global tracer to t. +func setGlobalTracer(t Tracer) { + internal.SetGlobalTracer(t) +} + +// getGlobalTracer returns the currently active tracer. +func getGlobalTracer() Tracer { + return internal.GetGlobalTracer[Tracer]() +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/llmobs.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/llmobs.go new file mode 100644 index 00000000..79205bcb --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/llmobs.go @@ -0,0 +1,74 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package tracer + +import ( + "context" + "strconv" + + "github.com/DataDog/dd-trace-go/v2/internal/llmobs" +) + +var ( + _ llmobs.Tracer = (*llmobsTracerAdapter)(nil) + _ llmobs.APMSpan = (*llmobsSpanAdapter)(nil) +) + +// llmobsTracerAdapter adapts the public ddtrace/tracer API to the internal/llmobs.Tracer interface. +// This allows the internal llmobs package to start APM spans without directly depending +// on the tracer package, avoiding circular dependencies. +type llmobsTracerAdapter struct{} + +func (l *llmobsTracerAdapter) StartSpan(ctx context.Context, name string, cfg llmobs.StartAPMSpanConfig) (llmobs.APMSpan, context.Context) { + opts := make([]StartSpanOption, 0) + if !cfg.StartTime.IsZero() { + opts = append(opts, StartTime(cfg.StartTime)) + } + if cfg.SpanType != "" { + opts = append(opts, SpanType(cfg.SpanType)) + } + span, ctx := StartSpanFromContext(ctx, name, opts...) + return &llmobsSpanAdapter{span}, ctx +} + +// llmobsSpanAdapter adapts a public ddtrace/tracer.Span to the internal/llmobs.APMSpan interface. 
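+// It forwards Finish and AddLink to the wrapped *Span and formats the span and
+// trace IDs as strings, which is how the llmobs package consumes them.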
+type llmobsSpanAdapter struct { + span *Span +} + +func (l *llmobsSpanAdapter) Finish(cfg llmobs.FinishAPMSpanConfig) { + opts := make([]FinishOption, 0) + if !cfg.FinishTime.IsZero() { + opts = append(opts, FinishTime(cfg.FinishTime)) + } + if cfg.Error != nil { + opts = append(opts, WithError(cfg.Error)) + } + l.span.Finish(opts...) +} + +func (l *llmobsSpanAdapter) AddLink(link llmobs.SpanLink) { + l.span.AddLink(SpanLink{ + TraceID: link.TraceID, + TraceIDHigh: link.TraceIDHigh, + SpanID: link.SpanID, + Attributes: link.Attributes, + Tracestate: link.Tracestate, + Flags: link.Flags, + }) +} + +func (l *llmobsSpanAdapter) SpanID() string { + return strconv.FormatUint(l.span.Context().SpanID(), 10) +} + +func (l *llmobsSpanAdapter) TraceID() string { + return l.span.Context().TraceID() +} + +func (l *llmobsSpanAdapter) SetBaggageItem(key string, value string) { + l.span.SetBaggageItem(key, value) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/log.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/log.go new file mode 100644 index 00000000..98d5d2f0 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/log.go @@ -0,0 +1,177 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "bytes" + "encoding/json" + "fmt" + "math" + "net/http" + "runtime" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/appsec" + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/osinfo" + telemetrylog "github.com/DataDog/dd-trace-go/v2/internal/telemetry/log" + "github.com/DataDog/dd-trace-go/v2/internal/version" +) + +// startupInfo contains various information about the status of the tracer on startup. +type startupInfo struct { + Date string `json:"date"` // ISO 8601 date and time of start + OSName string `json:"os_name"` // Windows, Darwin, Debian, etc. + OSVersion string `json:"os_version"` // Version of the OS + Version string `json:"version"` // Tracer version + Lang string `json:"lang"` // "Go" + LangVersion string `json:"lang_version"` // Go version, e.g. 
go1.13 + Env string `json:"env"` // Tracer env + Service string `json:"service"` // Tracer Service + AgentURL string `json:"agent_url"` // The address of the agent + AgentError string `json:"agent_error"` // Any error that occurred trying to connect to agent + Debug bool `json:"debug"` // Whether debug mode is enabled + AnalyticsEnabled bool `json:"analytics_enabled"` // True if there is a global analytics rate set + SampleRate string `json:"sample_rate"` // The default sampling rate for the rules sampler + SampleRateLimit string `json:"sample_rate_limit"` // The rate limit configured with the rules sampler + TraceSamplingRules []SamplingRule `json:"trace_sampling_rules"` // Trace rules used by the rules sampler + SpanSamplingRules []SamplingRule `json:"span_sampling_rules"` // Span rules used by the rules sampler + SamplingRulesError string `json:"sampling_rules_error"` // Any errors that occurred while parsing sampling rules + ServiceMappings map[string]string `json:"service_mappings"` // Service Mappings + Tags map[string]string `json:"tags"` // Global tags + RuntimeMetricsEnabled bool `json:"runtime_metrics_enabled"` // Whether runtime metrics are enabled + RuntimeMetricsV2Enabled bool `json:"runtime_metrics_v2_enabled"` // Whether runtime metrics v2 are enabled + ProfilerCodeHotspotsEnabled bool `json:"profiler_code_hotspots_enabled"` // Whether profiler code hotspots are enabled + ProfilerEndpointsEnabled bool `json:"profiler_endpoints_enabled"` // Whether profiler endpoints are enabled + ApplicationVersion string `json:"dd_version"` // Version of the user's application + Architecture string `json:"architecture"` // Architecture of host machine + GlobalService string `json:"global_service"` // Global service string. If not-nil should be same as Service. (#614) + LambdaMode string `json:"lambda_mode"` // Whether the client has enabled lambda mode + AppSec bool `json:"appsec"` // AppSec status: true when started, false otherwise. + AgentFeatures agentFeatures `json:"agent_features"` // Lists the capabilities of the agent. + Integrations map[string]integrationConfig `json:"integrations"` // Available tracer integrations + PartialFlushEnabled bool `json:"partial_flush_enabled"` // Whether Partial Flushing is enabled + PartialFlushMinSpans int `json:"partial_flush_min_spans"` // The min number of spans to trigger a partial flush + Orchestrion orchestrionConfig `json:"orchestrion"` // Orchestrion (auto-instrumentation) configuration. + FeatureFlags []string `json:"feature_flags"` + PropagationStyleInject string `json:"propagation_style_inject"` // Propagation style for inject + PropagationStyleExtract string `json:"propagation_style_extract"` // Propagation style for extract + TracingAsTransport bool `json:"tracing_as_transport"` // Whether the tracer is disabled and other products are using it as a transport + DogstatsdAddr string `json:"dogstatsd_address"` // Destination of statsd payloads + DataStreamsEnabled bool `json:"data_streams_enabled"` // Whether Data Streams is enabled +} + +// checkEndpoint tries to connect to the URL specified by endpoint. +// If the endpoint is not reachable, checkEndpoint returns an error +// explaining why. 
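+// The probe is a POST of a single 0x90 byte (an empty msgpack array) with a
+// trace count of zero, which a reachable agent accepts as an empty payload.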
+func checkEndpoint(c *http.Client, endpoint string) error { + req, err := http.NewRequest("POST", endpoint, bytes.NewReader([]byte{0x90})) + if err != nil { + return fmt.Errorf("cannot create http request: %s", err.Error()) + } + req.Header.Set(traceCountHeader, "0") + req.Header.Set("Content-Type", "application/msgpack") + res, err := c.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + return nil +} + +// logStartup generates a startupInfo for a tracer and writes it to the log in +// JSON format. +func logStartup(t *tracer) { + tags := make(map[string]string) + for k, v := range t.config.globalTags.get() { + tags[k] = fmt.Sprintf("%v", v) + } + + featureFlags := make([]string, 0, len(t.config.featureFlags)) + for f := range t.config.featureFlags { + featureFlags = append(featureFlags, f) + } + + var injectorNames, extractorNames string + switch v := t.config.propagator.(type) { + case *chainedPropagator: + injectorNames = v.injectorNames + extractorNames = v.extractorsNames + case nil: + injectorNames = "" + extractorNames = "" + default: + injectorNames = "custom" + extractorNames = "custom" + } + // Determine the agent URL to use in the logs + var agentURL string + if t.config.originalAgentURL != nil && t.config.originalAgentURL.Scheme == "unix" { + agentURL = t.config.originalAgentURL.String() + } else { + agentURL = t.config.transport.endpoint() + } + info := startupInfo{ + Date: time.Now().Format(time.RFC3339), + OSName: osinfo.OSName(), + OSVersion: osinfo.OSVersion(), + Version: version.Tag, + Lang: "Go", + LangVersion: runtime.Version(), + Env: t.config.env, + Service: t.config.serviceName, + AgentURL: agentURL, + Debug: t.config.debug, + AnalyticsEnabled: !math.IsNaN(globalconfig.AnalyticsRate()), + SampleRate: fmt.Sprintf("%f", t.rulesSampling.traces.globalRate), + SampleRateLimit: "disabled", + TraceSamplingRules: t.config.traceRules, + SpanSamplingRules: t.config.spanRules, + ServiceMappings: t.config.serviceMappings, + Tags: tags, + RuntimeMetricsEnabled: t.config.runtimeMetrics, + RuntimeMetricsV2Enabled: t.config.runtimeMetricsV2, + ApplicationVersion: t.config.version, + ProfilerCodeHotspotsEnabled: t.config.profilerHotspots, + ProfilerEndpointsEnabled: t.config.profilerEndpoints, + Architecture: runtime.GOARCH, + GlobalService: globalconfig.ServiceName(), + LambdaMode: fmt.Sprintf("%t", t.config.logToStdout), + AgentFeatures: t.config.agent, + Integrations: t.config.integrations, + AppSec: appsec.Enabled(), + PartialFlushEnabled: t.config.partialFlushEnabled, + PartialFlushMinSpans: t.config.partialFlushMinSpans, + Orchestrion: t.config.orchestrionCfg, + FeatureFlags: featureFlags, + PropagationStyleInject: injectorNames, + PropagationStyleExtract: extractorNames, + TracingAsTransport: t.config.tracingAsTransport, + DogstatsdAddr: t.config.dogstatsdAddr, + DataStreamsEnabled: t.config.dataStreamsMonitoringEnabled, + } + if _, _, err := samplingRulesFromEnv(); err != nil { + info.SamplingRulesError = fmt.Sprintf("%s", err.Error()) + } + if limit, ok := t.rulesSampling.TraceRateLimit(); ok { + info.SampleRateLimit = fmt.Sprintf("%v", limit) + } + if !t.config.logToStdout { + if err := checkEndpoint(t.config.httpClient, t.config.transport.endpoint()); err != nil { + info.AgentError = fmt.Sprintf("%s", err.Error()) + log.Warn("DIAGNOSTICS Unable to reach agent intake: %s", err.Error()) + } + } + bs, err := json.Marshal(info) + if err != nil { + //nolint:gocritic // Diagnostic logging needs full struct representation + log.Warn("DIAGNOSTICS Failed to 
serialize json for startup log (%v) %#v\n", err, info)
+		return
+	}
+	log.Info("DATADOG TRACER CONFIGURATION %s\n", string(bs))
+	telemetrylog.Debug("DATADOG TRACER CONFIGURATION %s\n", string(bs))
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/logger.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/logger.go
new file mode 100644
index 00000000..6663fe62
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/logger.go
@@ -0,0 +1,42 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import "github.com/DataDog/dd-trace-go/v2/internal/log"
+
+// Logger implementations are able to log given messages that the tracer or profiler might output.
+type Logger interface {
+	// Log prints the given message.
+	Log(msg string)
+}
+
+// UseLogger sets l as the logger for all tracer and profiler logs.
+func UseLogger(l Logger) {
+	log.UseLogger(l)
+}
+
+// LogLevel represents the logging level that the log package prints at.
+type LogLevel = log.Level
+
+type loggerAdapter struct {
+	fn func(lvl LogLevel, msg string, a ...any)
+}
+
+func (l loggerAdapter) Log(msg string) {
+	l.LogL(log.DefaultLevel(), msg)
+}
+
+func (l loggerAdapter) LogL(lvl LogLevel, msg string) {
+	l.fn(lvl, msg)
+}
+
+// AdaptLogger wraps a leveled logging function in the Logger interface, so that
+// any existing logger can be plugged into the tracer and profiler logs.
+func AdaptLogger(fn func(lvl LogLevel, msg string, a ...any)) Logger {
+	return loggerAdapter{
+		fn: fn,
+	}
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/meta_struct.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/meta_struct.go
new file mode 100644
index 00000000..2f564da0
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/meta_struct.go
@@ -0,0 +1,88 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+ +package tracer + +import ( + "github.com/tinylib/msgp/msgp" +) + +var ( + _ msgp.Encodable = (*metaStructMap)(nil) + _ msgp.Decodable = (*metaStructMap)(nil) + _ msgp.Sizer = (*metaStructMap)(nil) +) + +// metaStructMap is a map of string to any of metadata embedded in each span +// We export special messagepack methods to handle the encoding and decoding of the map +// Because the agent expects the metadata to be a map of string to byte array, we have to create sub-messages of messagepack for each value +type metaStructMap map[string]any + +// EncodeMsg transforms the map[string]any into a map[string][]byte agent-side (which is parsed back into a map[string]any in the backend) +func (m *metaStructMap) EncodeMsg(en *msgp.Writer) error { + err := en.WriteMapHeader(uint32(len(*m))) + if err != nil { + return msgp.WrapError(err, "MetaStruct") + } + + for key, value := range *m { + err = en.WriteString(key) + if err != nil { + return msgp.WrapError(err, "MetaStruct") + } + + // Wrap the encoded value in a byte array that will not be parsed by the agent + msg, err := msgp.AppendIntf(nil, value) + if err != nil { + return msgp.WrapError(err, "MetaStruct", key) + } + + err = en.WriteBytes(msg) + if err != nil { + return msgp.WrapError(err, "MetaStruct", key) + } + } + + return nil +} + +// DecodeMsg transforms the map[string][]byte agent-side into a map[string]any where values are sub-messages in messagepack +func (m *metaStructMap) DecodeMsg(de *msgp.Reader) error { + header, err := de.ReadMapHeader() + if err != nil { + return msgp.WrapError(err, "MetaStruct") + } + + *m = make(metaStructMap, header) + for i := uint32(0); i < header; i++ { + var key string + key, err = de.ReadString() + if err != nil { + return msgp.WrapError(err, "MetaStruct") + } + + subMsg, err := de.ReadBytes(nil) + if err != nil { + return msgp.WrapError(err, "MetaStruct", key) + } + + (*m)[key], _, err = msgp.ReadIntfBytes(subMsg) + if err != nil { + return msgp.WrapError(err, "MetaStruct", key) + } + } + + return nil +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (m *metaStructMap) Msgsize() int { + size := msgp.MapHeaderSize + for key, value := range *m { + size += msgp.StringPrefixSize + len(key) + size += msgp.BytesPrefixSize + msgp.GuessSize(value) + } + return size +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/metrics.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/metrics.go new file mode 100644 index 00000000..f6cf9f3b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/metrics.go @@ -0,0 +1,118 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "runtime" + "runtime/debug" + "time" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/internal/tracerstats" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// defaultMetricsReportInterval specifies the interval at which runtime metrics will +// be reported. +const defaultMetricsReportInterval = 10 * time.Second + +// reportRuntimeMetrics periodically reports go runtime metrics at +// the given interval. 
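+// Values are emitted as statsd gauges sourced from runtime.MemStats and
+// debug.GCStats; the loop stops when the tracer's stop channel is closed.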
+func (t *tracer) reportRuntimeMetrics(interval time.Duration) { + var ms runtime.MemStats + gc := debug.GCStats{ + // When len(stats.PauseQuantiles) is 5, it will be filled with the + // minimum, 25%, 50%, 75%, and maximum pause times. See the documentation + // for (runtime/debug).ReadGCStats. + PauseQuantiles: make([]time.Duration, 5), + } + + tick := time.NewTicker(interval) + defer tick.Stop() + for { + select { + case <-tick.C: + log.Debug("Reporting runtime metrics...") + runtime.ReadMemStats(&ms) + debug.ReadGCStats(&gc) + + statsd := t.statsd + // CPU statistics + statsd.Gauge("runtime.go.num_cpu", float64(runtime.NumCPU()), nil, 1) + statsd.Gauge("runtime.go.num_goroutine", float64(runtime.NumGoroutine()), nil, 1) + statsd.Gauge("runtime.go.num_cgo_call", float64(runtime.NumCgoCall()), nil, 1) + // General statistics + statsd.Gauge("runtime.go.mem_stats.alloc", float64(ms.Alloc), nil, 1) + statsd.Gauge("runtime.go.mem_stats.total_alloc", float64(ms.TotalAlloc), nil, 1) + statsd.Gauge("runtime.go.mem_stats.sys", float64(ms.Sys), nil, 1) + statsd.Gauge("runtime.go.mem_stats.lookups", float64(ms.Lookups), nil, 1) + statsd.Gauge("runtime.go.mem_stats.mallocs", float64(ms.Mallocs), nil, 1) + statsd.Gauge("runtime.go.mem_stats.frees", float64(ms.Frees), nil, 1) + // Heap memory statistics + statsd.Gauge("runtime.go.mem_stats.heap_alloc", float64(ms.HeapAlloc), nil, 1) + statsd.Gauge("runtime.go.mem_stats.heap_sys", float64(ms.HeapSys), nil, 1) + statsd.Gauge("runtime.go.mem_stats.heap_idle", float64(ms.HeapIdle), nil, 1) + statsd.Gauge("runtime.go.mem_stats.heap_inuse", float64(ms.HeapInuse), nil, 1) + statsd.Gauge("runtime.go.mem_stats.heap_released", float64(ms.HeapReleased), nil, 1) + statsd.Gauge("runtime.go.mem_stats.heap_objects", float64(ms.HeapObjects), nil, 1) + // Stack memory statistics + statsd.Gauge("runtime.go.mem_stats.stack_inuse", float64(ms.StackInuse), nil, 1) + statsd.Gauge("runtime.go.mem_stats.stack_sys", float64(ms.StackSys), nil, 1) + // Off-heap memory statistics + statsd.Gauge("runtime.go.mem_stats.m_span_inuse", float64(ms.MSpanInuse), nil, 1) + statsd.Gauge("runtime.go.mem_stats.m_span_sys", float64(ms.MSpanSys), nil, 1) + statsd.Gauge("runtime.go.mem_stats.m_cache_inuse", float64(ms.MCacheInuse), nil, 1) + statsd.Gauge("runtime.go.mem_stats.m_cache_sys", float64(ms.MCacheSys), nil, 1) + statsd.Gauge("runtime.go.mem_stats.buck_hash_sys", float64(ms.BuckHashSys), nil, 1) + statsd.Gauge("runtime.go.mem_stats.gc_sys", float64(ms.GCSys), nil, 1) + statsd.Gauge("runtime.go.mem_stats.other_sys", float64(ms.OtherSys), nil, 1) + // Garbage collector statistics + statsd.Gauge("runtime.go.mem_stats.next_gc", float64(ms.NextGC), nil, 1) + statsd.Gauge("runtime.go.mem_stats.last_gc", float64(ms.LastGC), nil, 1) + statsd.Gauge("runtime.go.mem_stats.pause_total_ns", float64(ms.PauseTotalNs), nil, 1) + statsd.Gauge("runtime.go.mem_stats.num_gc", float64(ms.NumGC), nil, 1) + statsd.Gauge("runtime.go.mem_stats.num_forced_gc", float64(ms.NumForcedGC), nil, 1) + statsd.Gauge("runtime.go.mem_stats.gc_cpu_fraction", ms.GCCPUFraction, nil, 1) + for i, p := range []string{"min", "25p", "50p", "75p", "max"} { + statsd.Gauge("runtime.go.gc_stats.pause_quantiles."+p, float64(gc.PauseQuantiles[i]), nil, 1) + } + + case <-t.stop: + return + } + } +} + +// reportHealthMetricsAtInterval reports noisy health metrics at the specified interval. +// The periodic reporting ensures metrics are delivered without overwhelming the system or logs. 
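+// It currently covers the datadog.tracer.spans_started, spans_finished and
+// traces_dropped counts.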
+func (t *tracer) reportHealthMetricsAtInterval(interval time.Duration) {
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			// If there are started spans, report their count per integration, then
+			// reset it. Count() reports the total number of occurrences within one
+			// interval; we reset our count to 0 regardless of whether Count succeeded,
+			// so that the next interval starts from a clean slate.
+			for k, v := range t.spansStarted.GetAndReset() {
+				t.statsd.Count("datadog.tracer.spans_started", v, []string{"integration:" + k}, 1)
+			}
+
+			// Likewise for finished spans: report the count per integration, then
+			// reset it regardless of whether Count succeeded.
+			for k, v := range t.spansFinished.GetAndReset() {
+				t.statsd.Count("datadog.tracer.spans_finished", v, []string{"integration:" + k}, 1)
+			}
+
+			t.statsd.Count("datadog.tracer.traces_dropped", int64(tracerstats.Count(tracerstats.TracesDropped)), []string{"reason:trace_too_large"}, 1)
+		case <-t.stop:
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/noop.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/noop.go
new file mode 100644
index 00000000..6401bc51
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/noop.go
@@ -0,0 +1,39 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import "github.com/DataDog/dd-trace-go/v2/internal/log"
+
+var _ Tracer = (*NoopTracer)(nil)
+
+// NoopTracer is an implementation of Tracer that is a no-op.
+type NoopTracer struct{}
+
+// StartSpan implements Tracer.
+func (NoopTracer) StartSpan(_ string, _ ...StartSpanOption) *Span {
+	log.Debug("Tracer must be started before starting a span; review the docs for more information: https://docs.datadoghq.com/tracing/trace_collection/library_config/go/")
+	return nil
+}
+
+// SetServiceInfo implements Tracer.
+func (NoopTracer) SetServiceInfo(_, _, _ string) {}
+
+// Extract implements Tracer.
+func (NoopTracer) Extract(_ interface{}) (*SpanContext, error) {
+	return nil, nil
+}
+
+// Inject implements Tracer.
+func (NoopTracer) Inject(_ *SpanContext, _ interface{}) error { return nil }
+
+// Stop implements Tracer.
+func (NoopTracer) Stop() {}
+
+func (NoopTracer) TracerConf() TracerConf {
+	return TracerConf{}
+}
+
+func (NoopTracer) Flush() {}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/option.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/option.go
new file mode 100644
index 00000000..2413a6a6
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/option.go
@@ -0,0 +1,1727 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+ +package tracer + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "runtime/debug" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/mod/semver" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/tinylib/msgp/msgp" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/internal" + appsecconfig "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + llmobsconfig "github.com/DataDog/dd-trace-go/v2/internal/llmobs/config" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/namingschema" + "github.com/DataDog/dd-trace-go/v2/internal/normalizer" + "github.com/DataDog/dd-trace-go/v2/internal/orchestrion" + "github.com/DataDog/dd-trace-go/v2/internal/stableconfig" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + "github.com/DataDog/dd-trace-go/v2/internal/traceprof" + "github.com/DataDog/dd-trace-go/v2/internal/version" + + "github.com/DataDog/datadog-go/v5/statsd" +) + +const ( + envLLMObsEnabled = "DD_LLMOBS_ENABLED" + envLLMObsMlApp = "DD_LLMOBS_ML_APP" + envLLMObsAgentlessEnabled = "DD_LLMOBS_AGENTLESS_ENABLED" + envLLMObsProjectName = "DD_LLMOBS_PROJECT_NAME" +) + +var contribIntegrations = map[string]struct { + name string // user readable name for startup logs + imported bool // true if the user has imported the integration +}{ + "github.com/99designs/gqlgen": {"gqlgen", false}, + "github.com/aws/aws-sdk-go": {"AWS SDK", false}, + "github.com/aws/aws-sdk-go-v2": {"AWS SDK v2", false}, + "github.com/bradfitz/gomemcache": {"Memcache", false}, + "cloud.google.com/go/pubsub.v1": {"Pub/Sub", false}, + "cloud.google.com/go/pubsub/v2": {"Pub/Sub v2", false}, + "github.com/confluentinc/confluent-kafka-go": {"Kafka (confluent)", false}, + "github.com/confluentinc/confluent-kafka-go/v2": {"Kafka (confluent) v2", false}, + "database/sql": {"SQL", false}, + "github.com/dimfeld/httptreemux/v5": {"HTTP Treemux", false}, + "github.com/elastic/go-elasticsearch/v6": {"Elasticsearch v6", false}, + "github.com/emicklei/go-restful/v3": {"go-restful v3", false}, + "github.com/gin-gonic/gin": {"Gin", false}, + "github.com/globalsign/mgo": {"MongoDB (mgo)", false}, + "github.com/go-chi/chi": {"chi", false}, + "github.com/go-chi/chi/v5": {"chi v5", false}, + "github.com/go-pg/pg/v10": {"go-pg v10", false}, + "github.com/go-redis/redis": {"Redis", false}, + "github.com/go-redis/redis/v7": {"Redis v7", false}, + "github.com/go-redis/redis/v8": {"Redis v8", false}, + "go.mongodb.org/mongo-driver": {"MongoDB", false}, + "github.com/gocql/gocql": {"Cassandra", false}, + "github.com/gofiber/fiber/v2": {"Fiber", false}, + "github.com/gomodule/redigo": {"Redigo", false}, + "google.golang.org/api": {"Google API", false}, + "google.golang.org/grpc": {"gRPC", false}, + "github.com/gorilla/mux": {"Gorilla Mux", false}, + "gorm.io/gorm.v1": {"Gorm v1", false}, + "github.com/graph-gophers/graphql-go": {"Graph Gophers GraphQL", false}, + "github.com/graphql-go/graphql": {"GraphQL-Go GraphQL", false}, + "github.com/hashicorp/consul/api": {"Consul", false}, + "github.com/hashicorp/vault/api": {"Vault", false}, + "github.com/jackc/pgx/v5": {"PGX", false}, + "github.com/jmoiron/sqlx": {"SQLx", false}, + 
"github.com/julienschmidt/httprouter": {"HTTP Router", false}, + "k8s.io/client-go/kubernetes": {"Kubernetes", false}, + "github.com/labstack/echo/v4": {"echo v4", false}, + "log/slog": {"log/slog", false}, + "github.com/miekg/dns": {"miekg/dns", false}, + "net/http": {"HTTP", false}, + "gopkg.in/olivere/elastic.v5": {"Elasticsearch v5", false}, + "github.com/redis/go-redis/v9": {"Redis v9", false}, + "github.com/redis/rueidis": {"Rueidis", false}, + "github.com/segmentio/kafka-go": {"Kafka v0", false}, + "github.com/IBM/sarama": {"IBM sarama", false}, + "github.com/Shopify/sarama": {"Shopify sarama", false}, + "github.com/sirupsen/logrus": {"Logrus", false}, + "github.com/syndtr/goleveldb": {"LevelDB", false}, + "github.com/tidwall/buntdb": {"BuntDB", false}, + "github.com/twitchtv/twirp": {"Twirp", false}, + "github.com/uptrace/bun": {"Bun", false}, + "github.com/urfave/negroni": {"Negroni", false}, + "github.com/valyala/fasthttp": {"FastHTTP", false}, + "github.com/valkey-io/valkey-go": {"Valkey", false}, +} + +var ( + // defaultSocketDSD specifies the socket path to use for connecting to the statsd server. + // Replaced in tests + defaultSocketDSD = "/var/run/datadog/dsd.socket" + + // defaultStatsdPort specifies the default port to use for connecting to the statsd server. + defaultStatsdPort = "8125" + + // defaultMaxTagsHeaderLen specifies the default maximum length of the X-Datadog-Tags header value. + defaultMaxTagsHeaderLen = 512 + + // defaultRateLimit specifies the default trace rate limit used when DD_TRACE_RATE_LIMIT is not set. + defaultRateLimit = 100.0 +) + +// Supported trace protocols. +const ( + traceProtocolV04 = 0.4 // v0.4 (default) + traceProtocolV1 = 1.0 // v1.0 +) + +// config holds the tracer configuration. +type config struct { + // debug, when true, writes details to logs. + debug bool + + // appsecStartOptions controls the options used when starting appsec features. + appsecStartOptions []appsecconfig.StartOption + + // agent holds the capabilities of the agent and determines some + // of the behaviour of the tracer. + agent agentFeatures + + // integrations reports if the user has instrumented a Datadog integration and + // if they have a version of the library available to integrate. + integrations map[string]integrationConfig + + // featureFlags specifies any enabled feature flags. + featureFlags map[string]struct{} + + // logToStdout reports whether we should log all traces to the standard + // output instead of using the agent. This is used in Lambda environments. + logToStdout bool + + // sendRetries is the number of times a trace or CI Visibility payload send is retried upon + // failure. + sendRetries int + + // retryInterval is the interval between agent connection retries. It has no effect if sendRetries is not set + retryInterval time.Duration + + // logStartup, when true, causes various startup info to be written + // when the tracer starts. + logStartup bool + + // serviceName specifies the name of this application. + serviceName string + + // universalVersion, reports whether span service name and config service name + // should match to set application version tag. False by default + universalVersion bool + + // version specifies the version of this application + version string + + // env contains the environment that this application will run under. + env string + + // sampler specifies the sampler that will be used for sampling traces. + sampler RateSampler + + // agentURL is the agent URL that receives traces from the tracer. 
+ agentURL *url.URL + + // originalAgentURL is the agent URL that receives traces from the tracer and does not get changed. + originalAgentURL *url.URL + + // serviceMappings holds a set of service mappings to dynamically rename services + serviceMappings map[string]string + + // globalTags holds a set of tags that will be automatically applied to + // all spans. + globalTags dynamicConfig[map[string]interface{}] + + // transport specifies the Transport interface which will be used to send data to the agent. + transport transport + + // httpClientTimeout specifies the timeout for the HTTP client. + httpClientTimeout time.Duration + + // propagator propagates span context cross-process + propagator Propagator + + // httpClient specifies the HTTP client to be used by the agent's transport. + httpClient *http.Client + + // hostname is automatically assigned when the DD_TRACE_REPORT_HOSTNAME is set to true, + // and is added as a special tag to the root span of traces. + hostname string + + // logger specifies the logger to use when printing errors. If not specified, the "log" package + // will be used. + logger Logger + + // runtimeMetrics specifies whether collection of runtime metrics is enabled. + runtimeMetrics bool + + // runtimeMetricsV2 specifies whether collection of runtime metrics v2 is enabled. + runtimeMetricsV2 bool + + // dogstatsdAddr specifies the address to connect for sending metrics to the + // Datadog Agent. If not set, it defaults to "localhost:8125" or to the + // combination of the environment variables DD_AGENT_HOST and DD_DOGSTATSD_PORT. + dogstatsdAddr string + + // statsdClient is set when a user provides a custom statsd client for tracking metrics + // associated with the runtime and the tracer. + statsdClient internal.StatsdClient + + // spanRules contains user-defined rules to determine the sampling rate to apply + // to a single span without affecting the entire trace + spanRules []SamplingRule + + // traceRules contains user-defined rules to determine the sampling rate to apply + // to the entire trace if any spans satisfy the criteria + traceRules []SamplingRule + + // tickChan specifies a channel which will receive the time every time the tracer must flush. + // It defaults to time.Ticker; replaced in tests. + tickChan <-chan time.Time + + // noDebugStack disables the collection of debug stack traces globally. No traces reporting + // errors will record a stack trace when this option is set. + noDebugStack bool + + // profilerHotspots specifies whether profiler Code Hotspots is enabled. + profilerHotspots bool + + // profilerEndpoints specifies whether profiler endpoint filtering is enabled. + profilerEndpoints bool + + // enabled reports whether tracing is enabled. + enabled dynamicConfig[bool] + + // enableHostnameDetection specifies whether the tracer should enable hostname detection. + enableHostnameDetection bool + + // spanAttributeSchemaVersion holds the selected DD_TRACE_SPAN_ATTRIBUTE_SCHEMA version. + spanAttributeSchemaVersion int + + // peerServiceDefaultsEnabled indicates whether the peer.service tag calculation is enabled or not. + peerServiceDefaultsEnabled bool + + // peerServiceMappings holds a set of service mappings to dynamically rename peer.service values. 
+ peerServiceMappings map[string]string
+
+ // debugAbandonedSpans controls if the tracer should log when old, open spans are found
+ debugAbandonedSpans bool
+
+ // spanTimeout represents how old a span can be before it should be logged as a possible
+ // misconfiguration
+ spanTimeout time.Duration
+
+ // partialFlushMinSpans is the number of finished spans in a single trace to trigger a
+ // partial flush, or 0 if partial flushing is disabled.
+ // Value from DD_TRACE_PARTIAL_FLUSH_MIN_SPANS, default 1000.
+ partialFlushMinSpans int
+
+ // partialFlushEnabled specifies whether the tracer should enable partial flushing. Value
+ // from DD_TRACE_PARTIAL_FLUSH_ENABLED, default false.
+ partialFlushEnabled bool
+
+ // statsComputationEnabled enables client-side stats computation (aka trace metrics).
+ statsComputationEnabled bool
+
+ // dataStreamsMonitoringEnabled specifies whether the tracer should enable monitoring of data streams.
+ dataStreamsMonitoringEnabled bool
+
+ // orchestrionCfg holds Orchestrion (aka auto-instrumentation) configuration.
+ // Only used for telemetry currently.
+ orchestrionCfg orchestrionConfig
+
+ // traceSampleRate holds the trace sample rate.
+ traceSampleRate dynamicConfig[float64]
+
+ // traceSampleRules holds the trace sampling rules.
+ traceSampleRules dynamicConfig[[]SamplingRule]
+
+ // headerAsTags holds the header as tags configuration.
+ headerAsTags dynamicConfig[[]string]
+
+ // dynamicInstrumentationEnabled controls if the target application can be modified by Dynamic Instrumentation or not.
+ // Value from DD_DYNAMIC_INSTRUMENTATION_ENABLED, default false.
+ dynamicInstrumentationEnabled bool
+
+ // globalSampleRate holds sample rate read from environment variables.
+ globalSampleRate float64
+
+ // ciVisibilityEnabled controls if the tracer is loaded with CI Visibility mode. default false
+ ciVisibilityEnabled bool
+
+ // ciVisibilityAgentless controls if the tracer is loaded with CI Visibility agentless mode. default false
+ ciVisibilityAgentless bool
+
+ // logDirectory is the directory for tracer logs specified by the user setting DD_TRACE_LOG_DIRECTORY. default empty/unused
+ logDirectory string
+
+ // tracingAsTransport specifies whether the tracer is running in transport-only mode, where traces are only sent when other products request it.
+ tracingAsTransport bool
+
+ // traceRateLimitPerSecond specifies the rate limit for traces.
+ traceRateLimitPerSecond float64
+
+ // traceProtocol specifies the trace protocol to use.
+ traceProtocol float64
+
+ // llmobs contains the LLM Observability config.
+ llmobs llmobsconfig.Config
+}
+
+// orchestrionConfig contains Orchestrion configuration.
+type (
+ orchestrionConfig struct {
+ // Enabled indicates whether this tracer was instantiated via Orchestrion.
+ Enabled bool `json:"enabled"`
+
+ // Metadata holds Orchestrion-specific metadata (e.g. orchestrion version, mode (toolexec or manual), etc.)
+ Metadata *orchestrionMetadata `json:"metadata,omitempty"`
+ }
+ orchestrionMetadata struct {
+ // Version is the version of the orchestrion tool that was used to instrument the application.
+ Version string `json:"version,omitempty"`
+ }
+)
+
+// HasFeature reports whether feature f is enabled.
+func (c *config) HasFeature(f string) bool {
+ _, ok := c.featureFlags[strings.TrimSpace(f)]
+ return ok
+}
+
+// StartOption represents a function that can be provided as a parameter to Start.
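+//
+// A hedged usage sketch (the service and environment values are illustrative
+// only; in this major version Start also returns an error, produced for
+// example by newConfig below, so check it):
+//
+//	if err := tracer.Start(
+//		tracer.WithService("my-service"),
+//		tracer.WithEnv("staging"),
+//	); err != nil {
+//		log.Fatal(err)
+//	}
+//	defer tracer.Stop()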
+type StartOption func(*config) + +// maxPropagatedTagsLength limits the size of DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH to prevent HTTP 413 responses. +const maxPropagatedTagsLength = 512 + +// partialFlushMinSpansDefault is the default number of spans for partial flushing, if enabled. +const partialFlushMinSpansDefault = 1000 + +// newConfig renders the tracer configuration based on defaults, environment variables +// and passed user opts. +func newConfig(opts ...StartOption) (*config, error) { + c := new(config) + + // If this was built with a recent-enough version of Orchestrion, force the orchestrion config to + // the baked-in values. We do this early so that opts can be used to override the baked-in values, + // which is necessary for some tests to work properly. + c.orchestrionCfg.Enabled = orchestrion.Enabled() + if orchestrion.Version != "" { + c.orchestrionCfg.Metadata = &orchestrionMetadata{Version: orchestrion.Version} + } + + c.sampler = NewAllSampler() + sampleRate := math.NaN() + if r := getDDorOtelConfig("sampleRate"); r != "" { + var err error + sampleRate, err = strconv.ParseFloat(r, 64) + if err != nil { + log.Warn("ignoring DD_TRACE_SAMPLE_RATE, error: %s", err.Error()) + sampleRate = math.NaN() + } else if sampleRate < 0.0 || sampleRate > 1.0 { + log.Warn("ignoring DD_TRACE_SAMPLE_RATE: out of range %f", sampleRate) + sampleRate = math.NaN() + } + } + c.globalSampleRate = sampleRate + c.httpClientTimeout = time.Second * 10 // 10 seconds + + c.traceRateLimitPerSecond = defaultRateLimit + origin := telemetry.OriginDefault + if v, ok := env.Lookup("DD_TRACE_RATE_LIMIT"); ok { + l, err := strconv.ParseFloat(v, 64) + if err != nil { + log.Warn("DD_TRACE_RATE_LIMIT invalid, using default value %f: %v", defaultRateLimit, err.Error()) + } else if l < 0.0 { + log.Warn("DD_TRACE_RATE_LIMIT negative, using default value %f", defaultRateLimit) + } else { + c.traceRateLimitPerSecond = l + origin = telemetry.OriginEnvVar + } + } + + reportTelemetryOnAppStarted(telemetry.Configuration{Name: "trace_rate_limit", Value: c.traceRateLimitPerSecond, Origin: origin}) + + // Set the trace protocol to use. 
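+ // The accepted values correspond to the trace protocol constants declared
+ // above: 0.4 selects v0.4 (the default) and 1.0 selects v1.0.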
+ c.traceProtocol = internal.FloatEnv("DD_TRACE_AGENT_PROTOCOL_VERSION", traceProtocolV04)
+
+ if v := env.Get("OTEL_LOGS_EXPORTER"); v != "" {
+ log.Warn("OTEL_LOGS_EXPORTER is not supported")
+ }
+ if internal.BoolEnv("DD_TRACE_ANALYTICS_ENABLED", false) {
+ globalconfig.SetAnalyticsRate(1.0)
+ }
+ if env.Get("DD_TRACE_REPORT_HOSTNAME") == "true" {
+ var err error
+ c.hostname, err = os.Hostname()
+ if err != nil {
+ log.Warn("unable to look up hostname: %s", err.Error())
+ return c, fmt.Errorf("unable to look up hostname: %s", err.Error())
+ }
+ }
+ if v := env.Get("DD_TRACE_SOURCE_HOSTNAME"); v != "" {
+ c.hostname = v
+ }
+ if v := env.Get("DD_ENV"); v != "" {
+ c.env = v
+ }
+ if v := env.Get("DD_TRACE_FEATURES"); v != "" {
+ WithFeatureFlags(strings.FieldsFunc(v, func(r rune) bool {
+ return r == ',' || r == ' '
+ })...)(c)
+ }
+ if v := getDDorOtelConfig("service"); v != "" {
+ c.serviceName = v
+ globalconfig.SetServiceName(v)
+ }
+ if ver := env.Get("DD_VERSION"); ver != "" {
+ c.version = ver
+ }
+ if v := env.Get("DD_SERVICE_MAPPING"); v != "" {
+ internal.ForEachStringTag(v, internal.DDTagsDelimiter, func(key, val string) { WithServiceMapping(key, val)(c) })
+ }
+ c.headerAsTags = newDynamicConfig("trace_header_tags", nil, setHeaderTags, equalSlice[string])
+ if v := env.Get("DD_TRACE_HEADER_TAGS"); v != "" {
+ c.headerAsTags.update(strings.Split(v, ","), telemetry.OriginEnvVar)
+ // Required to ensure that the startup header tags are set on reset.
+ c.headerAsTags.startup = c.headerAsTags.current
+ }
+ if v := getDDorOtelConfig("resourceAttributes"); v != "" {
+ tags := internal.ParseTagString(v)
+ internal.CleanGitMetadataTags(tags)
+ for key, val := range tags {
+ WithGlobalTag(key, val)(c)
+ }
+ // TODO: should we track the origin of these tags individually?
+ c.globalTags.cfgOrigin = telemetry.OriginEnvVar
+ }
+ if _, ok := env.Lookup("AWS_LAMBDA_FUNCTION_NAME"); ok {
+ // AWS_LAMBDA_FUNCTION_NAME being set indicates that we're running in an AWS Lambda environment.
+ // See: https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html + c.logToStdout = true + } + c.logStartup = internal.BoolEnv("DD_TRACE_STARTUP_LOGS", true) + c.runtimeMetrics = internal.BoolVal(getDDorOtelConfig("metrics"), false) + c.runtimeMetricsV2 = internal.BoolEnv("DD_RUNTIME_METRICS_V2_ENABLED", true) + c.debug = internal.BoolVal(getDDorOtelConfig("debugMode"), false) + c.logDirectory = env.Get("DD_TRACE_LOG_DIRECTORY") + c.enabled = newDynamicConfig("tracing_enabled", internal.BoolVal(getDDorOtelConfig("enabled"), true), func(_ bool) bool { return true }, equal[bool]) + if _, ok := env.Lookup("DD_TRACE_ENABLED"); ok { + c.enabled.cfgOrigin = telemetry.OriginEnvVar + } + c.profilerEndpoints = internal.BoolEnv(traceprof.EndpointEnvVar, true) + c.profilerHotspots = internal.BoolEnv(traceprof.CodeHotspotsEnvVar, true) + if compatMode := env.Get("DD_TRACE_CLIENT_HOSTNAME_COMPAT"); compatMode != "" { + if semver.IsValid(compatMode) { + c.enableHostnameDetection = semver.Compare(semver.MajorMinor(compatMode), "v1.66") <= 0 + } else { + log.Warn("ignoring DD_TRACE_CLIENT_HOSTNAME_COMPAT, invalid version %q", compatMode) + } + } + c.debugAbandonedSpans = internal.BoolEnv("DD_TRACE_DEBUG_ABANDONED_SPANS", false) + if c.debugAbandonedSpans { + c.spanTimeout = internal.DurationEnv("DD_TRACE_ABANDONED_SPAN_TIMEOUT", 10*time.Minute) + } + c.statsComputationEnabled = internal.BoolEnv("DD_TRACE_STATS_COMPUTATION_ENABLED", true) + c.dataStreamsMonitoringEnabled, _, _ = stableconfig.Bool("DD_DATA_STREAMS_ENABLED", false) + c.partialFlushEnabled = internal.BoolEnv("DD_TRACE_PARTIAL_FLUSH_ENABLED", false) + c.partialFlushMinSpans = internal.IntEnv("DD_TRACE_PARTIAL_FLUSH_MIN_SPANS", partialFlushMinSpansDefault) + if c.partialFlushMinSpans <= 0 { + log.Warn("DD_TRACE_PARTIAL_FLUSH_MIN_SPANS=%d is not a valid value, setting to default %d", c.partialFlushMinSpans, partialFlushMinSpansDefault) + c.partialFlushMinSpans = partialFlushMinSpansDefault + } else if c.partialFlushMinSpans >= traceMaxSize { + log.Warn("DD_TRACE_PARTIAL_FLUSH_MIN_SPANS=%d is above the max number of spans that can be kept in memory for a single trace (%d spans), so partial flushing will never trigger, setting to default %d", c.partialFlushMinSpans, traceMaxSize, partialFlushMinSpansDefault) + c.partialFlushMinSpans = partialFlushMinSpansDefault + } + // TODO(partialFlush): consider logging a warning if DD_TRACE_PARTIAL_FLUSH_MIN_SPANS + // is set, but DD_TRACE_PARTIAL_FLUSH_ENABLED is not true. Or just assume it should be enabled + // if it's explicitly set, and don't require both variables to be configured. 
+ + c.dynamicInstrumentationEnabled, _, _ = stableconfig.Bool("DD_DYNAMIC_INSTRUMENTATION_ENABLED", false) + + namingschema.LoadFromEnv() + c.spanAttributeSchemaVersion = int(namingschema.GetVersion()) + + // peer.service tag default calculation is enabled by default if using attribute schema >= 1 + c.peerServiceDefaultsEnabled = true + if c.spanAttributeSchemaVersion == int(namingschema.SchemaV0) { + c.peerServiceDefaultsEnabled = internal.BoolEnv("DD_TRACE_PEER_SERVICE_DEFAULTS_ENABLED", false) + } + c.peerServiceMappings = make(map[string]string) + if v := env.Get("DD_TRACE_PEER_SERVICE_MAPPING"); v != "" { + internal.ForEachStringTag(v, internal.DDTagsDelimiter, func(key, val string) { c.peerServiceMappings[key] = val }) + } + c.retryInterval = time.Millisecond + + // LLM Observability config + c.llmobs = llmobsconfig.Config{ + Enabled: internal.BoolEnv(envLLMObsEnabled, false), + MLApp: env.Get(envLLMObsMlApp), + AgentlessEnabled: llmobsAgentlessEnabledFromEnv(), + ProjectName: env.Get(envLLMObsProjectName), + } + for _, fn := range opts { + if fn == nil { + continue + } + fn(c) + } + if c.agentURL == nil { + c.agentURL = internal.AgentURLFromEnv() + } + c.originalAgentURL = c.agentURL // Preserve the original agent URL for logging + if c.httpClient == nil || orchestrion.Enabled() { + if orchestrion.Enabled() && c.httpClient != nil { + // Make sure we don't create http client traces from inside the tracer by using our http client + // TODO(eliott.bouhana): remove once dd:no-span is implemented + log.Debug("Orchestrion is enabled, but a custom HTTP client was provided to tracer.Start. This is not supported and will be ignored.") + } + if c.agentURL.Scheme == "unix" { + // If we're connecting over UDS we can just rely on the agent to provide the hostname + log.Debug("connecting to agent over unix, do not set hostname on any traces") + c.httpClient = udsClient(c.agentURL.Path, c.httpClientTimeout) + // TODO(darccio): use internal.UnixDataSocketURL instead + c.agentURL = &url.URL{ + Scheme: "http", + Host: fmt.Sprintf("UDS_%s", strings.NewReplacer(":", "_", "/", "_", `\`, "_").Replace(c.agentURL.Path)), + } + } else { + c.httpClient = defaultHTTPClient(c.httpClientTimeout, false) + } + } + WithGlobalTag(ext.RuntimeID, globalconfig.RuntimeID())(c) + globalTags := c.globalTags.get() + if c.env == "" { + if v, ok := globalTags["env"]; ok { + if e, ok := v.(string); ok { + c.env = e + } + } + } + if c.version == "" { + if v, ok := globalTags["version"]; ok { + if ver, ok := v.(string); ok { + c.version = ver + } + } + } + if c.serviceName == "" { + if v, ok := globalTags["service"]; ok { + if s, ok := v.(string); ok { + c.serviceName = s + globalconfig.SetServiceName(s) + } + } else { + // There is not an explicit service set, default to binary name. + // In this case, don't set a global service name so the contribs continue using their defaults. + c.serviceName = filepath.Base(os.Args[0]) + } + } + if c.transport == nil { + c.transport = newHTTPTransport(c.agentURL.String(), c.httpClient) + } + if c.propagator == nil { + envKey := "DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH" + maxLen := internal.IntEnv(envKey, defaultMaxTagsHeaderLen) + if maxLen < 0 { + log.Warn("Invalid value %d for %s. Setting to 0.", maxLen, envKey) + maxLen = 0 + } + if maxLen > maxPropagatedTagsLength { + log.Warn("Invalid value %d for %s. Maximum allowed is %d. 
Setting to %d.", maxLen, envKey, maxPropagatedTagsLength, maxPropagatedTagsLength) + maxLen = maxPropagatedTagsLength + } + c.propagator = NewPropagator(&PropagatorConfig{ + MaxTagsHeaderLen: maxLen, + }) + } + if c.logger != nil { + log.UseLogger(c.logger) + } + if c.debug { + log.SetLevel(log.LevelDebug) + } + + // Check if CI Visibility mode is enabled + if internal.BoolEnv(constants.CIVisibilityEnabledEnvironmentVariable, false) { + c.ciVisibilityEnabled = true // Enable CI Visibility mode + c.httpClientTimeout = time.Second * 45 // Increase timeout up to 45 seconds (same as other tracers in CIVis mode) + c.logStartup = false // If we are in CI Visibility mode we don't want to log the startup to stdout to avoid polluting the output + ciTransport := newCiVisibilityTransport(c) // Create a default CI Visibility Transport + c.transport = ciTransport // Replace the default transport with the CI Visibility transport + c.ciVisibilityAgentless = ciTransport.agentless + } + + // if using stdout or traces are disabled or we are in ci visibility agentless mode, agent is disabled + agentDisabled := c.logToStdout || !c.enabled.current || c.ciVisibilityAgentless + c.agent = loadAgentFeatures(agentDisabled, c.agentURL, c.httpClient) + info, ok := debug.ReadBuildInfo() + if !ok { + c.loadContribIntegrations([]*debug.Module{}) + } else { + c.loadContribIntegrations(info.Deps) + } + if c.statsdClient == nil { + // configure statsd client + addr := resolveDogstatsdAddr(c) + globalconfig.SetDogstatsdAddr(addr) + c.dogstatsdAddr = addr + } + // Re-initialize the globalTags config with the value constructed from the environment and start options + // This allows persisting the initial value of globalTags for future resets and updates. + globalTagsOrigin := c.globalTags.cfgOrigin + c.initGlobalTags(c.globalTags.get(), globalTagsOrigin) + if tracingEnabled, _, _ := stableconfig.Bool("DD_APM_TRACING_ENABLED", true); !tracingEnabled { + apmTracingDisabled(c) + } + // Update the llmobs config with stuff needed from the tracer. + c.llmobs.TracerConfig = llmobsconfig.TracerConfig{ + DDTags: c.globalTags.get(), + Env: c.env, + Service: c.serviceName, + Version: c.version, + AgentURL: c.agentURL, + APIKey: env.Get("DD_API_KEY"), + APPKey: env.Get("DD_APP_KEY"), + HTTPClient: c.httpClient, + Site: env.Get("DD_SITE"), + } + c.llmobs.AgentFeatures = llmobsconfig.AgentFeatures{ + EVPProxyV2: c.agent.evpProxyV2, + } + + return c, nil +} + +func llmobsAgentlessEnabledFromEnv() *bool { + v, ok := internal.BoolEnvNoDefault(envLLMObsAgentlessEnabled) + if !ok { + return nil + } + return &v +} + +func apmTracingDisabled(c *config) { + // Enable tracing as transport layer mode + // This means to stop sending trace metrics, send one trace per minute and those force-kept by other products + // using the tracer as transport layer for their data. And finally adding the _dd.apm.enabled=0 tag to all traces + // to let the backend know that it needs to keep APM UI disabled. + c.globalSampleRate = 1.0 + c.traceRateLimitPerSecond = 1.0 / 60 + c.tracingAsTransport = true + WithGlobalTag("_dd.apm.enabled", 0)(c) + // Disable runtime metrics. In `tracingAsTransport` mode, we'll still + // tell the agent we computed them, so it doesn't do it either. + c.runtimeMetrics = false + c.runtimeMetricsV2 = false +} + +// resolveDogstatsdAddr resolves the Dogstatsd address to use, based on the user-defined +// address and the agent-reported port. If the agent reports a port, it will be used +// instead of the user-defined address' port. 
UDS paths are honored regardless of the
+// agent-reported port.
+func resolveDogstatsdAddr(c *config) string {
+ addr := c.dogstatsdAddr
+ if addr == "" {
+ // no config defined address; use host and port from env vars
+ // or default to localhost:8125 if not set
+ addr = defaultDogstatsdAddr()
+ }
+ agentport := c.agent.StatsdPort
+ if agentport == 0 {
+ // the agent didn't report a port; use the already resolved address, as
+ // features are loaded from the trace-agent, which might not be running
+ return addr
+ }
+ // the agent reported a port
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ // parsing the address failed; use the already resolved address as is
+ return addr
+ }
+ if host == "unix" {
+ // no need to change the address because it's a UDS connection
+ // and these don't have ports
+ return addr
+ }
+ if host == "" {
+ // no host was provided; use the default hostname
+ host = defaultHostname
+ }
+ // use the agent-reported address if it differs from the user-defined TCP-based protocol URI:
+ // we have a valid host:port address, so replace the port because the agent knows better
+ addr = net.JoinHostPort(host, strconv.Itoa(agentport))
+ return addr
+}
+
+func newStatsdClient(c *config) (internal.StatsdClient, error) {
+ if c.statsdClient != nil {
+ return c.statsdClient, nil
+ }
+ return internal.NewStatsdClient(c.dogstatsdAddr, statsTags(c))
+}
+
+// udsClient returns a new http.Client which connects using the given UDS socket path.
+func udsClient(socketPath string, timeout time.Duration) *http.Client {
+ if timeout == 0 {
+ timeout = defaultHTTPTimeout
+ }
+ return &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+ return defaultDialer(timeout).DialContext(ctx, "unix", (&net.UnixAddr{
+ Name: socketPath,
+ Net: "unix",
+ }).String())
+ },
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ },
+ Timeout: timeout,
+ }
+}
+
+// defaultDogstatsdAddr returns the default connection address for Dogstatsd.
+func defaultDogstatsdAddr() string {
+ envHost, envPort := env.Get("DD_DOGSTATSD_HOST"), env.Get("DD_DOGSTATSD_PORT")
+ if envHost == "" {
+ envHost = env.Get("DD_AGENT_HOST")
+ }
+ if _, err := os.Stat(defaultSocketDSD); err == nil && envHost == "" && envPort == "" {
+ // socket exists and user didn't specify otherwise via env vars
+ return "unix://" + defaultSocketDSD
+ }
+ host, port := defaultHostname, defaultStatsdPort
+ if envHost != "" {
+ host = envHost
+ }
+ if envPort != "" {
+ port = envPort
+ }
+ return net.JoinHostPort(host, port)
+}
+
+type integrationConfig struct {
+ Instrumented bool `json:"instrumented"` // indicates if the user has imported and used the integration
+ Available bool `json:"available"` // indicates if the user is using a library that can be used with Datadog integrations
+ Version string `json:"available_version"` // if available, indicates the version of the library the user has
+}
+
+// agentFeatures holds information about the trace-agent's capabilities.
+// When running WithLambdaMode, a zero-value of this struct will be used
+// as features.
+type agentFeatures struct {
+ // DropP0s reports whether it's ok for the tracer to not send any
+ // P0 traces to the agent.
+ DropP0s bool
+
+ // Stats reports whether the agent can receive client-computed stats on
+ // the /v0.6/stats endpoint.
+ Stats bool
+
+ // StatsdPort specifies the Dogstatsd port as provided by the agent.
+ // If the agent uses the default, this will be 0, which means 8125.
+ StatsdPort int
+
+ // featureFlags specifies all the feature flags reported by the trace-agent.
+ featureFlags map[string]struct{}
+
+ // peerTags specifies precursor tags to aggregate stats on when client stats is enabled.
+ peerTags []string
+
+ // defaultEnv is the trace-agent's default env, used for stats calculation if no env override is present.
+ defaultEnv string
+
+ // metaStructAvailable reports whether the trace-agent can receive spans with the `meta_struct` field.
+ metaStructAvailable bool
+
+ // obfuscationVersion reports the trace-agent's version of obfuscation logic. A value of 0 means this field wasn't present.
+ obfuscationVersion int
+
+ // spanEventsAvailable reports whether the trace-agent can receive spans with the `span_events` field.
+ spanEventsAvailable bool
+
+ // evpProxyV2 reports if the trace-agent can receive payloads on the /evp_proxy/v2 endpoint.
+ evpProxyV2 bool
+}
+
+// HasFlag reports whether the agent has set the feat feature flag.
+func (a *agentFeatures) HasFlag(feat string) bool {
+ _, ok := a.featureFlags[feat]
+ return ok
+}
+
+// loadAgentFeatures queries the trace-agent for its capabilities and updates
+// the tracer's behaviour.
+func loadAgentFeatures(agentDisabled bool, agentURL *url.URL, httpClient *http.Client) (features agentFeatures) {
+ if agentDisabled {
+ // there is no agent; all features off
+ return
+ }
+ resp, err := httpClient.Get(fmt.Sprintf("%s/info", agentURL))
+ if err != nil {
+ log.Error("Loading features: %s", err.Error())
+ return
+ }
+ // close the body on every return path, including the 404 check below
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotFound {
+ // agent is older than 7.28.0, features not discoverable
+ return
+ }
+ type infoResponse struct {
+ Endpoints []string `json:"endpoints"`
+ ClientDropP0s bool `json:"client_drop_p0s"`
+ FeatureFlags []string `json:"feature_flags"`
+ PeerTags []string `json:"peer_tags"`
+ SpanMetaStruct bool `json:"span_meta_structs"`
+ ObfuscationVersion int `json:"obfuscation_version"`
+ SpanEvents bool `json:"span_events"`
+ Config struct {
+ StatsdPort int `json:"statsd_port"`
+ DefaultEnv string `json:"default_env"`
+ } `json:"config"`
+ }
+
+ var info infoResponse
+ if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
+ log.Error("Decoding features: %s", err.Error())
+ return
+ }
+
+ features.DropP0s = info.ClientDropP0s
+ features.StatsdPort = info.Config.StatsdPort
+ features.defaultEnv = info.Config.DefaultEnv
+ features.metaStructAvailable = info.SpanMetaStruct
+ features.peerTags = info.PeerTags
+ features.obfuscationVersion = info.ObfuscationVersion
+ features.spanEventsAvailable = info.SpanEvents
+ for _, endpoint := range info.Endpoints {
+ switch endpoint {
+ case "/v0.6/stats":
+ features.Stats = true
+ case "/evp_proxy/v2/":
+ features.evpProxyV2 = true
+ }
+ }
+ features.featureFlags = make(map[string]struct{}, len(info.FeatureFlags))
+ for _, flag := range info.FeatureFlags {
+ features.featureFlags[flag] = struct{}{}
+ }
+ return features
+}
+
+// MarkIntegrationImported labels the given integration as imported.
+func MarkIntegrationImported(integration string) bool {
+ s, ok := contribIntegrations[integration]
+ if !ok {
+ return false
+ }
+ s.imported = true
+ contribIntegrations[integration] = s
+ return true
+}
+
+func (c *config) loadContribIntegrations(deps []*debug.Module) {
+ integrations := map[string]integrationConfig{}
+ for _, s := range contribIntegrations {
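+ // Seed every known integration as not yet available; the dependency scan
+ // below fills in availability and version for those present in the build.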
+ integrations[s.name] = integrationConfig{
+ Instrumented: s.imported,
+ }
+ }
+ for _, d := range deps {
+ p := d.Path
+ s, ok := contribIntegrations[p]
+ if !ok {
+ continue
+ }
+ conf := integrations[s.name]
+ conf.Available = true
+ conf.Version = d.Version
+ integrations[s.name] = conf
+ }
+ c.integrations = integrations
+}
+
+func (c *config) canComputeStats() bool {
+ return c.agent.Stats && (c.HasFeature("discovery") || c.statsComputationEnabled)
+}
+
+func (c *config) canDropP0s() bool {
+ return c.canComputeStats() && c.agent.DropP0s
+}
+
+func statsTags(c *config) []string {
+ tags := []string{
+ "lang:go",
+ "lang_version:" + runtime.Version(),
+ }
+ if c.env != "" {
+ tags = append(tags, "env:"+c.env)
+ }
+ if c.hostname != "" {
+ tags = append(tags, "host:"+c.hostname)
+ }
+ for k, v := range c.globalTags.get() {
+ if vstr, ok := v.(string); ok {
+ tags = append(tags, k+":"+vstr)
+ }
+ }
+ globalconfig.SetStatsTags(tags)
+ tags = append(tags, "tracer_version:"+version.Tag)
+ if c.serviceName != "" {
+ tags = append(tags, "service:"+c.serviceName)
+ }
+ return tags
+}
+
+// withNoopStats is used for testing to disable the statsd client.
+func withNoopStats() StartOption {
+ return func(c *config) {
+ c.statsdClient = &statsd.NoOpClientDirect{}
+ }
+}
+
+// WithAppSecEnabled specifies whether AppSec features should be activated
+// or not.
+//
+// By default, AppSec features are enabled if `DD_APPSEC_ENABLED` is set to a
+// truthy value, and may be enabled by remote configuration if
+// `DD_APPSEC_ENABLED` is not set at all.
+//
+// Using this option to explicitly disable appsec also prevents it from being
+// remotely activated.
+func WithAppSecEnabled(enabled bool) StartOption {
+ mode := appsecconfig.ForcedOff
+ if enabled {
+ mode = appsecconfig.ForcedOn
+ }
+ return func(c *config) {
+ c.appsecStartOptions = append(c.appsecStartOptions, appsecconfig.WithEnablementMode(mode))
+ }
+}
+
+// WithFeatureFlags specifies a set of feature flags to enable. Please take into account
+// that most, if not all, feature flags are considered experimental and may result in
+// unexpected bugs.
+func WithFeatureFlags(feats ...string) StartOption {
+ return func(c *config) {
+ if c.featureFlags == nil {
+ c.featureFlags = make(map[string]struct{}, len(feats))
+ }
+ for _, f := range feats {
+ c.featureFlags[strings.TrimSpace(f)] = struct{}{}
+ }
+ log.Info("FEATURES enabled: %s", feats)
+ }
+}
+
+// WithLogger sets logger as the tracer's error printer.
+// Diagnostic and startup tracer logs are prefixed to simplify the search within logs.
+// If JSON logging format is required, it's possible to wrap tracer logs using an existing JSON logger with this
+// function. To learn more about this possibility, please visit: https://github.com/DataDog/dd-trace-go/issues/2152#issuecomment-1790586933
+func WithLogger(logger Logger) StartOption {
+ return func(c *config) {
+ c.logger = logger
+ }
+}
+
+// WithDebugStack can be used to globally enable or disable the collection of stack traces when
+// spans finish with errors. It is enabled by default. This is a global version of the NoDebugStack
+// FinishOption.
+func WithDebugStack(enabled bool) StartOption {
+ return func(c *config) {
+ c.noDebugStack = !enabled
+ }
+}
+
+// WithDebugMode enables debug mode on the tracer, resulting in more verbose logging.
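+//
+// A hedged usage sketch (debug logging can presumably also be driven by the
+// DD_TRACE_DEBUG environment variable, per the OTEL/DD mapping in this package):
+//
+//	tracer.Start(tracer.WithDebugMode(true))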
+func WithDebugMode(enabled bool) StartOption {
+ return func(c *config) {
+ telemetry.RegisterAppConfig("trace_debug_enabled", enabled, telemetry.OriginCode)
+ c.debug = enabled
+ }
+}
+
+// WithLambdaMode enables lambda mode on the tracer, for use with AWS Lambda.
+// This option is only required if the Datadog Lambda Extension is not
+// running.
+func WithLambdaMode(enabled bool) StartOption {
+ return func(c *config) {
+ c.logToStdout = enabled
+ }
+}
+
+// WithSendRetries enables re-sending payloads that are not successfully
+// submitted to the agent. This will cause the tracer to retry the send at
+// most `retries` times.
+func WithSendRetries(retries int) StartOption {
+ return func(c *config) {
+ c.sendRetries = retries
+ }
+}
+
+// WithRetryInterval sets the interval, in seconds, for retrying payload submissions to the agent.
+func WithRetryInterval(interval int) StartOption {
+ return func(c *config) {
+ c.retryInterval = time.Duration(interval) * time.Second
+ }
+}
+
+// WithPropagator sets an alternative propagator to be used by the tracer.
+func WithPropagator(p Propagator) StartOption {
+ return func(c *config) {
+ c.propagator = p
+ }
+}
+
+// WithService sets the default service name for the program.
+func WithService(name string) StartOption {
+ return func(c *config) {
+ c.serviceName = name
+ globalconfig.SetServiceName(c.serviceName)
+ }
+}
+
+// WithGlobalServiceName causes contrib libraries to use the global service name and not any locally defined service name.
+// This is synonymous with `DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED`.
+func WithGlobalServiceName(enabled bool) StartOption {
+ return func(_ *config) {
+ namingschema.SetRemoveIntegrationServiceNames(enabled)
+ }
+}
+
+// WithAgentAddr sets the address where the agent is located. The default is
+// localhost:8126. It should contain both host and port.
+func WithAgentAddr(addr string) StartOption {
+ return func(c *config) {
+ c.agentURL = &url.URL{
+ Scheme: "http",
+ Host: addr,
+ }
+ }
+}
+
+// WithAgentURL sets the full trace agent URL.
+func WithAgentURL(agentURL string) StartOption {
+ return func(c *config) {
+ u, err := url.Parse(agentURL)
+ if err != nil {
+ var urlErr *url.Error
+ if errors.As(err, &urlErr) {
+ u, err = url.Parse(urlErr.URL)
+ if u != nil {
+ urlErr.URL = u.Redacted()
+ log.Warn("Failed to parse Agent URL: %s", urlErr.Err)
+ return
+ }
+ log.Warn("Failed to parse Agent URL")
+ return
+ }
+ log.Warn("Failed to parse Agent URL: %s", err.Error())
+ return
+ }
+ switch u.Scheme {
+ case "http", "https":
+ c.agentURL = &url.URL{
+ Scheme: u.Scheme,
+ Host: u.Host,
+ }
+ case "unix":
+ c.agentURL = internal.UnixDataSocketURL(u.Path)
+ default:
+ log.Warn("Unsupported protocol %q in Agent URL %q. Must be one of: http, https, unix.", u.Scheme, agentURL)
+ }
+ }
+}
+
+// WithAgentTimeout sets the timeout for the agent connection. Timeout is in seconds.
+func WithAgentTimeout(timeout int) StartOption {
+ return func(c *config) {
+ c.httpClientTimeout = time.Duration(timeout) * time.Second
+ }
+}
+
+// WithEnv sets the environment to which all traces started by the tracer will be submitted.
+// The default value is the environment variable DD_ENV, if it is set.
+func WithEnv(env string) StartOption {
+ return func(c *config) {
+ c.env = env
+ }
+}
+
+// WithServiceMapping determines service "from" to be renamed to service "to".
+// This option is case-sensitive and can be used multiple times.
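+//
+// A hedged usage sketch (the service names are illustrative only):
+//
+//	tracer.Start(tracer.WithServiceMapping("postgres", "postgres-primary"))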
+func WithServiceMapping(from, to string) StartOption {
+ return func(c *config) {
+ if c.serviceMappings == nil {
+ c.serviceMappings = make(map[string]string)
+ }
+ c.serviceMappings[from] = to
+ }
+}
+
+// WithPeerServiceDefaults sets default calculation for peer.service.
+// Related documentation: https://docs.datadoghq.com/tracing/guide/inferred-service-opt-in/?tab=go#apm-tracer-configuration
+func WithPeerServiceDefaults(enabled bool) StartOption {
+ return func(c *config) {
+ c.peerServiceDefaultsEnabled = enabled
+ }
+}
+
+// WithPeerServiceMapping determines the value of the peer.service tag "from" to be renamed to service "to".
+func WithPeerServiceMapping(from, to string) StartOption {
+ return func(c *config) {
+ if c.peerServiceMappings == nil {
+ c.peerServiceMappings = make(map[string]string)
+ }
+ c.peerServiceMappings[from] = to
+ }
+}
+
+// WithGlobalTag sets a key/value pair which will be set as a tag on all spans
+// created by tracer. This option may be used multiple times.
+func WithGlobalTag(k string, v interface{}) StartOption {
+ return func(c *config) {
+ if c.globalTags.get() == nil {
+ c.initGlobalTags(map[string]interface{}{}, telemetry.OriginDefault)
+ }
+ c.globalTags.Lock()
+ defer c.globalTags.Unlock()
+ c.globalTags.current[k] = v
+ }
+}
+
+// initGlobalTags initializes the globalTags config with the provided init value.
+func (c *config) initGlobalTags(init map[string]interface{}, origin telemetry.Origin) {
+ apply := func(map[string]interface{}) bool {
+ // always set the runtime ID on updates
+ c.globalTags.current[ext.RuntimeID] = globalconfig.RuntimeID()
+ return true
+ }
+ c.globalTags = newDynamicConfig("trace_tags", init, apply, equalMap[string])
+ c.globalTags.cfgOrigin = origin
+}
+
+// WithSampler sets the given sampler to be used with the tracer. By default
+// an all-permissive sampler is used.
+// Deprecated: Use WithSamplerRate instead. Custom sampling will be phased out in a future release.
+func WithSampler(s Sampler) StartOption {
+ return func(c *config) {
+ c.sampler = &customSampler{s: s}
+ }
+}
+
+// WithSamplerRate sets the given sample rate to be used with the tracer.
+// The rate must be between 0 and 1. By default an all-permissive sampler rate (1) is used.
+func WithSamplerRate(rate float64) StartOption {
+ return func(c *config) {
+ c.sampler = NewRateSampler(rate)
+ }
+}
+
+// WithHTTPClient specifies the HTTP client to use when emitting spans to the agent.
+func WithHTTPClient(client *http.Client) StartOption {
+ return func(c *config) {
+ c.httpClient = client
+ }
+}
+
+// WithUDS configures the HTTP client to dial the Datadog Agent via the specified Unix Domain Socket path.
+func WithUDS(socketPath string) StartOption {
+ return func(c *config) {
+ c.agentURL = &url.URL{
+ Scheme: "unix",
+ Path: socketPath,
+ }
+ }
+}
+
+// WithAnalytics allows specifying whether Trace Search & Analytics should be enabled
+// for integrations.
+func WithAnalytics(on bool) StartOption {
+ return func(_ *config) {
+ if on {
+ globalconfig.SetAnalyticsRate(1.0)
+ } else {
+ globalconfig.SetAnalyticsRate(math.NaN())
+ }
+ }
+}
+
+// WithAnalyticsRate sets the global sampling rate for sampling APM events.
+func WithAnalyticsRate(rate float64) StartOption {
+ return func(_ *config) {
+ if rate >= 0.0 && rate <= 1.0 {
+ globalconfig.SetAnalyticsRate(rate)
+ } else {
+ globalconfig.SetAnalyticsRate(math.NaN())
+ }
+ }
+}
+
+// WithRuntimeMetrics enables automatic collection of runtime metrics every 10 seconds.
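+//
+// A hedged usage sketch; see also WithDogstatsdAddr below when the Agent's
+// Dogstatsd endpoint is not reachable at the default localhost:8125:
+//
+//	tracer.Start(tracer.WithRuntimeMetrics())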
+func WithRuntimeMetrics() StartOption {
+ return func(cfg *config) {
+ telemetry.RegisterAppConfig("runtime_metrics_enabled", true, telemetry.OriginCode)
+ cfg.runtimeMetrics = true
+ }
+}
+
+// WithDogstatsdAddr specifies the address to connect to for sending metrics to the Datadog
+// Agent. It should be a "host:port" string, or the path to a unix domain socket. If not set, it
+// attempts to determine the address of the statsd service according to the following rules:
+// 1. Look for /var/run/datadog/dsd.socket and use it if present. If not, continue to #2.
+// 2. The host is determined by DD_AGENT_HOST, and defaults to "localhost".
+// 3. The port is retrieved from the agent. If not present, it is determined by DD_DOGSTATSD_PORT, and defaults to 8125.
+//
+// This option is in effect when WithRuntimeMetrics is enabled.
+func WithDogstatsdAddr(addr string) StartOption {
+ return func(cfg *config) {
+ cfg.dogstatsdAddr = addr
+ globalconfig.SetDogstatsdAddr(addr)
+ }
+}
+
+// WithSamplingRules specifies the sampling rates to apply to spans based on the
+// provided rules.
+func WithSamplingRules(rules []SamplingRule) StartOption {
+ return func(cfg *config) {
+ for _, rule := range rules {
+ if rule.ruleType == SamplingRuleSpan {
+ cfg.spanRules = append(cfg.spanRules, rule)
+ } else {
+ cfg.traceRules = append(cfg.traceRules, rule)
+ }
+ }
+ }
+}
+
+// WithServiceVersion specifies the version of the service that is running. This will
+// be included in spans from this service in the "version" tag, provided that
+// span service name and config service name match. Do NOT use with WithUniversalVersion.
+func WithServiceVersion(version string) StartOption {
+ return func(cfg *config) {
+ cfg.version = version
+ cfg.universalVersion = false
+ }
+}
+
+// WithUniversalVersion specifies the version of the service that is running, and will be applied to all spans,
+// regardless of whether span service name and config service name match.
+// See: WithService, WithServiceVersion. Do NOT use with WithServiceVersion.
+func WithUniversalVersion(version string) StartOption {
+ return func(c *config) {
+ c.version = version
+ c.universalVersion = true
+ }
+}
+
+// WithHostname allows specifying the hostname with which to mark outgoing traces.
+func WithHostname(name string) StartOption {
+ return func(c *config) {
+ c.hostname = name
+ }
+}
+
+// WithTraceEnabled allows specifying whether tracing will be enabled.
+func WithTraceEnabled(enabled bool) StartOption {
+ return func(c *config) {
+ telemetry.RegisterAppConfig("trace_enabled", enabled, telemetry.OriginCode)
+ c.enabled = newDynamicConfig("tracing_enabled", enabled, func(_ bool) bool { return true }, equal[bool])
+ }
+}
+
+// WithLogStartup allows enabling or disabling the startup log.
+func WithLogStartup(enabled bool) StartOption {
+ return func(c *config) {
+ c.logStartup = enabled
+ }
+}
+
+// WithProfilerCodeHotspots enables the code hotspots integration between the
+// tracer and profiler. This is done by automatically attaching pprof labels
+// called "span id" and "local root span id" when new spans are created. You
+// should not use these label names in your own code when this is enabled. The
+// enabled value defaults to the value of the
+// DD_PROFILING_CODE_HOTSPOTS_COLLECTION_ENABLED env variable or true.
+func WithProfilerCodeHotspots(enabled bool) StartOption {
+ return func(c *config) {
+ c.profilerHotspots = enabled
+ }
+}
+
+// WithProfilerEndpoints enables the endpoints integration between the tracer
+// and profiler.
This is done by automatically attaching a pprof label called
+// "trace endpoint" holding the resource name of the top-level service span if
+// its type is "http", "rpc" or "" (default). You should not use this label
+// name in your own code when this is enabled. The enabled value defaults to
+// the value of the DD_PROFILING_ENDPOINT_COLLECTION_ENABLED env variable or
+// true.
+func WithProfilerEndpoints(enabled bool) StartOption {
+ return func(c *config) {
+ c.profilerEndpoints = enabled
+ }
+}
+
+// WithDebugSpansMode enables debugging old spans that may have been
+// abandoned, which may prevent traces from being sent to the Datadog
+// Agent, especially if partial flushing is off.
+// This setting can also be configured by setting DD_TRACE_DEBUG_ABANDONED_SPANS
+// to true. The timeout will default to 10 minutes, unless overridden
+// by DD_TRACE_ABANDONED_SPAN_TIMEOUT.
+// This feature is disabled by default. Turning on this debug mode may
+// be expensive, so it should only be enabled for debugging purposes.
+func WithDebugSpansMode(timeout time.Duration) StartOption {
+ return func(c *config) {
+ c.debugAbandonedSpans = true
+ c.spanTimeout = timeout
+ }
+}
+
+// WithPartialFlushing enables flushing of partially finished traces.
+// This is done after "numSpans" have finished in a single local trace at
+// which point all finished spans in that trace will be flushed, freeing up
+// any memory they were consuming. This can also be configured by setting
+// DD_TRACE_PARTIAL_FLUSH_ENABLED to true, which will default to 1000 spans
+// unless overridden with DD_TRACE_PARTIAL_FLUSH_MIN_SPANS. Partial flushing
+// is disabled by default.
+func WithPartialFlushing(numSpans int) StartOption {
+ return func(c *config) {
+ c.partialFlushEnabled = true
+ c.partialFlushMinSpans = numSpans
+ }
+}
+
+// WithStatsComputation enables client-side stats computation, allowing
+// the tracer to compute stats from traces. This can reduce network traffic
+// to the Datadog Agent, and produce more accurate stats data.
+// This can also be configured by setting DD_TRACE_STATS_COMPUTATION_ENABLED,
+// which defaults to true, though stats only take effect when the agent supports them.
+func WithStatsComputation(enabled bool) StartOption {
+ return func(c *config) {
+ c.statsComputationEnabled = enabled
+ }
+}
+
+// Tag sets the given key/value pair as a tag on the started Span.
+func Tag(k string, v interface{}) StartSpanOption {
+ return func(cfg *StartSpanConfig) {
+ if cfg.Tags == nil {
+ cfg.Tags = map[string]interface{}{}
+ }
+ cfg.Tags[k] = v
+ }
+}
+
+// ServiceName sets the given service name on the started span. For example "http.server".
+func ServiceName(name string) StartSpanOption {
+ return Tag(ext.ServiceName, name)
+}
+
+// ResourceName sets the given resource name on the started span. A resource could
+// be an SQL query, a URL, an RPC method or something else.
+func ResourceName(name string) StartSpanOption {
+ return Tag(ext.ResourceName, name)
+}
+
+// SpanType sets the given span type on the started span. Some examples in the case of
+// the Datadog APM product could be "web", "db" or "cache".
+func SpanType(name string) StartSpanOption {
+ return Tag(ext.SpanType, name)
+}
+
+// WithSpanLinks sets span links on the started span.
+func WithSpanLinks(links []SpanLink) StartSpanOption {
+ return func(cfg *StartSpanConfig) {
+ cfg.SpanLinks = append(cfg.SpanLinks, links...)
+ }
+}
+
+var measuredTag = Tag(keyMeasured, 1)
+
+// Measured marks this span to be measured for metrics and stats calculations.
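+//
+// A hedged usage sketch (the operation name is illustrative only):
+//
+//	span := tracer.StartSpan("http.request", tracer.Measured())
+//	defer span.Finish()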
+func Measured() StartSpanOption {
+ // cache a global instance of this tag: saves one alloc/call
+ return measuredTag
+}
+
+// WithSpanID sets the SpanID on the started span, instead of using a random number.
+// If there is no parent Span (e.g. from ChildOf), then the TraceID will also be set to the
+// value given here.
+func WithSpanID(id uint64) StartSpanOption {
+ return func(cfg *StartSpanConfig) {
+ cfg.SpanID = id
+ }
+}
+
+// ChildOf tells StartSpan to use the given span context as a parent for the created span.
+//
+// Deprecated: Use [Span.StartChild] instead.
+func ChildOf(ctx *SpanContext) StartSpanOption {
+ return func(cfg *StartSpanConfig) {
+ cfg.Parent = ctx
+ }
+}
+
+// withContext associates the ctx with the span.
+func withContext(ctx context.Context) StartSpanOption {
+ return func(cfg *StartSpanConfig) {
+ cfg.Context = ctx
+ }
+}
+
+// StartTime sets a custom time as the start time for the created span. By
+// default a span is started using the creation time.
+func StartTime(t time.Time) StartSpanOption {
+ return func(cfg *StartSpanConfig) {
+ cfg.StartTime = t
+ }
+}
+
+// AnalyticsRate sets a custom analytics rate for a span. It decides the percentage
+// of events that will be picked up by the App Analytics product. It represents a
+// float64 between 0 and 1 where 0.5 would represent 50% of events.
+func AnalyticsRate(rate float64) StartSpanOption {
+ if math.IsNaN(rate) {
+ return func(_ *StartSpanConfig) {}
+ }
+ return Tag(ext.EventSampleRate, rate)
+}
+
+// WithStartSpanConfig merges the given StartSpanConfig into the one used to start the span.
+// It is useful when you want to set a common base config, reducing the number of function calls in hot loops.
+func WithStartSpanConfig(cfg *StartSpanConfig) StartSpanOption {
+ return func(c *StartSpanConfig) {
+ // copy cfg into c only if cfg fields are not zero values
+ // c fields have precedence, as they may have been set up before running this option
+ if c.SpanID == 0 {
+ c.SpanID = cfg.SpanID
+ }
+ if c.Parent == nil {
+ c.Parent = cfg.Parent
+ }
+ if c.Context == nil {
+ c.Context = cfg.Context
+ }
+ if c.SpanLinks == nil {
+ c.SpanLinks = cfg.SpanLinks
+ }
+ if c.StartTime.IsZero() {
+ c.StartTime = cfg.StartTime
+ }
+ // tags are a special case, as we need to merge them
+ if c.Tags == nil {
+ // if cfg.Tags is nil, this is a no-op
+ c.Tags = cfg.Tags
+ } else if cfg.Tags != nil {
+ for k, v := range cfg.Tags {
+ c.Tags[k] = v
+ }
+ }
+ }
+}
+
+// WithHeaderTags enables the integration to attach HTTP request headers as span tags.
+// Warning:
+// Using this feature can risk exposing sensitive data such as authorization tokens to Datadog.
+// Special headers cannot be sub-selected. E.g., an entire Cookie header would be transmitted, without the ability to choose specific Cookies.
+func WithHeaderTags(headerAsTags []string) StartOption {
+ return func(c *config) {
+ c.headerAsTags = newDynamicConfig("trace_header_tags", headerAsTags, setHeaderTags, equalSlice[string])
+ setHeaderTags(headerAsTags)
+ }
+}
+
+// WithTestDefaults configures the tracer to not send spans to the agent, and to not collect metrics.
+// Warning:
+// This option should only be used in tests, as it will prevent the tracer from sending spans to the agent.
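+//
+// A hedged usage sketch for a test body (passing nil falls back to a no-op
+// statsd client, per the implementation below):
+//
+//	tracer.Start(tracer.WithTestDefaults(nil))
+//	defer tracer.Stop()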
+func WithTestDefaults(statsdClient any) StartOption {
+ return func(c *config) {
+ if statsdClient == nil {
+ statsdClient = &statsd.NoOpClientDirect{}
+ }
+ c.statsdClient = statsdClient.(internal.StatsdClient)
+ c.transport = newDummyTransport()
+ }
+}
+
+// WithLLMObsEnabled enables or disables LLM Observability (it is disabled by default).
+// This is equivalent to the DD_LLMOBS_ENABLED environment variable.
+func WithLLMObsEnabled(enabled bool) StartOption {
+ return func(c *config) {
+ c.llmobs.Enabled = enabled
+ }
+}
+
+// WithLLMObsMLApp configures the default ML App for LLM Observability.
+// This must be configured in order to use any LLM Observability features.
+// This is equivalent to the DD_LLMOBS_ML_APP environment variable.
+func WithLLMObsMLApp(mlApp string) StartOption {
+ return func(c *config) {
+ c.llmobs.MLApp = mlApp
+ }
+}
+
+// WithLLMObsProjectName configures the default LLM Observability project to use.
+// It is required when using the Experiments and Datasets feature.
+// This is equivalent to the DD_LLMOBS_PROJECT_NAME environment variable.
+func WithLLMObsProjectName(projectName string) StartOption {
+ return func(c *config) {
+ c.llmobs.ProjectName = projectName
+ }
+}
+
+// WithLLMObsAgentlessEnabled configures whether LLM Observability works in agent or agentless mode.
+// By default, the agent is used if it is available and supports LLM Observability; otherwise,
+// agentless mode is used. Please note that when using agentless mode, a valid DD_API_KEY must also be set.
+// This is equivalent to the DD_LLMOBS_AGENTLESS_ENABLED environment variable.
+func WithLLMObsAgentlessEnabled(agentlessEnabled bool) StartOption {
+ return func(c *config) {
+ c.llmobs.AgentlessEnabled = &agentlessEnabled
+ }
+}
+
+// dummyTransport is a mock Transport with a real Encoder.
+type dummyTransport struct {
+ sync.RWMutex
+ traces spanLists
+ stats []*pb.ClientStatsPayload
+ obfVersion int
+}
+
+func newDummyTransport() *dummyTransport {
+ return &dummyTransport{traces: spanLists{}, obfVersion: -1}
+}
+
+func (t *dummyTransport) Len() int {
+ t.RLock()
+ defer t.RUnlock()
+ return len(t.traces)
+}
+
+func (t *dummyTransport) sendStats(p *pb.ClientStatsPayload, obfVersion int) error {
+ t.Lock()
+ t.stats = append(t.stats, p)
+ t.obfVersion = obfVersion
+ t.Unlock()
+ return nil
+}
+
+func (t *dummyTransport) Stats() []*pb.ClientStatsPayload {
+ t.RLock()
+ defer t.RUnlock()
+ return t.stats
+}
+
+func (t *dummyTransport) ObfuscationVersion() int {
+ t.RLock()
+ defer t.RUnlock()
+ return t.obfVersion
+}
+
+func (t *dummyTransport) send(p payload) (io.ReadCloser, error) {
+ traces, err := decode(p)
+ if err != nil {
+ return nil, err
+ }
+ t.Lock()
+ t.traces = append(t.traces, traces...)
+ t.Unlock()
+ ok := io.NopCloser(strings.NewReader("OK"))
+ return ok, nil
+}
+
+func (t *dummyTransport) endpoint() string {
+ return "http://localhost:9/v0.4/traces"
+}
+
+func decode(p payloadReader) (spanLists, error) {
+ var traces spanLists
+ err := msgp.Decode(p, &traces)
+ return traces, err
+}
+
+func (t *dummyTransport) Reset() {
+ t.Lock()
+ t.traces = t.traces[:0]
+ t.Unlock()
+}
+
+func (t *dummyTransport) Traces() spanLists {
+ t.Lock()
+ defer t.Unlock()
+
+ traces := t.traces
+ t.traces = spanLists{}
+ return traces
+}
+
+// setHeaderTags sets the global header tags.
+// Always resets the global value and returns true.
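+// Each input is presumably either a bare header name or a "header:tag" pair;
+// normalizer.HeaderTag splits it, and entries that normalize to an empty
+// header or tag are dropped, as the loop below shows.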
+func setHeaderTags(headerAsTags []string) bool {
+ globalconfig.ClearHeaderTags()
+ for _, h := range headerAsTags {
+ header, tag := normalizer.HeaderTag(h)
+ if len(header) == 0 || len(tag) == 0 {
+ log.Debug("Header-tag input is in unsupported format; dropping input value %q", h)
+ continue
+ }
+ globalconfig.SetHeaderTag(header, tag)
+ }
+ return true
+}
+
+// UserMonitoringConfig is used to configure what is used to identify a user.
+// This configuration can be set by combining one or more UserMonitoringOptions with a call to SetUser().
+type UserMonitoringConfig struct {
+ PropagateID bool
+ Login string
+ Org string
+ Email string
+ Name string
+ Role string
+ SessionID string
+ Scope string
+ Metadata map[string]string
+}
+
+// UserMonitoringOption represents a function that can be provided as a parameter to SetUser.
+type UserMonitoringOption func(*UserMonitoringConfig)
+
+// WithUserMetadata returns the option setting additional metadata of the authenticated user.
+// This can be used multiple times and the given data will be tracked as `usr.{key}=value`.
+func WithUserMetadata(key, value string) UserMonitoringOption {
+ return func(cfg *UserMonitoringConfig) {
+ cfg.Metadata[key] = value
+ }
+}
+
+// WithUserLogin returns the option setting the login of the authenticated user.
+func WithUserLogin(login string) UserMonitoringOption {
+ return func(cfg *UserMonitoringConfig) {
+ cfg.Login = login
+ }
+}
+
+// WithUserOrg returns the option setting the organization of the authenticated user.
+func WithUserOrg(org string) UserMonitoringOption {
+ return func(cfg *UserMonitoringConfig) {
+ cfg.Org = org
+ }
+}
+
+// WithUserEmail returns the option setting the email of the authenticated user.
+func WithUserEmail(email string) UserMonitoringOption {
+ return func(cfg *UserMonitoringConfig) {
+ cfg.Email = email
+ }
+}
+
+// WithUserName returns the option setting the name of the authenticated user.
+func WithUserName(name string) UserMonitoringOption {
+ return func(cfg *UserMonitoringConfig) {
+ cfg.Name = name
+ }
+}
+
+// WithUserSessionID returns the option setting the session ID of the authenticated user.
+func WithUserSessionID(sessionID string) UserMonitoringOption {
+ return func(cfg *UserMonitoringConfig) {
+ cfg.SessionID = sessionID
+ }
+}
+
+// WithUserRole returns the option setting the role of the authenticated user.
+func WithUserRole(role string) UserMonitoringOption {
+ return func(cfg *UserMonitoringConfig) {
+ cfg.Role = role
+ }
+}
+
+// WithUserScope returns the option setting the scope (authorizations) of the authenticated user.
+func WithUserScope(scope string) UserMonitoringOption {
+ return func(cfg *UserMonitoringConfig) {
+ cfg.Scope = scope
+ }
+}
+
+// WithPropagation returns the option allowing the user id to be propagated through distributed traces.
+// The user id is base64 encoded and added to the datadog propagated tags header.
+// This option should only be used if you are certain that the user id passed to `SetUser()` does not contain any
+// personally identifiable information or any kind of sensitive data, as it will be leaked to other services.
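+//
+// A hedged usage sketch (the span and user ID are illustrative, assuming
+// SetUser's usual shape of a span, a user ID, and options):
+//
+//	tracer.SetUser(span, "usr-123", tracer.WithPropagation())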
+func WithPropagation() UserMonitoringOption { + return func(cfg *UserMonitoringConfig) { + cfg.PropagateID = true + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/orchestrion.yml b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/orchestrion.yml new file mode 100644 index 00000000..42e9f6ee --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/orchestrion.yml @@ -0,0 +1,120 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the Apache License Version 2.0. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2023-present Datadog, Inc. +--- +# yaml-language-server: $schema=https://datadoghq.dev/orchestrion/schema.json +meta: + name: github.com/DataDog/dd-trace-go/v2/ddtrace/tracer + description: |- + Automatically starts the github.com/DataDog/dd-trace-go/v2/ddtrace/tracer + at the start of the application, and closes it at exit of the main function. + + Adding the `//dd:span` directive on functions creates custom spans + representing every call to that function. The default operation (span) name + is the name of the function, and this can be overridden using a "span.name" + argument to the directive: + + ```go + //dd:span span.name:custom-operation-name other:tag + func myFunction() { + // The default operation name would have been "myFunction" + } + ``` + + Function literal expressions don't have a function name, and their default operation name is the value of the very + first directive argument (if there is one). If there are no directive arguments, the operation name will remain + blank. + + ```go + //dd:span other:tag span.name:custom-operation-name + myOp := func() { + // The default operation name would have been "tag" + } + ``` + +extends: + - ../../internal/orchestrion/gls.orchestrion.yml + +aspects: + # Automatically manage the tracer lifecycle + - id: func main() + join-point: + all-of: + - package-name: main + - test-main: false + - function-body: + function: + - name: main + - signature: {} + advice: + - inject-declarations: + imports: + tracer: github.com/DataDog/dd-trace-go/v2/ddtrace/tracer + # Note: it is valid to have multiple func init() in a single compile unit (e.g, `.go` file), in which case + # they get executed in declaration order. This means it's okay for us to add a new init function if there is + # already one in the file, but as it currently is appended AFTER all other declarations in the file, it means + # that it will be executed last (tracing contents of previous init functions will not be possible). + template: func init() { tracer.Start() } + # We need to stop the tracer at the end of `main` to ensure all spans are properly flushed. + - prepend-statements: + imports: + tracer: github.com/DataDog/dd-trace-go/v2/ddtrace/tracer + template: |- + defer tracer.Stop() + + # Create spans for each function annotated with the //dd:span directive. 
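+  # A hedged summary of the template below: the span's context is taken from a
+  # context.Context or *net/http.Request argument when one exists (falling back
+  # to context.TODO()), and an error return value, when present, is attached to
+  # the span via tracer.WithError at Finish time.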
+ - id: '//dd:span' + join-point: + function-body: + directive: 'dd:span' + advice: + - prepend-statements: + imports: + context: context + tracer: github.com/DataDog/dd-trace-go/v2/ddtrace/tracer + template: |- + {{- $ctx := .Function.ArgumentOfType "context.Context" -}} + {{- $req := .Function.ArgumentOfType "*net/http.Request" -}} + {{- if (eq $ctx "") -}} + {{- $ctx = "ctx" -}} + ctx := {{- with $req -}} + {{ $req }}.Context() + {{- else -}} + context.TODO() + {{- end }} + {{ end -}} + + {{ $functionName := .Function.Name -}} + {{- $opName := $functionName -}} + {{- range .DirectiveArgs "dd:span" -}} + {{- if eq $opName "" -}} + {{ $opName = .Value }} + {{- end -}} + {{- if eq .Key "span.name" -}} + {{- $opName = .Value -}} + {{- break -}} + {{- end -}} + {{- end -}} + + var span *tracer.Span + span, {{ $ctx }} = tracer.StartSpanFromContext({{ $ctx }}, {{ printf "%q" $opName }}, + {{- with $functionName }} + tracer.Tag("function-name", {{ printf "%q" $functionName }}), + {{ end -}} + {{- range .DirectiveArgs "dd:span" }} + {{ if eq .Key "span.name" -}}{{- continue -}}{{- end -}} + tracer.Tag({{ printf "%q" .Key }}, {{ printf "%q" .Value }}), + {{- end }} + ) + {{- with $req }} + {{ $req }} = {{ $req }}.WithContext({{ $ctx }}) + {{- end }} + + {{ with .Function.ResultOfType "error" -}} + defer func(){ + span.Finish(tracer.WithError({{ . }})) + }() + {{ else -}} + defer span.Finish() + {{- end -}} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otel_dd_mappings.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otel_dd_mappings.go new file mode 100644 index 00000000..9d4e5c5a --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otel_dd_mappings.go @@ -0,0 +1,236 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
+package tracer + +import ( + "fmt" + "strings" + + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/stableconfig" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" +) + +// otelDDEnv contains env vars from both dd (DD) and ot (OTEL) that map to the same tracer configuration +// remapper contains functionality to remap OTEL values to DD values +type otelDDEnv struct { + dd string + ot string + remapper func(string) (string, error) + handsOff bool // if true, check for configuration set in application_monitoring.yaml file +} + +var otelDDConfigs = map[string]*otelDDEnv{ + "service": { + dd: "DD_SERVICE", + ot: "OTEL_SERVICE_NAME", + remapper: mapService, + handsOff: false, + }, + "metrics": { + dd: "DD_RUNTIME_METRICS_ENABLED", + ot: "OTEL_METRICS_EXPORTER", + remapper: mapMetrics, + handsOff: true, + }, + "debugMode": { + dd: "DD_TRACE_DEBUG", + ot: "OTEL_LOG_LEVEL", + remapper: mapLogLevel, + handsOff: true, + }, + "enabled": { + dd: "DD_TRACE_ENABLED", + ot: "OTEL_TRACES_EXPORTER", + remapper: mapEnabled, + handsOff: false, + }, + "sampleRate": { + dd: "DD_TRACE_SAMPLE_RATE", + ot: "OTEL_TRACES_SAMPLER", + remapper: mapSampleRate, + handsOff: false, + }, + "propagationStyle": { + dd: "DD_TRACE_PROPAGATION_STYLE", + ot: "OTEL_PROPAGATORS", + remapper: mapPropagationStyle, + handsOff: false, + }, + "resourceAttributes": { + dd: "DD_TAGS", + ot: "OTEL_RESOURCE_ATTRIBUTES", + remapper: mapDDTags, + handsOff: false, + }, +} + +var ddTagsMapping = map[string]string{ + "service.name": "service", + "deployment.environment": "env", + "service.version": "version", +} + +var unsupportedSamplerMapping = map[string]string{ + "always_on": "parentbased_always_on", + "always_off": "parentbased_always_off", + "traceidratio": "parentbased_traceidratio", +} + +var propagationMapping = map[string]string{ + "tracecontext": "tracecontext", + "b3": "b3 single header", + "b3multi": "b3multi", + "datadog": "datadog", + "none": "none", +} + +// getDDorOtelConfig determines whether the provided otelDDOpt will be set via DD or OTEL env vars, and returns the value +func getDDorOtelConfig(configName string) string { + config, ok := otelDDConfigs[configName] + if !ok { + panic(fmt.Sprintf("Programming Error: %v not found in supported configurations", configName)) + } + + // 1. Check managed stable config if handsOff + if config.handsOff { + if v := stableconfig.ManagedConfig.Get(config.dd); v != "" { + telemetry.RegisterAppConfigs(telemetry.Configuration{Name: telemetry.EnvToTelemetryName(config.dd), Value: v, Origin: telemetry.OriginManagedStableConfig, ID: stableconfig.ManagedConfig.GetID()}) + return v + } + } + + // 2. 
Check environment variables (DD or OT) + val := env.Get(config.dd) + key := config.dd // Store the environment variable that will be used to set the config + if otVal := env.Get(config.ot); otVal != "" { + ddPrefix := "config_datadog:" + otelPrefix := "config_opentelemetry:" + if val != "" { + log.Warn("Both %q and %q are set, using %s=%s", config.ot, config.dd, config.dd, val) + telemetryTags := []string{ddPrefix + strings.ToLower(config.dd), otelPrefix + strings.ToLower(config.ot)} + telemetry.Count(telemetry.NamespaceTracers, "otel.env.hiding", telemetryTags).Submit(1) + } else { + v, err := config.remapper(otVal) + if err != nil { + log.Warn("%s", err.Error()) + telemetryTags := []string{ddPrefix + strings.ToLower(config.dd), otelPrefix + strings.ToLower(config.ot)} + telemetry.Count(telemetry.NamespaceTracers, "otel.env.invalid", telemetryTags).Submit(1) + } + key = config.ot + val = v + } + } + if val != "" { + telemetry.RegisterAppConfig(telemetry.EnvToTelemetryName(key), val, telemetry.OriginEnvVar) + return val + } + + // 3. If handsOff, check local stable config + if config.handsOff { + if v := stableconfig.LocalConfig.Get(config.dd); v != "" { + telemetry.RegisterAppConfigs(telemetry.Configuration{Name: telemetry.EnvToTelemetryName(config.dd), Value: v, Origin: telemetry.OriginLocalStableConfig, ID: stableconfig.LocalConfig.GetID()}) + return v + } + } + + // 4. Not found, return empty string + return "" +} + +// mapDDTags maps OTEL_RESOURCE_ATTRIBUTES to DD_TAGS +func mapDDTags(ot string) (string, error) { + ddTags := make([]string, 0) + internal.ForEachStringTag(ot, internal.OtelTagsDelimeter, func(key, val string) { + // replace otel delimiter with dd delimiter and normalize tag names + if ddkey, ok := ddTagsMapping[key]; ok { + // map reserved otel tag names to dd tag names + ddTags = append([]string{ddkey + internal.DDTagsDelimiter + val}, ddTags...) + } else { + ddTags = append(ddTags, key+internal.DDTagsDelimiter+val) + } + }) + + if len(ddTags) > 10 { + log.Warn("The following resource attributes have been dropped: %v. 
Only the first 10 resource attributes will be applied: %s", ddTags[10:], ddTags[:10]) //nolint:gocritic // Slice logging for debugging + ddTags = ddTags[:10] + } + + return strings.Join(ddTags, ","), nil +} + +// mapService maps OTEL_SERVICE_NAME to DD_SERVICE +func mapService(ot string) (string, error) { + return ot, nil +} + +// mapMetrics maps OTEL_METRICS_EXPORTER to DD_RUNTIME_METRICS_ENABLED +func mapMetrics(ot string) (string, error) { + ot = strings.TrimSpace(strings.ToLower(ot)) + if ot == "none" { + return "false", nil + } + return "", fmt.Errorf("the following configuration is not supported: OTEL_METRICS_EXPORTER=%v", ot) +} + +// mapLogLevel maps OTEL_LOG_LEVEL to DD_TRACE_DEBUG +func mapLogLevel(ot string) (string, error) { + if strings.TrimSpace(strings.ToLower(ot)) == "debug" { + return "true", nil + } + return "", fmt.Errorf("the following configuration is not supported: OTEL_LOG_LEVEL=%v", ot) +} + +// mapEnabled maps OTEL_TRACES_EXPORTER to DD_TRACE_ENABLED +func mapEnabled(ot string) (string, error) { + if strings.TrimSpace(strings.ToLower(ot)) == "none" { + return "false", nil + } + return "", fmt.Errorf("the following configuration is not supported: OTEL_TRACES_EXPORTER=%v", ot) +} + +// otelTraceIDRatio returns the OTEL_TRACES_SAMPLER_ARG value, defaulting to "1.0" when it is unset +func otelTraceIDRatio() string { + if v := env.Get("OTEL_TRACES_SAMPLER_ARG"); v != "" { + return v + } + return "1.0" +} + +// mapSampleRate maps OTEL_TRACES_SAMPLER to DD_TRACE_SAMPLE_RATE +func mapSampleRate(ot string) (string, error) { + ot = strings.TrimSpace(strings.ToLower(ot)) + if v, ok := unsupportedSamplerMapping[ot]; ok { + log.Warn("The following configuration is not supported: OTEL_TRACES_SAMPLER=%s. %s will be used", ot, v) + ot = v + } + + var samplerMapping = map[string]string{ + "parentbased_always_on": "1.0", + "parentbased_always_off": "0.0", + "parentbased_traceidratio": otelTraceIDRatio(), + } + if v, ok := samplerMapping[ot]; ok { + return v, nil + } + return "", fmt.Errorf("unknown sampling configuration %v", ot) +} + +// mapPropagationStyle maps OTEL_PROPAGATORS to DD_TRACE_PROPAGATION_STYLE +func mapPropagationStyle(ot string) (string, error) { + ot = strings.TrimSpace(strings.ToLower(ot)) + supportedStyles := make([]string, 0) + for _, otStyle := range strings.Split(ot, ",") { + otStyle = strings.TrimSpace(otStyle) + if _, ok := propagationMapping[otStyle]; ok { + supportedStyles = append(supportedStyles, propagationMapping[otStyle]) + } else { + log.Warn("Invalid configuration: %q is not supported. This propagation style will be ignored.", otStyle) + } + } + return strings.Join(supportedStyles, ","), nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/payload.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/payload.go new file mode 100644 index 00000000..3cb58ef1 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/payload.go @@ -0,0 +1,321 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "bytes" + "encoding/binary" + "io" + "sync" + "sync/atomic" + + "github.com/tinylib/msgp/msgp" +) + +// payloadStats contains the statistics of a payload. +type payloadStats struct { + size int // size in bytes + itemCount int // number of items (traces) +} + +// payloadWriter defines the interface for writing data to a payload.
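+//
+// A rough usage sketch (the protocol value and span variables are
+// illustrative; newPayload, push and the Read side are defined below):
+//
+//	p := newPayload(4) // hypothetical protocol version
+//	p.push(spanList{spanA, spanB})
+//	body, _ := io.ReadAll(p) // msgpack array header followed by the items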
+type payloadWriter interface { + io.Writer + + push(t spanList) (stats payloadStats, err error) + grow(n int) + reset() + clear() + + // recordItem records that an item was added and updates the header + recordItem() +} + +// payloadReader defines the interface for reading data from a payload. +type payloadReader interface { + io.Reader + io.Closer + + stats() payloadStats + size() int + itemCount() int + protocol() float64 +} + +// payload combines both reading and writing operations for a payload. +type payload interface { + payloadWriter + payloadReader +} + +// unsafePayload is a wrapper on top of the msgpack encoder which allows constructing an +// encoded array by pushing its entries sequentially, one at a time. It basically +// allows us to encode as we would with a stream, except that the contents of the stream +// can be read as a slice by the msgpack decoder at any time. It follows the guidelines +// from the msgpack array spec: +// https://github.com/msgpack/msgpack/blob/master/spec.md#array-format-family +// +// unsafePayload implements io.Reader and can be used with the decoder directly. +// +// unsafePayload is not safe for concurrent use. +// +// unsafePayload is meant to be used only once and eventually dismissed with the +// single exception of retrying failed flush attempts. +// +// ⚠️ Warning! +// +// The payload should not be reused for multiple sets of traces. Resetting the +// payload for re-use requires the transport to wait for the HTTP package to +// Close the request body before attempting to re-use it again! This requires +// additional logic to be in place. See: +// +// • https://github.com/golang/go/blob/go1.16/src/net/http/client.go#L136-L138 +// • https://github.com/DataDog/dd-trace-go/pull/475 +// • https://github.com/DataDog/dd-trace-go/pull/549 +// • https://github.com/DataDog/dd-trace-go/pull/976 +type unsafePayload struct { + // header specifies the first few bytes in the msgpack stream + // indicating the type of array (fixarray, array16 or array32) + // and the number of items contained in the stream. + header []byte + + // off specifies the current read position on the header. + off int + + // count specifies the number of items in the stream. + count uint32 + + // buf holds the sequence of msgpack-encoded items. + buf bytes.Buffer + + // reader is used for reading the contents of buf. + reader *bytes.Reader + + // protocolVersion specifies the trace protocolVersion to use. + protocolVersion float64 +} + +var _ io.Reader = (*unsafePayload)(nil) + +// newUnsafePayload returns a ready to use unsafe payload. +func newUnsafePayload(protocol float64) *unsafePayload { + p := &unsafePayload{ + header: make([]byte, 8), + off: 8, + protocolVersion: protocol, + } + return p +} + +// push pushes a new item into the stream. +func (p *unsafePayload) push(t []*Span) (stats payloadStats, err error) { + sl := spanList(t) + p.buf.Grow(sl.Msgsize()) + if err := msgp.Encode(&p.buf, sl); err != nil { + return payloadStats{}, err + } + p.recordItem() + return p.stats(), nil +} + +// itemCount returns the number of items available in the stream. +func (p *unsafePayload) itemCount() int { + return int(atomic.LoadUint32(&p.count)) +} + +// size returns the payload size in bytes. After the first read the value becomes +// inaccurate by up to 8 bytes. +func (p *unsafePayload) size() int { + return p.buf.Len() + len(p.header) - p.off +} + +// reset sets up the payload to be read a second time. It maintains the +// underlying byte contents of the buffer. 
reset should not be used in order to +// reuse the payload for another set of traces. +func (p *unsafePayload) reset() { + p.updateHeader() + if p.reader != nil { + p.reader.Seek(0, 0) + } +} + +// clear empties the payload buffers. +func (p *unsafePayload) clear() { + p.buf = bytes.Buffer{} + p.reader = nil +} + +// https://github.com/msgpack/msgpack/blob/master/spec.md#array-format-family +const ( + msgpackArrayFix byte = 144 // up to 15 items + msgpackArray16 byte = 0xdc // up to 2^16-1 items, followed by size in 2 bytes + msgpackArray32 byte = 0xdd // up to 2^32-1 items, followed by size in 4 bytes +) + +// updateHeader updates the payload header based on the number of items currently +// present in the stream. +func (p *unsafePayload) updateHeader() { + n := uint64(atomic.LoadUint32(&p.count)) + switch { + case n <= 15: + p.header[7] = msgpackArrayFix + byte(n) + p.off = 7 + case n <= 1<<16-1: + binary.BigEndian.PutUint64(p.header, n) // writes 2 bytes + p.header[5] = msgpackArray16 + p.off = 5 + default: // n <= 1<<32-1 + binary.BigEndian.PutUint64(p.header, n) // writes 4 bytes + p.header[3] = msgpackArray32 + p.off = 3 + } +} + +// Close implements io.Closer +func (p *unsafePayload) Close() error { + return nil +} + +// Read implements io.Reader. It reads from the msgpack-encoded stream. +func (p *unsafePayload) Read(b []byte) (n int, err error) { + if p.off < len(p.header) { + // reading header + n = copy(b, p.header[p.off:]) + p.off += n + return n, nil + } + if p.reader == nil { + p.reader = bytes.NewReader(p.buf.Bytes()) + } + return p.reader.Read(b) +} + +// Write implements io.Writer. It writes data directly to the buffer. +func (p *unsafePayload) Write(data []byte) (n int, err error) { + return p.buf.Write(data) +} + +// grow grows the buffer to ensure it can accommodate n more bytes. +func (p *unsafePayload) grow(n int) { + p.buf.Grow(n) +} + +// recordItem records that an item was added and updates the header. +func (p *unsafePayload) recordItem() { + atomic.AddUint32(&p.count, 1) + p.updateHeader() +} + +// stats returns the current stats of the payload. +func (p *unsafePayload) stats() payloadStats { + return payloadStats{ + size: p.size(), + itemCount: int(atomic.LoadUint32(&p.count)), + } +} + +// protocol returns the protocol version of the payload. +func (p *unsafePayload) protocol() float64 { + return p.protocolVersion +} + +var _ io.Reader = (*safePayload)(nil) + +// newPayload returns a ready to use thread-safe payload. +func newPayload(protocol float64) payload { + return &safePayload{ + p: newUnsafePayload(protocol), + } +} + +// safePayload provides a thread-safe wrapper around unsafePayload. +type safePayload struct { + mu sync.RWMutex + p *unsafePayload +} + +// push pushes a new item into the stream in a thread-safe manner. +func (sp *safePayload) push(t spanList) (stats payloadStats, err error) { + sp.mu.Lock() + defer sp.mu.Unlock() + return sp.p.push(t) +} + +// itemCount returns the number of items available in the stream in a thread-safe manner. +func (sp *safePayload) itemCount() int { + // Use direct atomic access for better performance - no mutex needed + return int(atomic.LoadUint32(&sp.p.count)) +} + +// size returns the payload size in bytes in a thread-safe manner. +func (sp *safePayload) size() int { + sp.mu.RLock() + defer sp.mu.RUnlock() + return sp.p.size() +} + +// reset sets up the payload to be read a second time in a thread-safe manner. 
+func (sp *safePayload) reset() { + sp.mu.Lock() + defer sp.mu.Unlock() + sp.p.reset() +} + +// clear empties the payload buffers in a thread-safe manner. +func (sp *safePayload) clear() { + sp.mu.Lock() + defer sp.mu.Unlock() + sp.p.clear() +} + +// Read implements io.Reader in a thread-safe manner. +func (sp *safePayload) Read(b []byte) (n int, err error) { + // Note: Read modifies internal state (off, reader), so we need full lock + sp.mu.Lock() + defer sp.mu.Unlock() + return sp.p.Read(b) +} + +// Close implements io.Closer in a thread-safe manner. +func (sp *safePayload) Close() error { + sp.mu.Lock() + defer sp.mu.Unlock() + return sp.p.Close() +} + +// Write implements io.Writer in a thread-safe manner. +func (sp *safePayload) Write(data []byte) (n int, err error) { + sp.mu.Lock() + defer sp.mu.Unlock() + return sp.p.Write(data) +} + +// grow grows the buffer to ensure it can accommodate n more bytes in a thread-safe manner. +func (sp *safePayload) grow(n int) { + sp.mu.Lock() + defer sp.mu.Unlock() + sp.p.grow(n) +} + +// recordItem records that an item was added and updates the header in a thread-safe manner. +func (sp *safePayload) recordItem() { + sp.mu.Lock() + defer sp.mu.Unlock() + sp.p.recordItem() +} + +// stats returns the current stats of the payload in a thread-safe manner. +func (sp *safePayload) stats() payloadStats { + sp.mu.RLock() + defer sp.mu.RUnlock() + return sp.p.stats() +} + +// protocol returns the protocol version of the payload in a thread-safe manner. +func (sp *safePayload) protocol() float64 { + // Protocol is immutable after creation - no lock needed + return sp.p.protocol() +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/propagating_tags.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/propagating_tags.go new file mode 100644 index 00000000..e38b191b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/propagating_tags.go @@ -0,0 +1,94 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +func (t *trace) hasPropagatingTag(k string) bool { + t.mu.RLock() + defer t.mu.RUnlock() + _, ok := t.propagatingTags[k] + return ok +} + +func (t *trace) propagatingTag(k string) string { + t.mu.RLock() + defer t.mu.RUnlock() + return t.propagatingTags[k] +} + +// setPropagatingTag sets the key/value pair as a trace propagating tag. +func (t *trace) setPropagatingTag(key, value string) { + t.mu.Lock() + defer t.mu.Unlock() + t.setPropagatingTagLocked(key, value) +} + +func (t *trace) setTraceSourcePropagatingTag(key string, value internal.TraceSource) { + t.mu.Lock() + defer t.mu.Unlock() + + // If there is already a TraceSource value set in the trace + // we need to add the new value to the bitmask. + if source := t.propagatingTags[key]; source != "" { + tSource, err := internal.ParseTraceSource(source) + if err != nil { + log.Error("failed to parse trace source tag: %s", err.Error()) + } + + tSource |= value + + t.setPropagatingTagLocked(key, tSource.String()) + return + } + + t.setPropagatingTagLocked(key, value.String()) +} + +// setPropagatingTagLocked sets the key/value pair as a trace propagating tag. 
+// Not safe for concurrent use; setPropagatingTag should be used instead in that case. +func (t *trace) setPropagatingTagLocked(key, value string) { + if t.propagatingTags == nil { + t.propagatingTags = make(map[string]string, 1) + } + t.propagatingTags[key] = value +} + +// unsetPropagatingTag deletes the key/value pair from the trace's propagated tags. +func (t *trace) unsetPropagatingTag(key string) { + t.mu.Lock() + defer t.mu.Unlock() + delete(t.propagatingTags, key) +} + +// iteratePropagatingTags allows safe iteration through the propagating tags of a trace. +// The trace must not be modified during this call, as it is locked for reading. +// +// f should return whether the iteration should continue. +func (t *trace) iteratePropagatingTags(f func(k, v string) bool) { + t.mu.RLock() + defer t.mu.RUnlock() + for k, v := range t.propagatingTags { + if !f(k, v) { + break + } + } +} + +func (t *trace) replacePropagatingTags(tags map[string]string) { + t.mu.Lock() + defer t.mu.Unlock() + t.propagatingTags = tags +} + +func (t *trace) propagatingTagsLen() int { + t.mu.RLock() + defer t.mu.RUnlock() + return len(t.propagatingTags) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/propagator.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/propagator.go new file mode 100644 index 00000000..4a0bfe70 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/propagator.go @@ -0,0 +1,55 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "errors" +) + +// Propagator implementations should be able to inject and extract +// SpanContexts into an implementation specific carrier. +type Propagator interface { + // Inject takes the SpanContext and injects it into the carrier. + Inject(context *SpanContext, carrier interface{}) error + + // Extract returns the SpanContext from the given carrier. + Extract(carrier interface{}) (*SpanContext, error) +} + +// TextMapWriter allows setting key/value pairs of strings on the underlying +// data structure. Carriers implementing TextMapWriter can be used with +// Datadog's TextMapPropagator. +type TextMapWriter interface { + // Set sets the given key/value pair. + Set(key, val string) +} + +// TextMapReader allows iterating over sets of key/value pairs. Carriers implementing +// TextMapReader can be used with Datadog's TextMapPropagator. +type TextMapReader interface { + // ForeachKey iterates over all keys that exist in the underlying + // carrier. It takes a callback function which will be called + // using all key/value pairs as arguments. ForeachKey will return + // the first error returned by the handler. + ForeachKey(handler func(key, val string) error) error +} + +var ( + // ErrInvalidCarrier is returned when the carrier provided to the propagator + // does not implement the correct interfaces. + ErrInvalidCarrier = errors.New("invalid carrier") + + // ErrInvalidSpanContext is returned when the span context found in the + // carrier is not of the expected type. + ErrInvalidSpanContext = errors.New("invalid span context") + + // ErrSpanContextCorrupted is returned when there was a problem parsing + // the information found in the carrier.
+ ErrSpanContextCorrupted = errors.New("span context corrupted") + + // ErrSpanContextNotFound represents missing information in the given carrier. + ErrSpanContextNotFound = errors.New("span context not found") +) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/rand.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/rand.go new file mode 100644 index 00000000..75225b1a --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/rand.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "math" + "math/rand/v2" +) + +func randUint64() uint64 { + return rand.Uint64() +} + +func generateSpanID(_ int64) uint64 { + return rand.Uint64() & math.MaxInt64 +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/remote_config.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/remote_config.go new file mode 100644 index 00000000..2a14dadb --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/remote_config.go @@ -0,0 +1,427 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "regexp" + "strings" + "sync" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/remoteconfig" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + + "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" +) + +type configData struct { + Action string `json:"action"` + ServiceTarget target `json:"service_target"` + LibConfig libConfig `json:"lib_config"` +} + +type target struct { + Service string `json:"service"` + Env string `json:"env"` +} + +type libConfig struct { + Enabled *bool `json:"tracing_enabled,omitempty"` + SamplingRate *float64 `json:"tracing_sampling_rate,omitempty"` + TraceSamplingRules *[]rcSamplingRule `json:"tracing_sampling_rules,omitempty"` + HeaderTags *headerTags `json:"tracing_header_tags,omitempty"` + Tags *tags `json:"tracing_tags,omitempty"` +} + +type rcTag struct { + Key string `json:"key"` + ValueGlob string `json:"value_glob"` +} + +// Sampling rules provided by the remote config define tags as a list of key/value-glob pairs rather than as a map.
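+// A sketch of the JSON shape implied by the field tags below (all values are
+// illustrative):
+//
+//	{
+//	  "service": "billing",
+//	  "name": "checkout",
+//	  "resource": "POST /charge",
+//	  "provenance": "customer",
+//	  "tags": [{"key": "env", "value_glob": "prod-*"}],
+//	  "sample_rate": 0.5
+//	}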
+type rcSamplingRule struct { + Service string `json:"service"` + Provenance provenance `json:"provenance"` + Name string `json:"name,omitempty"` + Resource string `json:"resource"` + Tags []rcTag `json:"tags,omitempty"` + SampleRate float64 `json:"sample_rate"` +} + +func convertRemoteSamplingRules(rules *[]rcSamplingRule) *[]SamplingRule { + if rules == nil { + return nil + } + var convertedRules []SamplingRule + for _, rule := range *rules { + if rule.Tags != nil { + tags := make(map[string]*regexp.Regexp, len(rule.Tags)) + tagsStrs := make(map[string]string, len(rule.Tags)) + for _, tag := range rule.Tags { + tags[tag.Key] = globMatch(tag.ValueGlob) + tagsStrs[tag.Key] = tag.ValueGlob + } + x := SamplingRule{ + Service: globMatch(rule.Service), + Name: globMatch(rule.Name), + Resource: globMatch(rule.Resource), + Rate: rule.SampleRate, + Tags: tags, + Provenance: rule.Provenance, + globRule: &jsonRule{ + Name: rule.Name, + Service: rule.Service, + Resource: rule.Resource, + Tags: tagsStrs, + }, + } + + convertedRules = append(convertedRules, x) + } else { + x := SamplingRule{ + Service: globMatch(rule.Service), + Name: globMatch(rule.Name), + Resource: globMatch(rule.Resource), + Rate: rule.SampleRate, + Provenance: rule.Provenance, + globRule: &jsonRule{Name: rule.Name, Service: rule.Service, Resource: rule.Resource}, + } + convertedRules = append(convertedRules, x) + } + } + return &convertedRules +} + +type headerTags []headerTag + +type headerTag struct { + Header string `json:"header"` + TagName string `json:"tag_name"` +} + +func (hts *headerTags) toSlice() *[]string { + if hts == nil { + return nil + } + s := make([]string, len(*hts)) + for i, ht := range *hts { + s[i] = ht.toString() + } + return &s +} + +func (ht headerTag) toString() string { + var sb strings.Builder + sb.WriteString(ht.Header) + sb.WriteString(":") + sb.WriteString(ht.TagName) + return sb.String() +} + +type tags []string + +func (t *tags) toMap() *map[string]interface{} { + if t == nil { + return nil + } + m := make(map[string]interface{}, len(*t)) + for _, tag := range *t { + if kv := strings.SplitN(tag, ":", 2); len(kv) == 2 { + m[kv[0]] = kv[1] + } + } + return &m +} + +// onRemoteConfigUpdate is a remote config callback responsible for processing APM_TRACING RC-product updates. +func (t *tracer) onRemoteConfigUpdate(u remoteconfig.ProductUpdate) map[string]state.ApplyStatus { + statuses := map[string]state.ApplyStatus{} + if len(u) == 0 { + return statuses + } + removed := func() bool { + // Returns true if all the values in the update are nil. + for _, raw := range u { + if raw != nil { + return false + } + } + return true + } + var telemConfigs []telemetry.Configuration + if removed() { + // The remote-config client is signaling that the configuration has been deleted for this product. + // We re-apply the startup configuration values. + for path := range u { + log.Debug("Nil payload from RC. 
Path: %s.", path) + statuses[path] = state.ApplyStatus{State: state.ApplyStateAcknowledged} + } + log.Debug("Resetting configurations") + updated := t.config.traceSampleRate.reset() + if updated { + telemConfigs = append(telemConfigs, t.config.traceSampleRate.toTelemetry()) + } + updated = t.config.traceSampleRules.reset() + if updated { + telemConfigs = append(telemConfigs, t.config.traceSampleRules.toTelemetry()) + } + updated = t.config.headerAsTags.reset() + if updated { + telemConfigs = append(telemConfigs, t.config.headerAsTags.toTelemetry()) + } + updated = t.config.globalTags.reset() + if updated { + telemConfigs = append(telemConfigs, t.config.globalTags.toTelemetry()) + } + if !t.config.enabled.current { + log.Debug("APM Tracing is disabled. Restart the service to enable it.") + } + if len(telemConfigs) > 0 { + log.Debug("Reporting %d configuration changes to telemetry", len(telemConfigs)) + telemetry.RegisterAppConfigs(telemConfigs...) + } + return statuses + } + for path, raw := range u { + if raw == nil { + continue + } + log.Debug("Processing config from RC. Path: %s. Raw: %s", path, raw) + var c configData + if err := json.Unmarshal(raw, &c); err != nil { + log.Debug("Error while unmarshalling payload for %q: %v. Configuration won't be applied.", path, err.Error()) + statuses[path] = state.ApplyStatus{State: state.ApplyStateError, Error: err.Error()} + continue + } + statuses[path] = state.ApplyStatus{State: state.ApplyStateAcknowledged} + updated := t.config.traceSampleRate.handleRC(c.LibConfig.SamplingRate) + if updated { + telemConfigs = append(telemConfigs, t.config.traceSampleRate.toTelemetry()) + } + updated = t.config.traceSampleRules.handleRC(convertRemoteSamplingRules(c.LibConfig.TraceSamplingRules)) + if updated { + telemConfigs = append(telemConfigs, t.config.traceSampleRules.toTelemetry()) + } + updated = t.config.headerAsTags.handleRC(c.LibConfig.HeaderTags.toSlice()) + if updated { + telemConfigs = append(telemConfigs, t.config.headerAsTags.toTelemetry()) + } + updated = t.config.globalTags.handleRC(c.LibConfig.Tags.toMap()) + if updated { + telemConfigs = append(telemConfigs, t.config.globalTags.toTelemetry()) + } + if c.LibConfig.Enabled != nil { + if t.config.enabled.current && !*c.LibConfig.Enabled { + log.Debug("Disabled APM Tracing through RC. Restart the service to enable it.") + t.config.enabled.handleRC(c.LibConfig.Enabled) + telemConfigs = append(telemConfigs, t.config.enabled.toTelemetry()) + } else if !t.config.enabled.current && *c.LibConfig.Enabled { + log.Debug("APM Tracing is disabled. Restart the service to enable it.") + } + } + } + if len(telemConfigs) > 0 { + log.Debug("Reporting %d configuration changes to telemetry", len(telemConfigs)) + telemetry.RegisterAppConfigs(telemConfigs...) + } + return statuses +} + +type dynamicInstrumentationRCProbeConfig struct { + configPath string + configContent string +} + +type dynamicInstrumentationRCState struct { + sync.Mutex + state map[string]dynamicInstrumentationRCProbeConfig + + // symdbExport is a flag that indicates that this tracer is responsible + // for uploading symbols to the symbol database. The tracer will learn + // about this fact through the callbacks like the other dynamic + // instrumentation RC callbacks. + // + // The system is designed such that only a single tracer at a time is + // responsible for uploading symbols to the symbol database. This is + // communicated through a single RC key with a constant value. 
In order to + simplify the internal state of the tracer and avoid risks of excess memory + usage, we use a single boolean flag to track this state as opposed to + tracking the actual RC key and value. + symdbExport bool +} + +var ( + diRCState dynamicInstrumentationRCState + initalizeRC sync.Once +) + +func (t *tracer) dynamicInstrumentationRCUpdate(u remoteconfig.ProductUpdate) map[string]state.ApplyStatus { + applyStatus := make(map[string]state.ApplyStatus, len(u)) + + diRCState.Lock() + defer diRCState.Unlock() + for k, v := range u { + log.Debug("Received dynamic instrumentation RC configuration for %s\n", k) + if len(v) == 0 { + delete(diRCState.state, k) + applyStatus[k] = state.ApplyStatus{State: state.ApplyStateAcknowledged} + } else { + diRCState.state[k] = dynamicInstrumentationRCProbeConfig{ + configPath: k, + configContent: string(v), + } + applyStatus[k] = state.ApplyStatus{State: state.ApplyStateUnknown} + } + } + return applyStatus +} + +func (t *tracer) dynamicInstrumentationSymDBRCUpdate( + u remoteconfig.ProductUpdate, +) map[string]state.ApplyStatus { + applyStatus := make(map[string]state.ApplyStatus, len(u)) + diRCState.Lock() + defer diRCState.Unlock() + symDBEnabled := false + for k, v := range u { + if len(v) == 0 { + applyStatus[k] = state.ApplyStatus{State: state.ApplyStateAcknowledged} + } else { + applyStatus[k] = state.ApplyStatus{State: state.ApplyStateUnknown} + symDBEnabled = true + } + } + diRCState.symdbExport = symDBEnabled + return applyStatus +} + +// passProbeConfiguration is used as a stable interface to find the +// configuration via bpf. Go-DI attaches a bpf program to this function and +// extracts the raw bytes accordingly. +// +//nolint:all +//go:noinline +func passProbeConfiguration(runtimeID, configPath, configContent string) {} + +// passAllProbeConfigurationsComplete is used to signal to the bpf program that +// all probe configurations have been passed. +// +//nolint:all +//go:noinline +func passAllProbeConfigurationsComplete(runtimeID string) {} + +// passSymDBState is used as a stable interface to find the symbol database +// state via bpf. Go-DI attaches a bpf program to this function and extracts +// the arguments accordingly. +// +//nolint:all +//go:noinline +func passSymDBState(runtimeID string, enabled bool) {} + +// passAllProbeConfigurations is used to pass all probe configurations to the +// bpf program. +// +//go:noinline +func passAllProbeConfigurations(runtimeID string) { + defer passAllProbeConfigurationsComplete(runtimeID) + diRCState.Lock() + defer diRCState.Unlock() + for _, v := range diRCState.state { + accessStringsToMitigatePageFault(runtimeID, v.configPath, v.configContent) + passProbeConfiguration(runtimeID, v.configPath, v.configContent) + } + passSymDBState(runtimeID, diRCState.symdbExport) +} + +func initalizeDynamicInstrumentationRemoteConfigState() { + diRCState = dynamicInstrumentationRCState{ + state: map[string]dynamicInstrumentationRCProbeConfig{}, + } + + go func() { + for { + time.Sleep(time.Second * 5) + passAllProbeConfigurations(globalconfig.RuntimeID()) + } + }() +} + +// accessStringsToMitigatePageFault iterates over each string to trigger a page fault, +// ensuring it is loaded into RAM or listed in the translation lookaside buffer. +// This is done by writing the string to io.Discard. +// +// This function addresses an issue with the bpf program that hooks the +// `passProbeConfiguration()` function from system-probe. The bpf program fails
The bpf program fails +// to read strings if a page fault occurs because the `bpf_probe_read()` helper +// disables paging (uprobe bpf programs can't sleep). Consequently, page faults +// cause `bpf_probe_read()` to return an error and not read any data. +// By preloading the strings, we mitigate this issue, enhancing the reliability +// of the Go Dynamic Instrumentation product. +func accessStringsToMitigatePageFault(strs ...string) { + for i := range strs { + io.WriteString(io.Discard, strs[i]) + } +} + +// startRemoteConfig starts the remote config client. It registers the +// APM_TRACING product unconditionally and it registers the LIVE_DEBUGGING and +// LIVE_DEBUGGING_SYMBOL_DB with their respective callbacks if the tracer is +// configured to use the dynamic instrumentation product. +func (t *tracer) startRemoteConfig(rcConfig remoteconfig.ClientConfig) error { + err := remoteconfig.Start(rcConfig) + if err != nil { + return err + } + + var dynamicInstrumentationError, apmTracingError error + + if t.config.dynamicInstrumentationEnabled { + liveDebuggingError := remoteconfig.Subscribe( + "LIVE_DEBUGGING", t.dynamicInstrumentationRCUpdate, + ) + liveDebuggingSymDBError := remoteconfig.Subscribe( + "LIVE_DEBUGGING_SYMBOL_DB", t.dynamicInstrumentationSymDBRCUpdate, + ) + if liveDebuggingError != nil && liveDebuggingSymDBError != nil { + dynamicInstrumentationError = errors.Join( + liveDebuggingError, + liveDebuggingSymDBError, + ) + } else if liveDebuggingError != nil { + dynamicInstrumentationError = liveDebuggingError + } else if liveDebuggingSymDBError != nil { + dynamicInstrumentationError = liveDebuggingSymDBError + } + } + + initalizeRC.Do(initalizeDynamicInstrumentationRemoteConfigState) + + apmTracingError = remoteconfig.Subscribe( + state.ProductAPMTracing, + t.onRemoteConfigUpdate, + remoteconfig.APMTracingSampleRate, + remoteconfig.APMTracingHTTPHeaderTags, + remoteconfig.APMTracingCustomTags, + remoteconfig.APMTracingEnabled, + remoteconfig.APMTracingSampleRules, + ) + + if apmTracingError != nil || dynamicInstrumentationError != nil { + return fmt.Errorf( + "could not subscribe to at least one remote config product: %w; %w", + apmTracingError, + dynamicInstrumentationError, + ) + } + + return nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/rules_sampler.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/rules_sampler.go new file mode 100644 index 00000000..0d974131 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/rules_sampler.go @@ -0,0 +1,875 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "encoding/json" + "fmt" + "math" + "os" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/time/rate" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/samplernames" +) + +// rulesSampler holds instances of trace sampler and single span sampler, that are configured with the given set of rules. +type rulesSampler struct { + // traceRulesSampler samples trace spans based on a user-defined set of rules and might impact sampling decision of the trace. 
+ traces *traceRulesSampler + + // singleSpanRulesSampler samples individual spans based on a separate user-defined set of rules and + // cannot impact the trace sampling decision. + spans *singleSpanRulesSampler +} + +// newRulesSampler configures a *rulesSampler instance using the given set of rules. +// Rules are split between trace and single span sampling rules according to their type. +// Such rules are user-defined through environment variable or WithSamplingRules option. +// Invalid rules or environment variable values are tolerated, by logging warnings and then ignoring them. +func newRulesSampler(traceRules, spanRules []SamplingRule, traceSampleRate, rateLimitPerSecond float64) *rulesSampler { + return &rulesSampler{ + traces: newTraceRulesSampler(traceRules, traceSampleRate, rateLimitPerSecond), + spans: newSingleSpanRulesSampler(spanRules), + } +} + +func (r *rulesSampler) SampleTrace(s *Span) bool { + if s == nil { + return false + } + return r.traces.sampleRules(s) +} + +func (r *rulesSampler) SampleTraceGlobalRate(s *Span) bool { + if s == nil { + return false + } + return r.traces.sampleGlobalRate(s) +} + +func (r *rulesSampler) SampleSpan(s *Span) bool { + if s == nil { + return false + } + return r.spans.apply(s) +} + +func (r *rulesSampler) HasSpanRules() bool { return r.spans.enabled() } + +func (r *rulesSampler) TraceRateLimit() (float64, bool) { return r.traces.limit() } + +type provenance int32 + +const ( + Local provenance = iota + Customer provenance = 1 + Dynamic provenance = 2 +) + +var provenances = []provenance{Local, Customer, Dynamic} + +func (p provenance) String() string { + switch p { + case Local: + return "local" + case Customer: + return "customer" + case Dynamic: + return "dynamic" + default: + return "" + } +} + +func (p provenance) MarshalJSON() ([]byte, error) { + return json.Marshal(p.String()) +} + +func (p *provenance) UnmarshalJSON(data []byte) error { + var prov string + var err error + if err = json.Unmarshal(data, &prov); err != nil { + return err + } + if *p, err = parseProvenance(prov); err != nil { + return err + } + return nil +} + +func parseProvenance(p string) (provenance, error) { + for _, v := range provenances { + if strings.EqualFold(strings.TrimSpace(strings.ToLower(p)), v.String()) { + return v, nil + } + } + return Customer, fmt.Errorf("invalid provenance: \"%v\"", p) +} + +// SamplingRule is used for applying sampling rates to spans that match +// the service name, operation name or both. +// For basic usage, consider using the helper functions ServiceRule, NameRule, etc. +type SamplingRule struct { + // Service specifies the regex pattern that a span service name must match. + Service *regexp.Regexp + + // Name specifies the regex pattern that a span operation name must match. + Name *regexp.Regexp + + // Rate specifies the sampling rate that should be applied to spans that match + // service and/or name of the rule. + Rate float64 + + // MaxPerSecond specifies max number of spans per second that can be sampled per the rule. + // If not specified, the default is no limit. + MaxPerSecond float64 + + // Resource specifies the regex pattern that a span resource must match. + Resource *regexp.Regexp + + // Tags specifies the map of key-value patterns that span tags must match. + Tags map[string]*regexp.Regexp + + Provenance provenance + + ruleType SamplingRuleType + limiter *rateLimiter + + globRule *jsonRule +} + +// Poor-man's comparison of two regex for equality without resorting to fancy symbolic computation. 
+// The result can be a false negative: whenever the function returns true, the two regexes must be +// equal, but the reverse is not true; two regexes can be equivalent while reported as not. +// This makes it a useful indicator for optimizations that apply when two regexes are equal. +func regexEqualsFalseNegative(a, b *regexp.Regexp) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return a.String() == b.String() +} + +func (sr *SamplingRule) EqualsFalseNegative(other *SamplingRule) bool { + if (sr == nil) != (other == nil) { + return false + } + if sr == nil { + return true + } + if sr.Rate != other.Rate || sr.ruleType != other.ruleType || + !regexEqualsFalseNegative(sr.Service, other.Service) || + !regexEqualsFalseNegative(sr.Name, other.Name) || + !regexEqualsFalseNegative(sr.Resource, other.Resource) || + len(sr.Tags) != len(other.Tags) { + return false + } + for k, v := range sr.Tags { + if vo, ok := other.Tags[k]; !ok || !regexEqualsFalseNegative(v, vo) { + return false + } + } + return true +} + +// match returns true when the span's details match all the expected values in the rule. +func (sr *SamplingRule) match(s *Span) bool { + if sr.Service != nil && !sr.Service.MatchString(s.service) { + return false + } + if sr.Name != nil && !sr.Name.MatchString(s.name) { + return false + } + if sr.Resource != nil && !sr.Resource.MatchString(s.resource) { + return false + } + s.mu.Lock() + defer s.mu.Unlock() + if sr.Tags != nil { + for k, regex := range sr.Tags { + if regex == nil { + continue + } + if s.meta != nil { + v, ok := s.meta[k] + if ok && regex.MatchString(v) { + continue + } + } + if s.metrics != nil { + v, ok := s.metrics[k] + // sampling on numbers with floating point is not supported, + // thus 'math.Floor(v) != v' + if !ok || math.Floor(v) != v || !regex.MatchString(strconv.FormatFloat(v, 'g', -1, 64)) { + return false + } + } + } + } + return true +} + +// SamplingRuleType represents a type of sampling rule spans are matched against. +type SamplingRuleType int + +const ( + SamplingRuleUndefined SamplingRuleType = 0 + + // SamplingRuleTrace specifies a sampling rule that applies to the entire trace if any spans satisfy the criteria. + // If a sampling rule is of type SamplingRuleTrace, such rule determines the sampling rate to apply + // to trace spans. If a span matches that rule, it will impact the trace sampling decision. + SamplingRuleTrace = iota + + // SamplingRuleSpan specifies a sampling rule that applies to a single span without affecting the entire trace. + // If a sampling rule is of type SamplingRuleSingleSpan, such rule determines the sampling rate to apply + // to individual spans. If a span matches a rule, it will NOT impact the trace sampling decision. + // In the case that a trace is dropped and thus not sent to the Agent, spans kept on account + // of matching SamplingRuleSingleSpan rules must be conveyed separately. + SamplingRuleSpan +) + +func (sr SamplingRuleType) String() string { + switch sr { + case SamplingRuleTrace: + return "trace" + case SamplingRuleSpan: + return "span" + default: + return "" + } +} + +// Rule is used to create a sampling rule. +type Rule struct { + ServiceGlob string + NameGlob string + ResourceGlob string + Tags map[string]string // map of string to glob pattern + Rate float64 + MaxPerSecond float64 +} + +// TraceSamplingRules creates a sampling rule that applies to the entire trace if any spans satisfy the criteria. 
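+//
+// A minimal sketch (assuming the WithSamplingRules start option mentioned
+// above accepts the returned slice; globs and rates are illustrative):
+//
+//	rules := tracer.TraceSamplingRules(
+//		tracer.Rule{ServiceGlob: "checkout-*", Rate: 0.2},
+//		tracer.Rule{NameGlob: "http.request", ResourceGlob: "GET /health", Rate: 0},
+//	)
+//	tracer.Start(tracer.WithSamplingRules(rules))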
+func TraceSamplingRules(rules ...Rule) []SamplingRule { + var samplingRules []SamplingRule + var typ SamplingRuleType = SamplingRuleTrace + for _, r := range rules { + sr := SamplingRule{ + Service: globMatch(r.ServiceGlob), + Name: globMatch(r.NameGlob), + Resource: globMatch(r.ResourceGlob), + Rate: r.Rate, + ruleType: SamplingRuleTrace, + globRule: &jsonRule{ + Service: r.ServiceGlob, + Name: r.NameGlob, + Rate: json.Number(strconv.FormatFloat(r.Rate, 'f', -1, 64)), + MaxPerSecond: r.MaxPerSecond, + Resource: r.ResourceGlob, + Tags: r.Tags, + Type: &typ, + }, + } + if len(r.Tags) != 0 { + sr.Tags = make(map[string]*regexp.Regexp, len(r.Tags)) + for k, v := range r.Tags { + if g := globMatch(v); g != nil { + sr.Tags[k] = g + } + } + } + samplingRules = append(samplingRules, sr) + } + return samplingRules +} + +// SpanSamplingRules creates a sampling rule that applies to a single span without affecting the entire trace. +func SpanSamplingRules(rules ...Rule) []SamplingRule { + var samplingRules []SamplingRule + var typ SamplingRuleType = SamplingRuleSpan + for _, r := range rules { + sr := SamplingRule{ + Service: globMatch(r.ServiceGlob), + Name: globMatch(r.NameGlob), + Resource: globMatch(r.ResourceGlob), + Rate: r.Rate, + ruleType: SamplingRuleSpan, + MaxPerSecond: r.MaxPerSecond, + limiter: newSingleSpanRateLimiter(r.MaxPerSecond), + globRule: &jsonRule{ + Service: r.ServiceGlob, + Name: r.NameGlob, + Rate: json.Number(strconv.FormatFloat(r.Rate, 'f', -1, 64)), + MaxPerSecond: r.MaxPerSecond, + Resource: r.ResourceGlob, + Tags: r.Tags, + Type: &typ, + }, + } + if len(r.Tags) != 0 { + sr.Tags = make(map[string]*regexp.Regexp, len(r.Tags)) + for k, v := range r.Tags { + if g := globMatch(v); g != nil { + sr.Tags[k] = g + } + } + } + samplingRules = append(samplingRules, sr) + } + return samplingRules +} + +// traceRulesSampler allows a user-defined list of rules to apply to traces. +// These rules can match based on the span's Service, Name or both. +// When making a sampling decision, the rules are checked in order until +// a match is found. +// If a match is found, the rate from that rule is used. +// If no match is found, and the DD_TRACE_SAMPLE_RATE environment variable +// was set to a valid rate, that value is used. +// Otherwise, the rules sampler didn't apply to the span, and the decision +// is passed to the priority sampler. +// +// The rate is used to determine if the span should be sampled, but an upper +// limit can be defined using the DD_TRACE_RATE_LIMIT environment variable. +// Its value is the number of spans to sample per second. +// Spans that matched the rules but exceeded the rate limit are not sampled. +type traceRulesSampler struct { + m sync.RWMutex + rules []SamplingRule // the rules to match spans with + globalRate float64 // a rate to apply when no rules match a span + limiter *rateLimiter // used to limit the volume of spans sampled +} + +// newTraceRulesSampler configures a *traceRulesSampler instance using the given set of rules. +// Invalid rules or environment variable values are tolerated, by logging warnings and then ignoring them. 
+func newTraceRulesSampler(rules []SamplingRule, traceSampleRate, rateLimitPerSecond float64) *traceRulesSampler { + return &traceRulesSampler{ + rules: rules, + globalRate: traceSampleRate, + limiter: newRateLimiter(rateLimitPerSecond), + } +} + +func (rs *traceRulesSampler) enabled() bool { + rs.m.RLock() + defer rs.m.RUnlock() + return len(rs.rules) > 0 || !math.IsNaN(rs.globalRate) +} + +// EqualsFalseNegative tests whether two sets of rules are the same. +// The result can be a false negative. If the result is true, then the two sets of rules +// are guaranteed to be the same. +// On the other hand, false can be returned while the two rulesets are logically the same. +// This function can be used to detect optimization opportunities when two rulesets are the same. +// For example, an update of one ruleset is not needed if it's the same as the previous one. +func EqualsFalseNegative(a, b []SamplingRule) bool { + if len(a) != len(b) { + return false + } + for i, r := range a { + if !r.EqualsFalseNegative(&b[i]) { + return false + } + } + return true +} + +// setGlobalSampleRate sets the global sample rate to the given value. +// Returns whether the value was changed or not. +func (rs *traceRulesSampler) setGlobalSampleRate(rate float64) bool { + if rate < 0.0 || rate > 1.0 { + log.Warn("Ignoring trace sample rate %f: value out of range [0,1]", rate) + return false + } + rs.m.Lock() + defer rs.m.Unlock() + if math.IsNaN(rs.globalRate) && math.IsNaN(rate) { + // NaN is not considered equal to any number, including itself. + // It should be compared with math.IsNaN + return false + } + if rs.globalRate == rate { + return false + } + rs.globalRate = rate + return true +} + +// Assumes the new rules are different from the old rules. +func (rs *traceRulesSampler) setTraceSampleRules(rules []SamplingRule) bool { + if EqualsFalseNegative(rs.rules, rules) { + return false + } + rs.rules = rules + return true +} + +// sampleGlobalRate applies the global trace sampling rate to the span. If the rate is NaN, +// it returns false and the span is not +// modified. +func (rs *traceRulesSampler) sampleGlobalRate(span *Span) bool { + if !rs.enabled() { + // short path when disabled + return false + } + + rs.m.RLock() + rate := rs.globalRate + rs.m.RUnlock() + + if math.IsNaN(rate) { + return false + } + + // global rate is a degenerate case of rule rate. + // Technically speaking, global rate also has two possible provenances: local or remote. + // We just apply the sampler name corresponding to local rule rate because global rate is + // being deprecated in favor of sampling rules. + // Note that this just preserves an existing behavior even though it is not correct. + sampler := samplernames.RuleRate + rs.applyRate(span, rate, time.Now(), sampler) + return true +} + +// sampleRules uses the sampling rules to determine the sampling rate for the +// provided span. If the rules don't match, then it returns false and the span is not +// modified. 
+func (rs *traceRulesSampler) sampleRules(span *Span) bool { + if !rs.enabled() { + // short path when disabled + return false + } + + var matched bool + rs.m.RLock() + rate := rs.globalRate + rs.m.RUnlock() + sampler := samplernames.RuleRate + for _, rule := range rs.rules { + if rule.match(span) { + matched = true + rate = rule.Rate + if rule.Provenance == Customer { + sampler = samplernames.RemoteUserRule + } else if rule.Provenance == Dynamic { + sampler = samplernames.RemoteDynamicRule + } + break + } + } + if !matched { + // no matching rule or global rate, so we want to fall back + // to priority sampling + return false + } + + rs.applyRate(span, rate, time.Now(), sampler) + return true +} + +func (rs *traceRulesSampler) applyRate(span *Span, rate float64, now time.Time, sampler samplernames.SamplerName) { + span.mu.Lock() + defer span.mu.Unlock() + + // We don't lock spans when flushing, so we could have a data race when + // modifying a span as it's being flushed. This protects us against that + // race, since spans are marked `finished` before we flush them. + if span.finished { + return + } + + span.setMetric(keyRulesSamplerAppliedRate, rate) + delete(span.metrics, keySamplingPriorityRate) + if !sampledByRate(span.traceID, rate) { + span.setSamplingPriorityLocked(ext.PriorityUserReject, sampler) + return + } + + sampled, rate := rs.limiter.allowOne(now) + if sampled { + span.setSamplingPriorityLocked(ext.PriorityUserKeep, sampler) + } else { + span.setSamplingPriorityLocked(ext.PriorityUserReject, sampler) + } + span.setMetric(keyRulesSamplerLimiterRate, rate) +} + +// limit returns the rate limit set in the rules sampler, controlled by DD_TRACE_RATE_LIMIT, and +// true if rules sampling is enabled. If not present it returns math.NaN() and false. +func (rs *traceRulesSampler) limit() (float64, bool) { + if rs.enabled() { + return float64(rs.limiter.limiter.Limit()), true + } + return math.NaN(), false +} + +// newRateLimiter returns a rate limiter which restricts the number of traces sampled per second. +// The limit is DD_TRACE_RATE_LIMIT if set, `defaultRateLimit` otherwise. +func newRateLimiter(ratePerSecond float64) *rateLimiter { + return &rateLimiter{ + limiter: rate.NewLimiter(rate.Limit(ratePerSecond), int(math.Ceil(ratePerSecond))), + prevTime: time.Now(), + } +} + +// singleSpanRulesSampler allows a user-defined list of rules to apply to spans +// to sample single spans. +// These rules match based on the span's Service and Name. If empty value is supplied +// to either Service or Name field, it will default to "*", allow all. +// When making a sampling decision, the rules are checked in order until +// a match is found. +// If a match is found, the rate from that rule is used. +// If no match is found, no changes or further sampling is applied to the spans. +// The rate is used to determine if the span should be sampled, but an upper +// limit can be defined using the max_per_second field when supplying the rule. +// If max_per_second is absent in the rule, the default is allow all. +// Its value is the max number of spans to sample per second. +// Spans that matched the rules but exceeded the rate limit are not sampled. +type singleSpanRulesSampler struct { + rules []SamplingRule // the rules to match spans with +} + +// newSingleSpanRulesSampler configures a *singleSpanRulesSampler instance using the given set of rules. +// Invalid rules or environment variable values are tolerated, by logging warnings and then ignoring them. 
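+//
+// For example, rules equivalent to those built with SpanSamplingRules above
+// can come from the environment (sketch):
+//
+//	DD_SPAN_SAMPLING_RULES='[{"service":"worker","name":"db.query","sample_rate":1.0,"max_per_second":50}]'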
+func newSingleSpanRulesSampler(rules []SamplingRule) *singleSpanRulesSampler { + return &singleSpanRulesSampler{ + rules: rules, + } +} + +func (rs *singleSpanRulesSampler) enabled() bool { + return len(rs.rules) > 0 +} + +// apply uses the sampling rules to determine the sampling rate for the +// provided span. If the rules don't match, then it returns false and the span is not +// modified. +func (rs *singleSpanRulesSampler) apply(span *Span) bool { + for _, rule := range rs.rules { + if rule.match(span) { + rate := rule.Rate + if !sampledByRate(span.spanID, rate) { + return false + } + var sampled bool + if rule.limiter != nil { + sampled, rate = rule.limiter.allowOne(nowTime()) + if !sampled { + return false + } + } + delete(span.metrics, keySamplingPriorityRate) + span.setMetric(keySpanSamplingMechanism, float64(samplernames.SingleSpan)) + span.setMetric(keySingleSpanSamplingRuleRate, rate) + if rule.MaxPerSecond != 0 { + span.setMetric(keySingleSpanSamplingMPS, rule.MaxPerSecond) + } + return true + } + } + return false +} + +// rateLimiter is a wrapper on top of golang.org/x/time/rate which implements a rate limiter but also +// returns the effective rate of allowance. +type rateLimiter struct { + limiter *rate.Limiter + + mu sync.Mutex // guards below fields + prevTime time.Time // time at which prevAllowed and prevSeen were set + allowed float64 // number of spans allowed in the current period + seen float64 // number of spans seen in the current period + prevAllowed float64 // number of spans allowed in the previous period + prevSeen float64 // number of spans seen in the previous period +} + +// allowOne returns the rate limiter's decision to allow the span to be sampled, and the +// effective rate at the time it is called. The effective rate is computed by averaging the rate +// for the previous second with the current rate +func (r *rateLimiter) allowOne(now time.Time) (bool, float64) { + r.mu.Lock() + defer r.mu.Unlock() + if d := now.Sub(r.prevTime); d >= time.Second { + // enough time has passed to reset the counters + if d.Truncate(time.Second) == time.Second && r.seen > 0 { + // exactly one second, so update prev + r.prevAllowed = r.allowed + r.prevSeen = r.seen + } else { + // more than one second, so reset previous rate + r.prevAllowed = 0 + r.prevSeen = 0 + } + r.prevTime = now + r.allowed = 0 + r.seen = 0 + } + + r.seen++ + var sampled bool + if r.limiter.AllowN(now, 1) { + r.allowed++ + sampled = true + } + er := (r.prevAllowed + r.allowed) / (r.prevSeen + r.seen) + return sampled, er +} + +// newSingleSpanRateLimiter returns a rate limiter which restricts the number of single spans sampled per second. +// This defaults to infinite, allow all behaviour. The MaxPerSecond value of the rule may override the default. +func newSingleSpanRateLimiter(mps float64) *rateLimiter { + limit := math.MaxFloat64 + if mps > 0 { + limit = mps + } + return &rateLimiter{ + limiter: rate.NewLimiter(rate.Limit(limit), int(math.Ceil(limit))), + prevTime: time.Now(), + } +} + +// globMatch compiles pattern string into glob format, i.e. regular expressions with only '?' +// and '*' treated as regex metacharacters. +func globMatch(pattern string) *regexp.Regexp { + if pattern == "" || pattern == "*" { + return nil + } + // escaping regex characters + pattern = regexp.QuoteMeta(pattern) + // replacing '?' 
and '*' with regex characters
+	pattern = strings.Replace(pattern, "\\?", ".", -1)
+	pattern = strings.Replace(pattern, "\\*", ".*", -1)
+	// pattern must match an entire string
+	return regexp.MustCompile(fmt.Sprintf("(?i)^%s$", pattern))
+}
+
+// samplingRulesFromEnv parses sampling rules from
+// the DD_TRACE_SAMPLING_RULES, DD_TRACE_SAMPLING_RULES_FILE,
+// DD_SPAN_SAMPLING_RULES and DD_SPAN_SAMPLING_RULES_FILE environment variables.
+func samplingRulesFromEnv() (trace, span []SamplingRule, err error) {
+	var errs []string
+	defer func() {
+		if len(errs) != 0 {
+			err = fmt.Errorf("\n\t%s", strings.Join(errs, "\n\t"))
+		}
+	}()
+
+	rulesByType := func(spanType SamplingRuleType) (rules []SamplingRule, errs []string) {
+		envKey := fmt.Sprintf("DD_%s_SAMPLING_RULES", strings.ToUpper(spanType.String()))
+		rulesEnv := env.Get(envKey)
+		rules, err := unmarshalSamplingRules([]byte(rulesEnv), spanType)
+		if err != nil {
+			errs = append(errs, err.Error())
+		}
+		rulesFile := env.Get(envKey + "_FILE")
+		if len(rules) != 0 {
+			if rulesFile != "" {
+				log.Warn("DIAGNOSTICS Error(s): %s is available and will take precedence over %s_FILE", envKey, envKey)
+			}
+			return rules, errs
+		}
+		if rulesFile == "" {
+			return rules, errs
+		}
+		rulesFromEnvFile, err := os.ReadFile(rulesFile)
+		if err != nil {
+			errs = append(errs, fmt.Sprintf("Couldn't read file from %s_FILE: %v", envKey, err))
+		}
+		rules, err = unmarshalSamplingRules(rulesFromEnvFile, spanType)
+		if err != nil {
+			errs = append(errs, err.Error())
+		}
+		return rules, errs
+	}
+
+	trace, tErrs := rulesByType(SamplingRuleTrace)
+	if len(tErrs) != 0 {
+		errs = append(errs, tErrs...)
+	}
+	span, sErrs := rulesByType(SamplingRuleSpan)
+	if len(sErrs) != 0 {
+		errs = append(errs, sErrs...)
+	}
+	return trace, span, err
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (sr *SamplingRule) UnmarshalJSON(b []byte) error {
+	if len(b) == 0 {
+		return nil
+	}
+	var v jsonRule
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+	rules, err := validateRules([]jsonRule{v}, SamplingRuleUndefined)
+	if err != nil {
+		return err
+	}
+	*sr = rules[0]
+	return nil
+}
+
+type jsonRule struct {
+	Service      string            `json:"service"`
+	Name         string            `json:"name"`
+	Rate         json.Number       `json:"sample_rate"`
+	MaxPerSecond float64           `json:"max_per_second"`
+	Resource     string            `json:"resource"`
+	Tags         map[string]string `json:"tags"`
+	Type         *SamplingRuleType `json:"type,omitempty"`
+	Provenance   provenance        `json:"provenance,omitempty"`
+}
+
+func (j jsonRule) String() string {
+	var s []string
+	if j.Service != "" {
+		s = append(s, fmt.Sprintf("Service:%s", j.Service))
+	}
+	if j.Name != "" {
+		s = append(s, fmt.Sprintf("Name:%s", j.Name))
+	}
+	if j.Rate != "" {
+		s = append(s, fmt.Sprintf("Rate:%s", j.Rate))
+	}
+	if j.MaxPerSecond != 0 {
+		s = append(s, fmt.Sprintf("MaxPerSecond:%f", j.MaxPerSecond))
+	}
+	if j.Resource != "" {
+		s = append(s, fmt.Sprintf("Resource:%s", j.Resource))
+	}
+	if len(j.Tags) != 0 {
+		s = append(s, fmt.Sprintf("Tags:%v", j.Tags))
+	}
+	if j.Type != nil {
+		s = append(s, fmt.Sprintf("Type: %v", *j.Type))
+	}
+	if j.Provenance != Local {
+		s = append(s, fmt.Sprintf("Provenance: %v", j.Provenance.String()))
+	}
+	return fmt.Sprintf("{%s}", strings.Join(s, " "))
+}
+
+// unmarshalSamplingRules unmarshals JSON from b and returns the sampling rules found,
+// attributing the given spanType to them. If any errors occur, they are returned.
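+// The expected input is a JSON array of rule objects matching the jsonRule schema
+// above, e.g. (illustrative):
+//
+//	[{"service": "test-*", "name": "http.request", "sample_rate": 0.5}]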
+func unmarshalSamplingRules(b []byte, spanType SamplingRuleType) ([]SamplingRule, error) { + if len(b) == 0 { + return nil, nil + } + var jsonRules []jsonRule + // if the JSON is an array, unmarshal it as an array of rules + err := json.Unmarshal(b, &jsonRules) + if err != nil { + return nil, fmt.Errorf("error unmarshalling JSON: %s", err.Error()) + } + return validateRules(jsonRules, spanType) +} + +func validateRules(jsonRules []jsonRule, spanType SamplingRuleType) ([]SamplingRule, error) { + var errs []string + rules := make([]SamplingRule, 0, len(jsonRules)) + for i, v := range jsonRules { + if v.Rate == "" { + v.Rate = "1" + } + if v.Type != nil && *v.Type != spanType { + spanType = *v.Type + } + rate, err := v.Rate.Float64() + if err != nil { + errs = append(errs, fmt.Sprintf("at index %d: %v", i, err)) + continue + } + if rate < 0.0 || rate > 1.0 { + errs = append( + errs, + fmt.Sprintf("at index %d: ignoring rule %s: rate is out of [0.0, 1.0] range", i, v.String()), + ) + continue + } + tagGlobs := make(map[string]*regexp.Regexp, len(v.Tags)) + for k, g := range v.Tags { + tagGlobs[k] = globMatch(g) + } + rules = append(rules, SamplingRule{ + Service: globMatch(v.Service), + Name: globMatch(v.Name), + Rate: rate, + MaxPerSecond: v.MaxPerSecond, + Resource: globMatch(v.Resource), + Tags: tagGlobs, + Provenance: v.Provenance, + ruleType: spanType, + limiter: newSingleSpanRateLimiter(v.MaxPerSecond), + globRule: &jsonRules[i], + }) + } + if len(errs) != 0 { + return rules, fmt.Errorf("%s", strings.Join(errs, "\n\t")) + } + return rules, nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (sr SamplingRule) MarshalJSON() ([]byte, error) { + s := struct { + Service string `json:"service,omitempty"` + Name string `json:"name,omitempty"` + Resource string `json:"resource,omitempty"` + Rate float64 `json:"sample_rate"` + Tags map[string]string `json:"tags,omitempty"` + MaxPerSecond *float64 `json:"max_per_second,omitempty"` + Provenance string `json:"provenance,omitempty"` + }{} + if sr.globRule != nil { + s.Service = sr.globRule.Service + s.Name = sr.globRule.Name + s.Resource = sr.globRule.Resource + s.Tags = sr.globRule.Tags + } else { + if sr.Service != nil { + s.Service = sr.Service.String() + } + if sr.Name != nil { + s.Name = sr.Name.String() + } + if sr.Resource != nil { + s.Resource = sr.Resource.String() + } + s.Tags = make(map[string]string, len(sr.Tags)) + for k, v := range sr.Tags { + if v != nil { + s.Tags[k] = v.String() + } + } + } + if sr.MaxPerSecond != 0 { + s.MaxPerSecond = &sr.MaxPerSecond + } + s.Rate = sr.Rate + if sr.Provenance != Local { + s.Provenance = sr.Provenance.String() + } + return json.Marshal(&s) +} + +func (sr SamplingRule) String() string { + s, err := sr.MarshalJSON() + if err != nil { + log.Error("Error marshalling SamplingRule to json: %s", err.Error()) + } + return string(s) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/sampler.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/sampler.go new file mode 100644 index 00000000..6d773576 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/sampler.go @@ -0,0 +1,176 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
+ +package tracer + +import ( + "encoding/json" + "io" + "math" + "sync" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/internal/samplernames" +) + +// Sampler is an interface for sampling traces. +type Sampler interface { + // Sample returns true if the given span should be sampled. + Sample(span *Span) bool +} + +// RateSampler is a sampler implementation which randomly selects spans using a +// provided rate. For example, a rate of 0.75 will permit 75% of the spans. +// RateSampler implementations should be safe for concurrent use. +type RateSampler interface { + Sampler + + // Rate returns the current sample rate. + Rate() float64 + + // SetRate sets a new sample rate. + SetRate(rate float64) +} + +type customSampler struct { + s Sampler +} + +// Rate implements RateSampler. +func (*customSampler) Rate() float64 { + return 1.0 +} + +// SetRate implements RateSampler. +func (*customSampler) SetRate(_ float64) { + // noop +} + +func (s *customSampler) Sample(span *Span) bool { + return s.s.Sample(span) +} + +// rateSampler samples from a sample rate. +type rateSampler struct { + sync.RWMutex + rate float64 +} + +// NewAllSampler is a short-hand for NewRateSampler(1). It is all-permissive. +func NewAllSampler() RateSampler { return NewRateSampler(1) } + +// NewRateSampler returns an initialized RateSampler with a given sample rate. +func NewRateSampler(rate float64) RateSampler { + if rate > 1.0 { + rate = 1.0 + } + if rate < 0.0 { + rate = 0.0 + } + return &rateSampler{rate: rate} +} + +// Rate returns the current rate of the sampler. +func (r *rateSampler) Rate() float64 { + r.RLock() + defer r.RUnlock() + return r.rate +} + +// SetRate sets a new sampling rate. +func (r *rateSampler) SetRate(rate float64) { + r.Lock() + r.rate = rate + r.Unlock() +} + +// constants used for the Knuth hashing, same as agent. +const knuthFactor = uint64(1111111111111111111) + +// Sample returns true if the given span should be sampled. +func (r *rateSampler) Sample(s *Span) bool { + if r.rate == 1 { + // fast path + return true + } + if r.rate == 0 || s == nil { + return false + } + r.RLock() + defer r.RUnlock() + return sampledByRate(s.traceID, r.rate) +} + +// sampledByRate verifies if the number n should be sampled at the specified +// rate. +func sampledByRate(n uint64, rate float64) bool { + if rate == 1 { + return true + } + if rate == 0 { + return false + } + + return n*knuthFactor <= uint64(rate*math.MaxUint64) +} + +// prioritySampler holds a set of per-service sampling rates and applies +// them to spans. +type prioritySampler struct { + mu sync.RWMutex + rates map[string]float64 + defaultRate float64 +} + +func newPrioritySampler() *prioritySampler { + return &prioritySampler{ + rates: make(map[string]float64), + defaultRate: 1., + } +} + +// readRatesJSON will try to read the rates as JSON from the given io.ReadCloser. +func (ps *prioritySampler) readRatesJSON(rc io.ReadCloser) error { + var payload struct { + Rates map[string]float64 `json:"rate_by_service"` + } + if err := json.NewDecoder(rc).Decode(&payload); err != nil { + return err + } + rc.Close() + const defaultRateKey = "service:,env:" + ps.mu.Lock() + defer ps.mu.Unlock() + ps.rates = payload.Rates + if v, ok := ps.rates[defaultRateKey]; ok { + ps.defaultRate = v + delete(ps.rates, defaultRateKey) + } + return nil +} + +// getRate returns the sampling rate to be used for the given span. Callers must +// guard the span. 
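+// The lookup key mirrors the agent's rate_by_service payload: a span from service
+// "checkout" in env "prod" (illustrative names) is looked up as "service:checkout,env:prod",
+// while "service:,env:" carries the default rate.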
+func (ps *prioritySampler) getRate(spn *Span) float64 { + key := "service:" + spn.service + ",env:" + spn.meta[ext.Environment] + ps.mu.RLock() + defer ps.mu.RUnlock() + if rate, ok := ps.rates[key]; ok { + return rate + } + return ps.defaultRate +} + +// apply applies sampling priority to the given span. Caller must ensure it is safe +// to modify the span. +func (ps *prioritySampler) apply(spn *Span) { + rate := ps.getRate(spn) + if sampledByRate(spn.traceID, rate) { + spn.setSamplingPriority(ext.PriorityAutoKeep, samplernames.AgentRate) + } else { + spn.setSamplingPriority(ext.PriorityAutoReject, samplernames.AgentRate) + } + spn.SetTag(keySamplingPriorityRate, rate) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/seelog_leak_workaround.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/seelog_leak_workaround.go new file mode 100644 index 00000000..73422712 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/seelog_leak_workaround.go @@ -0,0 +1,54 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package tracer + +import ( + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/cihub/seelog" +) + +// This workaround fixes goroutine leaks caused by seelog. +// See https://github.com/DataDog/dd-trace-go/issues/2987. +// +// TODO(felixge): Remove this once a proper fix lands in the agent or after we +// drop the agent dependency that causes this [1]. +// +// [1] github.com/DataDog/datadog-agent/pkg/util/log +func init() { + if env.Get("DD_TRACE_DEBUG_SEELOG_WORKAROUND") == "false" { + return + } + + // Close the seelog loggers to fix the goroutine leaks. + seelog.Default.Close() + seelog.Disabled.Close() + + // Setup a new seelog logger that doesn't leak goroutines. + constraints, err := seelog.NewMinMaxConstraints(seelog.TraceLvl, seelog.CriticalLvl) + if err != nil { + log.Error("failed to create seelog constraints: %v", err.Error()) + return + } + console, err := seelog.NewConsoleWriter() + if err != nil { + log.Error("failed to create seelog console writer: %v", err.Error()) + return + } + dispatcher, err := seelog.NewSplitDispatcher(seelog.DefaultFormatter, []any{console}) + if err != nil { + log.Error("failed to create seelog dispatcher: %v", err.Error()) + return + } + seelog.Default = seelog.NewSyncLogger( + seelog.NewLoggerConfig( + constraints, + []*seelog.LogLevelException{}, + dispatcher, + ), + ) + seelog.Current = seelog.Default +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/slog.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/slog.go new file mode 100644 index 00000000..9737c54e --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/slog.go @@ -0,0 +1,100 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package tracer + +import ( + "context" + "log/slog" + "strings" + + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// groupOrAttrs holds either a group name or a list of slog.Attrs. 
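+// These are later flattened by Handle into dotted "group.attr=value" pairs, so
+// (illustrative) a handler derived via WithGroup("db") logging an attr ms=3 on
+// message "query" ends up as the internal log line "query db.ms=3".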
+type groupOrAttrs struct {
+	group string      // group name if non-empty
+	attrs []slog.Attr // attrs if non-empty
+}
+
+// slogHandler implements the slog.Handler interface to dispatch messages to our
+// internal logger.
+type slogHandler struct {
+	goas []groupOrAttrs
+}
+
+func (h slogHandler) Enabled(_ context.Context, lvl slog.Level) bool {
+	if lvl <= slog.LevelDebug {
+		return log.DebugEnabled()
+	}
+	// TODO(fg): Implement generic log level checking in the internal logger.
+	// But we're not concerned with slog perf, so this is okay for now.
+	return true
+}
+
+func (h slogHandler) Handle(_ context.Context, r slog.Record) error {
+	goas := h.goas
+
+	if r.NumAttrs() == 0 {
+		// If the record has no Attrs, remove groups at the end of the list; they are empty.
+		for len(goas) > 0 && goas[len(goas)-1].group != "" {
+			goas = goas[:len(goas)-1]
+		}
+	}
+
+	parts := make([]string, 0, len(goas)+r.NumAttrs())
+	formatGroup := ""
+
+	for _, goa := range goas {
+		if goa.group != "" {
+			formatGroup += goa.group + "."
+		} else {
+			for _, a := range goa.attrs {
+				parts = append(parts, formatGroup+a.String())
+			}
+		}
+	}
+
+	r.Attrs(func(a slog.Attr) bool {
+		parts = append(parts, formatGroup+a.String())
+		return true
+	})
+
+	extra := strings.Join(parts, " ")
+	switch r.Level {
+	case slog.LevelDebug:
+		log.Debug("%s %s", r.Message, extra)
+	case slog.LevelInfo:
+		log.Info("%s %s", r.Message, extra)
+	case slog.LevelWarn:
+		log.Warn("%s %s", r.Message, extra)
+	case slog.LevelError:
+		log.Error("%s %s", r.Message, extra)
+	}
+	return nil
+}
+
+func (h slogHandler) withGroupOrAttrs(goa groupOrAttrs) slogHandler {
+	h.goas = append(h.goas, goa)
+	return h
+}
+
+// WithGroup returns a new Handler whose groups consist of
+// both the receiver's groups and the argument.
+func (h slogHandler) WithGroup(name string) slog.Handler {
+	if name == "" {
+		return h
+	}
+	return h.withGroupOrAttrs(groupOrAttrs{group: name})
+}
+
+// WithAttrs returns a new Handler whose attributes consist of
+// both the receiver's attributes and the arguments.
+func (h slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+	if len(attrs) == 0 {
+		return h
+	}
+	return h.withGroupOrAttrs(groupOrAttrs{attrs: attrs})
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span.go
new file mode 100644
index 00000000..9c052e8d
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span.go
@@ -0,0 +1,1021 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+ +//go:generate go run github.com/tinylib/msgp -unexported -marshal=false -o=span_msgp.go -tests=false + +package tracer + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "reflect" + "runtime" + "runtime/pprof" + rt "runtime/trace" + "strconv" + "strings" + "sync" + "time" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/instrumentation/errortrace" + sharedinternal "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + illmobs "github.com/DataDog/dd-trace-go/v2/internal/llmobs" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/orchestrion" + "github.com/DataDog/dd-trace-go/v2/internal/samplernames" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + "github.com/DataDog/dd-trace-go/v2/internal/traceprof" + + "github.com/tinylib/msgp/msgp" + + "golang.org/x/xerrors" + + "github.com/DataDog/datadog-agent/pkg/obfuscate" +) + +type ( + // spanList implements msgp.Encodable on top of a slice of spans. + spanList []*Span + + // spanLists implements msgp.Decodable on top of a slice of spanList. + // This type is only used in tests. + spanLists []spanList +) + +var ( + _ msgp.Encodable = (*spanList)(nil) + _ msgp.Decodable = (*spanLists)(nil) +) + +// errorConfig holds customization options for setting error tags. +type errorConfig struct { + noDebugStack bool + stackFrames uint + stackSkip uint +} + +// AsMap places tags and span properties into a map and returns it. +// +// Note that this is not performant, nor are spans guaranteed to have all of their +// properties set at any time during normal operation! This is used for testing only, +// and should not be used in non-test code, or you may run into performance or other +// issues. +func (s *Span) AsMap() map[string]interface{} { + m := make(map[string]interface{}) + if s == nil { + return m + } + m[ext.SpanName] = s.name + m[ext.ServiceName] = s.service + m[ext.ResourceName] = s.resource + m[ext.SpanType] = s.spanType + m[ext.MapSpanStart] = s.start + m[ext.MapSpanDuration] = s.duration + for k, v := range s.meta { + m[k] = v + } + for k, v := range s.metrics { + m[k] = v + } + for k, v := range s.metaStruct { + m[k] = v + } + m[ext.MapSpanID] = s.spanID + m[ext.MapSpanTraceID] = s.traceID + m[ext.MapSpanParentID] = s.parentID + m[ext.MapSpanError] = s.error + if events := s.spanEventsAsJSONString(); events != "" { + m[ext.MapSpanEvents] = events + } + return m +} + +func (s *Span) spanEventsAsJSONString() string { + if !s.supportsEvents { + return s.meta["events"] + } + if s.spanEvents == nil { + return "" + } + events, err := json.Marshal(s.spanEvents) + if err != nil { + log.Error("failed to marshal span events: %s", err.Error()) + return "" + } + return string(events) +} + +// Span represents a computation. Callers must call Finish when a Span is +// complete to ensure it's submitted. +type Span struct { + mu sync.RWMutex `msg:"-"` // all fields are protected by this RWMutex + + name string `msg:"name"` // operation name + service string `msg:"service"` // service name (i.e. "grpc.server", "http.request") + resource string `msg:"resource"` // resource name (i.e. "/user?id=123", "SELECT * FROM users") + spanType string `msg:"type"` // protocol associated with the span (i.e. 
"web", "db", "cache") + start int64 `msg:"start"` // span start time expressed in nanoseconds since epoch + duration int64 `msg:"duration"` // duration of the span expressed in nanoseconds + meta map[string]string `msg:"meta,omitempty"` // arbitrary map of metadata + metaStruct metaStructMap `msg:"meta_struct,omitempty"` // arbitrary map of metadata with structured values + metrics map[string]float64 `msg:"metrics,omitempty"` // arbitrary map of numeric metrics + spanID uint64 `msg:"span_id"` // identifier of this span + traceID uint64 `msg:"trace_id"` // lower 64-bits of the root span identifier + parentID uint64 `msg:"parent_id"` // identifier of the span's direct parent + error int32 `msg:"error"` // error status of the span; 0 means no errors + spanLinks []SpanLink `msg:"span_links,omitempty"` // links to other spans + spanEvents []spanEvent `msg:"span_events,omitempty"` // events produced related to this span + + goExecTraced bool `msg:"-"` + noDebugStack bool `msg:"-"` // disables debug stack traces + finished bool `msg:"-"` // true if the span has been submitted to a tracer. Can only be read/modified if the trace is locked. + context *SpanContext `msg:"-"` // span propagation context + integration string `msg:"-"` // where the span was started from, such as a specific contrib or "manual" + supportsEvents bool `msg:"-"` // whether the span supports native span events or not + + pprofCtxActive context.Context `msg:"-"` // contains pprof.WithLabel labels to tell the profiler more about this span + pprofCtxRestore context.Context `msg:"-"` // contains pprof.WithLabel labels of the parent span (if any) that need to be restored when this span finishes + + taskEnd func() // ends execution tracer (runtime/trace) task, if started +} + +// Context yields the SpanContext for this Span. Note that the return +// value of Context() is still valid after a call to Finish(). This is +// called the span context and it is different from Go's context. +func (s *Span) Context() *SpanContext { + if s == nil { + return nil + } + return s.context +} + +// SetBaggageItem sets a key/value pair as baggage on the span. Baggage items +// are propagated down to descendant spans and injected cross-process. Use with +// care as it adds extra load onto your tracing layer. +func (s *Span) SetBaggageItem(key, val string) { + if s == nil { + return + } + s.context.setBaggageItem(key, val) +} + +// BaggageItem gets the value for a baggage item given its key. Returns the +// empty string if the value isn't found in this Span. +func (s *Span) BaggageItem(key string) string { + if s == nil { + return "" + } + return s.context.baggageItem(key) +} + +// SetTag adds a set of key/value metadata to the span. +func (s *Span) SetTag(key string, value interface{}) { + if s == nil { + return + } + // To avoid dumping the memory address in case value is a pointer, we dereference it. + // Any pointer value that is a pointer to a pointer will be dumped as a string. + value = dereference(value) + s.mu.Lock() + defer s.mu.Unlock() + + // We don't lock spans when flushing, so we could have a data race when + // modifying a span as it's being flushed. This protects us against that + // race, since spans are marked `finished` before we flush them. 
+	if s.finished {
+		return
+	}
+	switch key {
+	case ext.Error:
+		s.setTagError(value, errorConfig{
+			noDebugStack: s.noDebugStack,
+		})
+		return
+	case ext.Component:
+		integration, ok := value.(string)
+		if ok {
+			s.integration = integration
+		}
+	}
+	if v, ok := value.(bool); ok {
+		s.setTagBool(key, v)
+		return
+	}
+	if v, ok := value.(string); ok {
+		if key == ext.ResourceName && s.pprofCtxActive != nil && spanResourcePIISafe(s) {
+			// If the user overrides the resource name for the span,
+			// update the endpoint label for the runtime profilers.
+			//
+			// We don't change s.pprofCtxRestore since that should
+			// stay as the original parent span context regardless
+			// of what we change at a lower level.
+			s.pprofCtxActive = pprof.WithLabels(s.pprofCtxActive, pprof.Labels(traceprof.TraceEndpoint, v))
+			pprof.SetGoroutineLabels(s.pprofCtxActive)
+		}
+		s.setMeta(key, v)
+		return
+	}
+	if v, ok := sharedinternal.ToFloat64(value); ok {
+		s.setMetric(key, v)
+		return
+	}
+	if v, ok := value.(fmt.Stringer); ok {
+		defer func() {
+			if e := recover(); e != nil {
+				if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() {
+					// If .String() panics due to a nil receiver, we want to catch this
+					// and replace the string value with "", just as Sprintf does.
+					// Other panics should not be handled.
+					s.setMeta(key, "")
+					return
+				}
+				panic(e)
+			}
+		}()
+		s.setMeta(key, v.String())
+		return
+	}
+
+	if v, ok := value.([]byte); ok {
+		s.setMeta(key, string(v))
+		return
+	}
+
+	if value != nil {
+		// Arrays will be translated to dot notation. e.g.
+		// {"myarr.0": "foo", "myarr.1": "bar"}
+		// which will be displayed as an array in the UI.
+		switch reflect.TypeOf(value).Kind() {
+		case reflect.Slice:
+			slice := reflect.ValueOf(value)
+			for i := 0; i < slice.Len(); i++ {
+				key := fmt.Sprintf("%s.%d", key, i)
+				v := slice.Index(i)
+				if num, ok := sharedinternal.ToFloat64(v.Interface()); ok {
+					s.setMetric(key, num)
+				} else {
+					s.setMeta(key, fmt.Sprintf("%v", v))
+				}
+			}
+			return
+		}
+
+		// Can be sent as messagepack in `meta_struct` instead of `meta`
+		// reserved for internal use only
+		if v, ok := value.(sharedinternal.MetaStructValue); ok {
+			s.setMetaStruct(key, v.Value)
+			return
+		}
+
+		// Support for v1 shim meta struct values (only _dd.stack uses this)
+		if key == "_dd.stack" {
+			s.setMetaStruct(key, value)
+			return
+		}
+
+		// Add this trace source tag to propagating tags and to span tags
+		// reserved for internal use only
+		if v, ok := value.(sharedinternal.TraceSourceTagValue); ok {
+			s.context.trace.setTraceSourcePropagatingTag(key, v.Value)
+		}
+	}
+
+	// not numeric, not a string, not a fmt.Stringer, not a bool, and not an error
+	s.setMeta(key, fmt.Sprint(value))
+}
+
+// setSamplingPriority locks the span, then updates the sampling priority.
+// It also updates the trace's sampling priority.
+func (s *Span) setSamplingPriority(priority int, sampler samplernames.SamplerName) {
+	if s == nil {
+		return
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.setSamplingPriorityLocked(priority, sampler)
+}
+
+// Root returns the root span of the span's trace. The return value shouldn't be
+// nil as long as the root span is valid and not finished.
+func (s *Span) Root() *Span {
+	if s == nil || s.context == nil {
+		return nil
+	}
+	if s.context.trace == nil {
+		return nil
+	}
+	return s.context.trace.root
+}
+
+// SetUser associates user information with the current trace to which the
+// provided span belongs. The options can be used to tune which bits of user
+// information get monitored.
In case of distributed traces, +// the user id can be propagated across traces using the WithPropagation() option. +// See https://docs.datadoghq.com/security_platform/application_security/setup_and_configure/?tab=set_user#add-user-information-to-traces +func (s *Span) SetUser(id string, opts ...UserMonitoringOption) { + if s == nil { + return + } + cfg := UserMonitoringConfig{ + Metadata: make(map[string]string), + } + for _, fn := range opts { + fn(&cfg) + } + root := s.Root() + trace := root.context.trace + root.mu.Lock() + defer root.mu.Unlock() + + // We don't lock spans when flushing, so we could have a data race when + // modifying a span as it's being flushed. This protects us against that + // race, since spans are marked `finished` before we flush them. + if root.finished { + return + } + if cfg.PropagateID { + // Delete usr.id from the tags since _dd.p.usr.id takes precedence + delete(root.meta, keyUserID) + idenc := base64.StdEncoding.EncodeToString([]byte(id)) + trace.setPropagatingTag(keyPropagatedUserID, idenc) + s.context.updated = true + } else { + if trace.hasPropagatingTag(keyPropagatedUserID) { + // Unset the propagated user ID so that a propagated user ID coming from upstream won't be propagated anymore. + trace.unsetPropagatingTag(keyPropagatedUserID) + s.context.updated = true + } + delete(root.meta, keyPropagatedUserID) + } + + usrData := map[string]string{ + keyUserID: id, + keyUserLogin: cfg.Login, + keyUserEmail: cfg.Email, + keyUserName: cfg.Name, + keyUserScope: cfg.Scope, + keyUserRole: cfg.Role, + keyUserSessionID: cfg.SessionID, + } + for k, v := range cfg.Metadata { + usrData[fmt.Sprintf("usr.%s", k)] = v + } + for k, v := range usrData { + if v != "" { + // setMeta is used since the span is already locked + root.setMeta(k, v) + } + } +} + +// StartChild starts a new child span with the given operation name and options. +func (s *Span) StartChild(operationName string, opts ...StartSpanOption) *Span { + if s == nil { + return nil + } + opts = append(opts, ChildOf(s.Context())) + return getGlobalTracer().StartSpan(operationName, opts...) +} + +// setSamplingPriorityLocked updates the sampling priority. +// It also updates the trace's sampling priority. +func (s *Span) setSamplingPriorityLocked(priority int, sampler samplernames.SamplerName) { + // We don't lock spans when flushing, so we could have a data race when + // modifying a span as it's being flushed. This protects us against that + // race, since spans are marked `finished` before we flush them. + if s.finished { + return + } + s.setMetric(keySamplingPriority, float64(priority)) + s.context.setSamplingPriority(priority, sampler) +} + +// setTagError sets the error tag. It accounts for various valid scenarios. +// This method is not safe for concurrent use. +func (s *Span) setTagError(value interface{}, cfg errorConfig) { + setError := func(yes bool) { + if yes { + if s.error == 0 { + // new error + s.context.errors.Add(1) + } + s.error = 1 + } else { + if s.error > 0 { + // flip from active to inactive + s.context.errors.Add(-1) + } + s.error = 0 + } + } + // We don't lock spans when flushing, so we could have a data race when + // modifying a span as it's being flushed. This protects us against that + // race, since spans are marked `finished` before we flush them. + if s.finished { + return + } + switch v := value.(type) { + case bool: + // bool value as per Opentracing spec. + setError(v) + case error: + // if anyone sets an error value as the tag, be nice here + // and provide all the benefits. 
+		setError(true)
+		s.setMeta(ext.ErrorMsg, v.Error())
+		s.setMeta(ext.ErrorType, reflect.TypeOf(v).String())
+		switch err := v.(type) {
+		case xerrors.Formatter:
+			s.setMeta(ext.ErrorDetails, fmt.Sprintf("%+v", v))
+		case fmt.Formatter:
+			// pkg/errors approach
+			s.setMeta(ext.ErrorDetails, fmt.Sprintf("%+v", v))
+		case *errortrace.TracerError:
+			// instrumentation/errortrace approach
+			s.setMeta(ext.ErrorDetails, fmt.Sprintf("%+v", v))
+			if !cfg.noDebugStack {
+				s.setMeta(ext.ErrorStack, err.Format())
+			}
+			return
+		}
+		if !cfg.noDebugStack {
+			s.setMeta(ext.ErrorStack, takeStacktrace(cfg.stackFrames, cfg.stackSkip))
+		}
+	case nil:
+		// no error
+		setError(false)
+	default:
+		// in all other cases, let's assume that setting this tag
+		// is the result of an error.
+		setError(true)
+	}
+}
+
+// defaultStackLength specifies the default maximum size of a stack trace.
+const defaultStackLength = 32
+
+// takeStacktrace takes a stack trace of maximum n entries, skipping the first skip entries.
+// If n is 0, up to defaultStackLength (32) entries are retrieved.
+func takeStacktrace(n, skip uint) string {
+	telemetry.Count(telemetry.NamespaceTracers, "errorstack.source", []string{"source:takeStacktrace"}).Submit(1)
+	now := time.Now()
+	defer func() {
+		dur := float64(time.Since(now))
+		telemetry.Distribution(telemetry.NamespaceTracers, "errorstack.duration", []string{"source:takeStacktrace"}).Submit(dur)
+	}()
+	if n == 0 {
+		n = defaultStackLength
+	}
+	var builder strings.Builder
+	pcs := make([]uintptr, n)
+
+	// +2 to exclude runtime.Callers and takeStacktrace
+	numFrames := runtime.Callers(2+int(skip), pcs)
+	if numFrames == 0 {
+		return ""
+	}
+	frames := runtime.CallersFrames(pcs[:numFrames])
+	for i := 0; ; i++ {
+		frame, more := frames.Next()
+		if i != 0 {
+			builder.WriteByte('\n')
+		}
+		builder.WriteString(frame.Function)
+		builder.WriteByte('\n')
+		builder.WriteByte('\t')
+		builder.WriteString(frame.File)
+		builder.WriteByte(':')
+		builder.WriteString(strconv.Itoa(frame.Line))
+		if !more {
+			break
+		}
+	}
+	return builder.String()
+}
+
+// setMeta sets a string tag. This method is not safe for concurrent use.
+func (s *Span) setMeta(key, v string) {
+	if s.meta == nil {
+		s.meta = make(map[string]string, 1)
+	}
+	delete(s.metrics, key)
+	switch key {
+	case ext.SpanName:
+		s.name = v
+	case ext.ServiceName:
+		s.service = v
+	case ext.ResourceName:
+		s.resource = v
+	case ext.SpanType:
+		s.spanType = v
+	default:
+		s.meta[key] = v
+	}
+}
+
+func (s *Span) setMetaStruct(key string, v any) {
+	if s.metaStruct == nil {
+		s.metaStruct = make(metaStructMap, 1)
+	}
+	s.metaStruct[key] = v
+}
+
+// setTagBool sets a boolean tag on the span.
+func (s *Span) setTagBool(key string, v bool) {
+	switch key {
+	case ext.AnalyticsEvent:
+		if v {
+			s.setMetric(ext.EventSampleRate, 1.0)
+		} else {
+			s.setMetric(ext.EventSampleRate, 0.0)
+		}
+	case ext.ManualDrop:
+		if v {
+			s.setSamplingPriorityLocked(ext.PriorityUserReject, samplernames.Manual)
+		}
+	case ext.ManualKeep:
+		if v {
+			s.setSamplingPriorityLocked(ext.PriorityUserKeep, samplernames.Manual)
+		}
+	default:
+		if v {
+			s.setMeta(key, "true")
+		} else {
+			s.setMeta(key, "false")
+		}
+	}
+}
+
+// setMetric sets a numeric tag, in our case called a metric. This method
+// is not safe for concurrent use.
+func (s *Span) setMetric(key string, v float64) { + if s.metrics == nil { + s.metrics = make(map[string]float64, 1) + } + delete(s.meta, key) + switch key { + case ext.ManualKeep: + if v == float64(samplernames.AppSec) { + s.setSamplingPriorityLocked(ext.PriorityUserKeep, samplernames.AppSec) + } + case "_sampling_priority_v1shim": + // We have this for backward compatibility with the v1 shim. + s.setSamplingPriorityLocked(int(v), samplernames.Manual) + default: + s.metrics[key] = v + } +} + +// AddLink appends the given link to the span's span links. +func (s *Span) AddLink(link SpanLink) { + if s == nil { + return + } + s.mu.Lock() + defer s.mu.Unlock() + + // We don't lock spans when flushing, so we could have a data race when + // modifying a span as it's being flushed. This protects us against that + // race, since spans are marked `finished` before we flush them. + if s.finished { + // already finished + return + } + s.spanLinks = append(s.spanLinks, link) +} + +// serializeSpanLinksInMeta saves span links as a JSON string under `Span[meta][_dd.span_links]`. +func (s *Span) serializeSpanLinksInMeta() { + if len(s.spanLinks) == 0 { + return + } + spanLinkBytes, err := json.Marshal(s.spanLinks) + if err != nil { + log.Debug("Unable to marshal span links. Not adding span links to span meta.") + return + } + if s.meta == nil { + s.meta = make(map[string]string) + } + s.meta["_dd.span_links"] = string(spanLinkBytes) +} + +// serializeSpanEvents sets the span events from the current span in the correct transport, depending on whether the +// agent supports the native method or not. +func (s *Span) serializeSpanEvents() { + if len(s.spanEvents) == 0 { + return + } + // if span events are natively supported by the agent, there's nothing to do + // as the events will be already included when the span is serialized. + if s.supportsEvents { + return + } + // otherwise, we need to serialize them as a string tag and remove them from the struct + // so they are not sent twice. + b, err := json.Marshal(s.spanEvents) + s.spanEvents = nil + if err != nil { + log.Debug("Unable to marshal span events; events dropped from span meta\n%s", err.Error()) + return + } + s.meta["events"] = string(b) +} + +// Finish closes this Span (but not its children) providing the duration +// of its part of the tracing session. +func (s *Span) Finish(opts ...FinishOption) { + if s == nil { + return + } + + t := now() + if len(opts) > 0 { + cfg := FinishConfig{ + NoDebugStack: s.noDebugStack, + } + for _, fn := range opts { + if fn == nil { + continue + } + fn(&cfg) + } + if !cfg.FinishTime.IsZero() { + t = cfg.FinishTime.UnixNano() + } + if cfg.Error != nil { + s.mu.Lock() + s.setTagError(cfg.Error, errorConfig{ + noDebugStack: cfg.NoDebugStack, + stackFrames: cfg.StackFrames, + stackSkip: cfg.SkipStackFrames, + }) + s.mu.Unlock() + } + } + + if s.goExecTraced && rt.IsEnabled() { + // Only tag spans as traced if they both started & ended with + // execution tracing enabled. This is technically not sufficient + // for spans which could straddle the boundary between two + // execution traces, but there's really nothing we can do in + // those cases since execution tracing tasks aren't recorded in + // traces if they started before the trace. + s.SetTag("go_execution_traced", "yes") + } else if s.goExecTraced { + // If the span started with tracing enabled, but tracing wasn't + // enabled when the span finished, we still have some data to + // show. 
If tracing wasn't enabled when the span started, we
+		// won't have data in the execution trace to identify it, so
+		// there's nothing we can show.
+		s.SetTag("go_execution_traced", "partial")
+	}
+
+	if s.Root() == s {
+		if tr, ok := getGlobalTracer().(*tracer); ok && tr.rulesSampling.traces.enabled() {
+			if !s.context.trace.isLocked() && s.context.trace.propagatingTag(keyDecisionMaker) != "-4" {
+				tr.rulesSampling.SampleTrace(s)
+			}
+		}
+	}
+
+	s.finish(t)
+	orchestrion.GLSPopValue(sharedinternal.ActiveSpanKey)
+}
+
+// SetOperationName sets or changes the operation name.
+func (s *Span) SetOperationName(operationName string) {
+	if s == nil {
+		return
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	// We don't lock spans when flushing, so we could have a data race when
+	// modifying a span as it's being flushed. This protects us against that
+	// race, since spans are marked `finished` before we flush them.
+	if s.finished {
+		// already finished
+		return
+	}
+	s.name = operationName
+}
+
+func (s *Span) finish(finishTime int64) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	// We don't lock spans when flushing, so we could have a data race when
+	// modifying a span as it's being flushed. This protects us against that
+	// race, since spans are marked `finished` before we flush them.
+	if s.finished {
+		// already finished
+		return
+	}
+
+	s.serializeSpanLinksInMeta()
+	s.serializeSpanEvents()
+
+	if s.duration == 0 {
+		s.duration = finishTime - s.start
+	}
+	if s.duration < 0 {
+		s.duration = 0
+	}
+	if s.taskEnd != nil {
+		s.taskEnd()
+	}
+
+	keep := true
+	tracer, hasTracer := getGlobalTracer().(*tracer)
+	if hasTracer {
+		if !tracer.config.enabled.current {
+			return
+		}
+		if tracer.config.canDropP0s() {
+			// the agent supports dropping p0's in the client
+			keep = shouldKeep(s)
+		}
+		if tracer.config.debugAbandonedSpans {
+			// the tracer supports debugging abandoned spans
+			tracer.submitAbandonedSpan(s, true)
+		}
+		tracer.spansFinished.Inc(s.integration)
+	}
+	if keep {
+		// a single kept span keeps the whole trace.
+		s.context.trace.keep()
+	}
+	if log.DebugEnabled() {
+		// avoid allocating the ...interface{} argument if debug logging is disabled
+		log.Debug("Finished Span: %v, Operation: %s, Resource: %s, Tags: %v, %v", //nolint:gocritic // Debug logging needs full span representation
+			s, s.name, s.resource, s.meta, s.metrics)
+	}
+	s.context.finish()
+
+	// compute stats after finishing the span. This ensures any normalization or tag propagation has been applied
+	if hasTracer {
+		tracer.submit(s)
+	}
+
+	if s.pprofCtxRestore != nil {
+		// Restore the labels of the parent span so any CPU samples after this
+		// point are attributed correctly.
+		pprof.SetGoroutineLabels(s.pprofCtxRestore)
+	}
+}
+
+// textNonParsable specifies the text that will be assigned to resources for which the resource
+// cannot be parsed due to an obfuscation error.
+const textNonParsable = "Non-parsable SQL query"
+
+// obfuscatedResource returns the obfuscated version of the given resource. It is
+// obfuscated using the given obfuscator for the given span type typ.
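+// For example (illustrative input), for a "sql" span type a resource such as
+//
+//	SELECT * FROM users WHERE id = 42
+//
+// is expected to come back with the literal replaced, e.g. "SELECT * FROM users WHERE id = ?".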
+func obfuscatedResource(o *obfuscate.Obfuscator, typ, resource string) string {
+	if o == nil {
+		return resource
+	}
+	switch typ {
+	case "sql", "cassandra":
+		oq, err := o.ObfuscateSQLString(resource)
+		if err != nil {
+			log.Error("Error obfuscating stats group resource %q: %v", resource, err.Error())
+			return textNonParsable
+		}
+		return oq.Query
+	case "redis":
+		return o.QuantizeRedisString(resource)
+	default:
+		return resource
+	}
+}
+
+// shouldKeep reports whether the trace should be kept.
+// A single span being kept implies the whole trace being kept.
+func shouldKeep(s *Span) bool {
+	if p, ok := s.context.SamplingPriority(); ok && p > 0 {
+		// positive sampling priorities stay
+		return true
+	}
+	if s.context.errors.Load() > 0 {
+		// traces with any span containing an error get kept
+		return true
+	}
+	if v, ok := s.metrics[ext.EventSampleRate]; ok {
+		return sampledByRate(s.traceID, v)
+	}
+	return false
+}
+
+// shouldComputeStats reports whether stats should be computed for this span.
+// Warning: callers must guard!
+func shouldComputeStats(s *Span) bool {
+	if v, ok := s.metrics[keyMeasured]; ok && v == 1 {
+		return true
+	}
+	if v, ok := s.metrics[keyTopLevel]; ok && v == 1 {
+		return true
+	}
+	return false
+}
+
+// String returns a human-readable representation of the span. Not for
+// production, just debugging.
+func (s *Span) String() string {
+	if s == nil {
+		return ""
+	}
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	lines := []string{
+		fmt.Sprintf("Name: %s", s.name),
+		fmt.Sprintf("Service: %s", s.service),
+		fmt.Sprintf("Resource: %s", s.resource),
+		fmt.Sprintf("TraceID: %d", s.traceID),
+		fmt.Sprintf("TraceID128: %s", s.context.TraceID()),
+		fmt.Sprintf("SpanID: %d", s.spanID),
+		fmt.Sprintf("ParentID: %d", s.parentID),
+		fmt.Sprintf("Start: %s", time.Unix(0, s.start)),
+		fmt.Sprintf("Duration: %s", time.Duration(s.duration)),
+		fmt.Sprintf("Error: %d", s.error),
+		fmt.Sprintf("Type: %s", s.spanType),
+		"Tags:",
+	}
+	for key, val := range s.meta {
+		lines = append(lines, fmt.Sprintf("\t%s:%s", key, val))
+	}
+	for key, val := range s.metrics {
+		lines = append(lines, fmt.Sprintf("\t%s:%f", key, val))
+	}
+	return strings.Join(lines, "\n")
+}
+
+// Format implements fmt.Formatter.
+func (s *Span) Format(f fmt.State, c rune) {
+	if s == nil {
+		// nothing to format for a nil span; return early to avoid a nil
+		// dereference in the cases below.
+		fmt.Fprintf(f, "")
+		return
+	}
+	switch c {
+	case 's':
+		fmt.Fprint(f, s.String())
+	case 'v':
+		if svc := globalconfig.ServiceName(); svc != "" {
+			fmt.Fprintf(f, "dd.service=%s ", svc)
+		}
+		if tr := getGlobalTracer(); tr != nil {
+			tc := tr.TracerConf()
+			if tc.EnvTag != "" {
+				fmt.Fprintf(f, "dd.env=%s ", tc.EnvTag)
+			} else if env := env.Get("DD_ENV"); env != "" {
+				fmt.Fprintf(f, "dd.env=%s ", env)
+			}
+			if tc.VersionTag != "" {
+				fmt.Fprintf(f, "dd.version=%s ", tc.VersionTag)
+			} else if v := env.Get("DD_VERSION"); v != "" {
+				fmt.Fprintf(f, "dd.version=%s ", v)
+			}
+		}
+		var traceID string
+		if sharedinternal.BoolEnv("DD_TRACE_128_BIT_TRACEID_LOGGING_ENABLED", true) && s.context.traceID.HasUpper() {
+			traceID = s.context.TraceID()
+		} else {
+			traceID = fmt.Sprintf("%d", s.traceID)
+		}
+		fmt.Fprintf(f, `dd.trace_id=%q `, traceID)
+		fmt.Fprintf(f, `dd.span_id="%d" `, s.spanID)
+		fmt.Fprintf(f, `dd.parent_id="%d"`, s.parentID)
+	default:
+		fmt.Fprintf(f, "%%!%c(tracer.Span=%v)", c, s)
+	}
+}
+
+// AddEvent attaches a new event to the current span.
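+// A minimal usage sketch, assuming an option exists that fills SpanEventConfig.Attributes
+// (hypothetical name shown):
+//
+//	span.AddEvent("cache.miss", WithSpanEventAttributes(map[string]any{"key": "user:42"}))
+//
+// The timestamp defaults to time.Now(), and events added after Finish are dropped.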
+func (s *Span) AddEvent(name string, opts ...SpanEventOption) {
+	if s == nil {
+		return
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	// We don't lock spans when flushing, so we could have a data race when
+	// modifying a span as it's being flushed. This protects us against that
+	// race, since spans are marked `finished` before we flush them.
+	if s.finished {
+		return
+	}
+	cfg := SpanEventConfig{}
+	for _, opt := range opts {
+		opt(&cfg)
+	}
+	if cfg.Time.IsZero() {
+		cfg.Time = time.Now()
+	}
+	event := spanEvent{
+		Name:         name,
+		TimeUnixNano: uint64(cfg.Time.UnixNano()),
+	}
+	if s.supportsEvents {
+		event.Attributes = toSpanEventAttributeMsg(cfg.Attributes)
+	} else {
+		event.RawAttributes = cfg.Attributes
+	}
+	s.spanEvents = append(s.spanEvents, event)
+}
+
+func setLLMObsPropagatingTags(ctx context.Context, spanCtx *SpanContext) {
+	llmSpan, ok := illmobs.ActiveLLMSpanFromContext(ctx)
+	if !ok {
+		return
+	}
+	spanCtx.trace.setPropagatingTag(keyPropagatedLLMObsParentID, llmSpan.SpanID())
+	spanCtx.trace.setPropagatingTag(keyPropagatedLLMObsTraceID, llmSpan.TraceID())
+	spanCtx.trace.setPropagatingTag(keyPropagatedLLMObsMLAPP, llmSpan.MLApp())
+}
+
+// used in internal/civisibility/integrations/manual_api_common.go using linkname
+func getMeta(s *Span, key string) (string, bool) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	val, ok := s.meta[key]
+	return val, ok
+}
+
+// used in internal/civisibility/integrations/manual_api_common.go using linkname
+func getMetric(s *Span, key string) (float64, bool) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	val, ok := s.metrics[key]
+	return val, ok
+}
+
+const (
+	keySamplingPriority     = "_sampling_priority_v1"
+	keySamplingPriorityRate = "_dd.agent_psr"
+	keyDecisionMaker        = "_dd.p.dm"
+	keyServiceHash          = "_dd.dm.service_hash"
+	keyOrigin               = "_dd.origin"
+	keyReparentID           = "_dd.parent_id"
+	// keyHostname can be used to override the agent's hostname detection when using `WithHostname`.
+	// The hostname is otherwise set via auto-detection.
+	keyHostname                = "_dd.hostname"
+	keyRulesSamplerAppliedRate = "_dd.rule_psr"
+	keyRulesSamplerLimiterRate = "_dd.limit_psr"
+	keyMeasured                = "_dd.measured"
+	// keyTopLevel is the key of the top level metric indicating if a span is top level.
+	// A top level span is a local root (parent span of the local trace) or the first span of each service.
+	keyTopLevel = "_dd.top_level"
+	// keyPropagationError holds any error from propagated trace tags (if any)
+	keyPropagationError = "_dd.propagation_error"
+	// keySpanSamplingMechanism specifies the sampling mechanism by which an individual span was sampled
+	keySpanSamplingMechanism = "_dd.span_sampling.mechanism"
+	// keySingleSpanSamplingRuleRate specifies the configured sampling probability for the single span sampling rule.
+	keySingleSpanSamplingRuleRate = "_dd.span_sampling.rule_rate"
+	// keySingleSpanSamplingMPS specifies the configured limit for the single span sampling rule
+	// that the span matched. If there is no configured limit, then this tag is omitted.
+	keySingleSpanSamplingMPS = "_dd.span_sampling.max_per_second"
+	// keyPropagatedUserID holds the propagated user identifier, if user id propagation is enabled.
+	keyPropagatedUserID = "_dd.p.usr.id"
+	// keyPropagatedTraceSource holds a 2 character hexadecimal string representation of the product responsible
+	// for the span creation.
+	keyPropagatedTraceSource = "_dd.p.ts"
+	// keyTraceID128 is the lowercase, hex encoded upper 64 bits of a 128-bit trace id, if present.
+ keyTraceID128 = "_dd.p.tid" + // keySpanAttributeSchemaVersion holds the selected DD_TRACE_SPAN_ATTRIBUTE_SCHEMA version. + keySpanAttributeSchemaVersion = "_dd.trace_span_attribute_schema" + // keyPeerServiceSource indicates the precursor tag that was used as the value of peer.service. + keyPeerServiceSource = "_dd.peer.service.source" + // keyPeerServiceRemappedFrom indicates the previous value for peer.service, in case remapping happened. + keyPeerServiceRemappedFrom = "_dd.peer.service.remapped_from" + // keyBaseService contains the globally configured tracer service name. It is only set for spans that override it. + keyBaseService = "_dd.base_service" + // keyProcessTags contains a list of process tags to identify the service. + keyProcessTags = "_dd.tags.process" + // keyKnuthSamplingRate holds the propagated Knuth-based sampling rate applied by agent or trace sampling rules. + // Value is a string with up to 6 decimal digits and is forwarded unchanged. + keyKnuthSamplingRate = "_dd.p.ksr" + // keyPropagatedLLMObsParentID contains the propagated llmobs span ID. + keyPropagatedLLMObsParentID = "_dd.p.llmobs_parent_id" + // keyPropagatedLLMObsMLAPP contains the propagated ML App. + keyPropagatedLLMObsMLAPP = "_dd.p.llmobs_ml_app" + // keyPropagatedLLMObsTraceID contains the propagated llmobs trace ID. + keyPropagatedLLMObsTraceID = "_dd.p.llmobs_trace_id" +) + +// The following set of tags is used for user monitoring and set through calls to span.SetUser(). +const ( + keyUserID = "usr.id" + keyUserLogin = "usr.login" + keyUserEmail = "usr.email" + keyUserName = "usr.name" + keyUserOrg = "usr.org" + keyUserRole = "usr.role" + keyUserScope = "usr.scope" + keyUserSessionID = "usr.session_id" +) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_config.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_config.go new file mode 100644 index 00000000..bd87f6bb --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_config.go @@ -0,0 +1,143 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "context" + "time" +) + +// StartSpanOption is a configuration option that can be used with a Tracer's StartSpan method. +type StartSpanOption func(cfg *StartSpanConfig) + +// StartSpanConfig holds the configuration for starting a new span. It is usually passed +// around by reference to one or more StartSpanOption functions which shape it into its +// final form. +type StartSpanConfig struct { + // Parent holds the SpanContext that should be used as a parent for the + // new span. If nil, implementations should return a root span. + Parent *SpanContext + + // StartTime holds the time that should be used as the start time of the span. + // Implementations should use the current time when StartTime.IsZero(). + StartTime time.Time + + // Tags holds a set of key/value pairs that should be set as metadata on the + // new span. + Tags map[string]interface{} + + // SpanID will be the SpanID of the Span, overriding the random number that would + // be generated. If no Parent SpanContext is present, then this will also set the + // TraceID to the same value. + SpanID uint64 + + // Context is the parent context where the span should be stored. 
+	Context context.Context
+
+	// SpanLinks represents causal relationships between this span and other spans.
+	// A span can have multiple links.
+	SpanLinks []SpanLink
+}
+
+// NewStartSpanConfig allows building a base config struct. It accepts the same options as StartSpan.
+// It's useful to reduce the number of operations in any hot path and update it for request/operation specifics.
+func NewStartSpanConfig(opts ...StartSpanOption) *StartSpanConfig {
+	cfg := new(StartSpanConfig)
+	for _, fn := range opts {
+		fn(cfg)
+	}
+	return cfg
+}
+
+// FinishOption is a configuration option that can be used with a Span's Finish method.
+type FinishOption func(cfg *FinishConfig)
+
+// FinishConfig holds the configuration for finishing a span. It is usually passed around by
+// reference to one or more FinishOption functions which shape it into its final form.
+type FinishConfig struct {
+	// FinishTime represents the time that should be set as finishing time for the
+	// span. Implementations should use the current time when FinishTime.IsZero().
+	FinishTime time.Time
+
+	// Error holds an optional error that should be set on the span before
+	// finishing.
+	Error error
+
+	// NoDebugStack will prevent any set errors from generating an attached stack trace tag.
+	NoDebugStack bool
+
+	// StackFrames specifies the number of stack frames to be attached in spans that finish with errors.
+	StackFrames uint
+
+	// SkipStackFrames specifies the offset at which to start reporting stack frames from the stack.
+	SkipStackFrames uint
+}
+
+// NewFinishConfig allows building a base finish config struct. It accepts the same options as Finish.
+// It's useful to reduce the number of operations in any hot path and update it for request/operation specifics.
+func NewFinishConfig(opts ...FinishOption) *FinishConfig {
+	cfg := new(FinishConfig)
+	for _, fn := range opts {
+		fn(cfg)
+	}
+	return cfg
+}
+
+// FinishTime sets the given time as the finishing time for the span. By default,
+// the current time is used.
+func FinishTime(t time.Time) FinishOption {
+	return func(cfg *FinishConfig) {
+		cfg.FinishTime = t
+	}
+}
+
+// WithError marks the span as having had an error. It uses the information from
+// err to set tags such as the error message, error type and stack trace. It has
+// no effect if the error is nil.
+func WithError(err error) FinishOption {
+	return func(cfg *FinishConfig) {
+		cfg.Error = err
+	}
+}
+
+// NoDebugStack prevents any error presented using the WithError finishing option
+// from generating a stack trace. This is useful in situations where errors are frequent
+// and performance is critical.
+func NoDebugStack() FinishOption {
+	return func(cfg *FinishConfig) {
+		cfg.NoDebugStack = true
+	}
+}
+
+// StackFrames limits the number of stack frames included in erroneous spans to n, starting from skip.
+func StackFrames(n, skip uint) FinishOption {
+	if n == 0 {
+		return NoDebugStack()
+	}
+	return func(cfg *FinishConfig) {
+		cfg.StackFrames = n
+		cfg.SkipStackFrames = skip
+	}
+}
+
+// WithFinishConfig merges the given FinishConfig into the one used to finish the span.
+// It is useful when you want to set a common base finish config, reducing the number of function calls in hot loops.
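+// A minimal usage sketch, using NewFinishConfig from this file:
+//
+//	base := NewFinishConfig(NoDebugStack())
+//	// later, per operation:
+//	span.Finish(WithFinishConfig(base))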
+func WithFinishConfig(cfg *FinishConfig) FinishOption {
+	return func(fc *FinishConfig) {
+		fc.Error = cfg.Error
+		if fc.FinishTime.IsZero() {
+			fc.FinishTime = cfg.FinishTime
+		}
+		if !fc.NoDebugStack {
+			fc.NoDebugStack = cfg.NoDebugStack
+		}
+		if fc.SkipStackFrames == 0 {
+			fc.SkipStackFrames = cfg.SkipStackFrames
+		}
+		if fc.StackFrames == 0 {
+			fc.StackFrames = cfg.StackFrames
+		}
+	}
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_event.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_event.go
new file mode 100644
index 00000000..71ce7fd1
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_event.go
@@ -0,0 +1,243 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package tracer
+
+import (
+	"golang.org/x/exp/constraints"
+
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+)
+
+//go:generate go run github.com/tinylib/msgp -unexported -marshal=false -o=span_event_msgp.go -tests=false
+
+// spanEvent represents an event at an instant in time related to this span, though not necessarily occurring during it.
+type spanEvent struct {
+	// Name is the name of the event.
+	Name string `msg:"name" json:"name"`
+
+	// TimeUnixNano is the number of nanoseconds between the Unix epoch and this event.
+	TimeUnixNano uint64 `msg:"time_unix_nano" json:"time_unix_nano"`
+
+	// Attributes is a map of string to attribute.
+	Attributes map[string]*spanEventAttribute `msg:"attributes" json:"-"`
+
+	// RawAttributes is used when native span event serialization is not supported by the agent.
+	RawAttributes map[string]any `msg:"-" json:"attributes,omitempty"`
+}
+
+type spanEventAttribute struct {
+	Type        spanEventAttributeType   `msg:"type" json:"type"`
+	StringValue string                   `msg:"string_value,omitempty"`
+	BoolValue   bool                     `msg:"bool_value,omitempty"`
+	IntValue    int64                    `msg:"int_value,omitempty"`
+	DoubleValue float64                  `msg:"double_value,omitempty"`
+	ArrayValue  *spanEventArrayAttribute `msg:"array_value,omitempty"`
+}
+
+type spanEventAttributeType int32
+
+const (
+	spanEventAttributeTypeString spanEventAttributeType = 0
+	spanEventAttributeTypeBool   spanEventAttributeType = 1
+	spanEventAttributeTypeInt    spanEventAttributeType = 2
+	spanEventAttributeTypeDouble spanEventAttributeType = 3
+	spanEventAttributeTypeArray  spanEventAttributeType = 4
+)
+
+type spanEventArrayAttribute struct {
+	Values []*spanEventArrayAttributeValue `msg:"values" json:"values"`
+}
+
+type spanEventArrayAttributeValue struct {
+	Type        spanEventArrayAttributeValueType `msg:"type"`
+	StringValue string                           `msg:"string_value,omitempty"`
+	BoolValue   bool                             `msg:"bool_value,omitempty"`
+	IntValue    int64                            `msg:"int_value,omitempty"`
+	DoubleValue float64                          `msg:"double_value,omitempty"`
+}
+
+type spanEventArrayAttributeValueType int32
+
+const (
+	spanEventArrayAttributeValueTypeString spanEventArrayAttributeValueType = 0
+	spanEventArrayAttributeValueTypeBool   spanEventArrayAttributeValueType = 1
+	spanEventArrayAttributeValueTypeInt    spanEventArrayAttributeValueType = 2
+	spanEventArrayAttributeValueTypeDouble spanEventArrayAttributeValueType = 3
+)
+
+func toSpanEventAttributeMsg(attrs map[string]any) map[string]*spanEventAttribute {
+	if attrs == nil {
+		return nil
+	}
+	res := make(map[string]*spanEventAttribute, len(attrs))
+	for key, val := range attrs {
+		if msgVal := toSpanEventAttributeValueMsg(val); msgVal != nil {
+			res[key] = msgVal
+		} else {
+			log.Warn("dropped unsupported span event attribute %s (unsupported type: %T)", key, val)
+		}
+	}
+	return res
+}
+
+func toSpanEventAttributeValueMsg(v any) *spanEventAttribute {
+	switch v := v.(type) {
+	// string
+	case string:
+		return &spanEventAttribute{
+			Type:        spanEventAttributeTypeString,
+			StringValue: v,
+		}
+	// bool
+	case bool:
+		return &spanEventAttribute{
+			Type:      spanEventAttributeTypeBool,
+			BoolValue: v,
+		}
+	// int types
+	case int:
+		return intValue(v)
+	case uint:
+		return intValue(v)
+	case int64:
+		return intValue(v)
+	case uint64:
+		return intValue(v)
+	case uint8:
+		return intValue(v)
+	case uint16:
+		return intValue(v)
+	case uint32:
+		return intValue(v)
+	case uintptr:
+		return intValue(v)
+	case int8:
+		return intValue(v)
+	case int16:
+		return intValue(v)
+	case int32:
+		return intValue(v)
+	// float types
+	case float64:
+		return floatValue(v)
+	case float32:
+		return floatValue(v)
+	// string slice
+	case []string:
+		return stringSliceValue(v)
+	// bool slice
+	case []bool:
+		return boolSliceValue(v)
+	// int slice
+	case []int:
+		return intSliceValue(v)
+	case []uint:
+		return intSliceValue(v)
+	case []int64:
+		return intSliceValue(v)
+	case []uint64:
+		return intSliceValue(v)
+	case []uint8:
+		return intSliceValue(v)
+	case []uint16:
+		return intSliceValue(v)
+	case []uint32:
+		return intSliceValue(v)
+	case []uintptr:
+		return intSliceValue(v)
+	case []int8:
+		return intSliceValue(v)
+	case []int16:
+		return intSliceValue(v)
+	case []int32:
+		return intSliceValue(v)
+	// float slice
+	case []float64:
+		return floatSliceValue(v)
+	case []float32:
+		return floatSliceValue(v)
+	default:
+		return nil
+	}
+}
+
+func intValue[T constraints.Integer](v T) *spanEventAttribute {
+	return &spanEventAttribute{
+		Type:     spanEventAttributeTypeInt,
+		IntValue: int64(v),
+	}
+}
+
+func floatValue[T constraints.Float](v T) *spanEventAttribute {
+	return &spanEventAttribute{
+		Type:        spanEventAttributeTypeDouble,
+		DoubleValue: float64(v),
+	}
+}
+
+func stringSliceValue(values []string) *spanEventAttribute {
+	arrayVal := make([]*spanEventArrayAttributeValue, 0, len(values))
+	for _, v := range values {
+		arrayVal = append(arrayVal, &spanEventArrayAttributeValue{
+			Type:        spanEventArrayAttributeValueTypeString,
+			StringValue: v,
+		})
+	}
+	return &spanEventAttribute{
+		Type: spanEventAttributeTypeArray,
+		ArrayValue: &spanEventArrayAttribute{
+			Values: arrayVal,
+		},
+	}
+}
+
+func boolSliceValue(values []bool) *spanEventAttribute {
+	arrayVal := make([]*spanEventArrayAttributeValue, 0, len(values))
+	for _, v := range values {
+		arrayVal = append(arrayVal, &spanEventArrayAttributeValue{
+			Type:      spanEventArrayAttributeValueTypeBool,
+			BoolValue: v,
+		})
+	}
+	return &spanEventAttribute{
+		Type: spanEventAttributeTypeArray,
+		ArrayValue: &spanEventArrayAttribute{
+			Values: arrayVal,
+		},
+	}
+}
+
+func intSliceValue[T constraints.Integer](values []T) *spanEventAttribute {
+	arrayVal := make([]*spanEventArrayAttributeValue, 0, len(values))
+	for _, v := range values {
+		arrayVal = append(arrayVal, &spanEventArrayAttributeValue{
+			Type:     spanEventArrayAttributeValueTypeInt,
+			IntValue: int64(v),
+		})
+	}
+	return &spanEventAttribute{
+		Type: spanEventAttributeTypeArray,
+		ArrayValue: &spanEventArrayAttribute{
+			Values: arrayVal,
+		},
+	}
+}
+
+func floatSliceValue[T constraints.Float](values []T) *spanEventAttribute {
+	arrayVal := make([]*spanEventArrayAttributeValue, 0, len(values))
+	for _, v := range values {
+		arrayVal = append(arrayVal, &spanEventArrayAttributeValue{
+			Type:        spanEventArrayAttributeValueTypeDouble,
+			DoubleValue: float64(v),
+		})
+	}
+	return &spanEventAttribute{
+		Type: spanEventAttributeTypeArray,
+		ArrayValue: &spanEventArrayAttribute{
+			Values: arrayVal,
+		},
+	}
+}
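+
+// For illustration, the attribute value types accepted by the conversion
+// above (anything else is dropped with a warning); the keys are hypothetical:
+//
+//	attrs := map[string]any{
+//		"message": "retrying",     // string
+//		"attempt": 3,              // any integer type
+//		"final":   false,          // bool
+//		"backoff": 1.5,            // any float type
+//		"shards":  []int{1, 2, 3}, // slices of the above
+//	}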
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_event_config.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_event_config.go
new file mode 100644
index 00000000..e30404d7
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_event_config.go
@@ -0,0 +1,36 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package tracer
+
+import "time"
+
+// SpanEventConfig represents the configuration of a span event.
+type SpanEventConfig struct {
+	// Time is the time when the event happened.
+	Time time.Time
+
+	// Attributes is a map of string to attribute.
+	// Only the following types are supported:
+	// string, integer (any), boolean, float (any), []string, []integer (any), []boolean, []float (any)
+	Attributes map[string]any
+}
+
+// SpanEventOption can be used to customize an event created with NewSpanEvent.
+type SpanEventOption func(cfg *SpanEventConfig)
+
+// WithSpanEventTimestamp sets the time when the span event occurred.
+func WithSpanEventTimestamp(tStamp time.Time) SpanEventOption {
+	return func(cfg *SpanEventConfig) {
+		cfg.Time = tStamp
+	}
+}
+
+// WithSpanEventAttributes sets the given attributes for the span event.
+func WithSpanEventAttributes(attributes map[string]any) SpanEventOption {
+	return func(cfg *SpanEventConfig) {
+		cfg.Attributes = attributes
+	}
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_event_msgp.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_event_msgp.go
new file mode 100644
index 00000000..4dd83f65
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_event_msgp.go
@@ -0,0 +1,768 @@
+package tracer
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *spanEvent) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "name": + z.Name, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "time_unix_nano": + z.TimeUnixNano, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "TimeUnixNano") + return + } + case "attributes": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + if z.Attributes == nil { + z.Attributes = make(map[string]*spanEventAttribute, zb0002) + } else if len(z.Attributes) > 0 { + for key := range z.Attributes { + delete(z.Attributes, key) + } + } + for zb0002 > 0 { + zb0002-- + var za0001 string + var za0002 *spanEventAttribute + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Attributes", za0001) + return + } + za0002 = nil + } else { + if za0002 == nil { + za0002 = new(spanEventAttribute) + } + err = za0002.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Attributes", za0001) + return + } + } + z.Attributes[za0001] = za0002 + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *spanEvent) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 + // write "name" + err = en.Append(0x83, 0xa4, 0x6e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Name) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + // write "time_unix_nano" + err = en.Append(0xae, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f) + if err != nil { + return + } + err = en.WriteUint64(z.TimeUnixNano) + if err != nil { + err = msgp.WrapError(err, "TimeUnixNano") + return + } + // write "attributes" + err = en.Append(0xaa, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.Attributes))) + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + for za0001, za0002 := range z.Attributes { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + if za0002 == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = za0002.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Attributes", za0001) + return + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *spanEvent) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 15 + msgp.Uint64Size + 11 + msgp.MapHeaderSize + if z.Attributes != nil { + for za0001, za0002 := range z.Attributes { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + if za0002 == nil { + s += msgp.NilSize + } else { + s += za0002.Msgsize() + } + } + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *spanEventArrayAttribute) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte 
+ _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "values": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Values") + return + } + if cap(z.Values) >= int(zb0002) { + z.Values = (z.Values)[:zb0002] + } else { + z.Values = make([]*spanEventArrayAttributeValue, zb0002) + } + for za0001 := range z.Values { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Values", za0001) + return + } + z.Values[za0001] = nil + } else { + if z.Values[za0001] == nil { + z.Values[za0001] = new(spanEventArrayAttributeValue) + } + err = z.Values[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Values", za0001) + return + } + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *spanEventArrayAttribute) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "values" + err = en.Append(0x81, 0xa6, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Values))) + if err != nil { + err = msgp.WrapError(err, "Values") + return + } + for za0001 := range z.Values { + if z.Values[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Values[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Values", za0001) + return + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *spanEventArrayAttribute) Msgsize() (s int) { + s = 1 + 7 + msgp.ArrayHeaderSize + for za0001 := range z.Values { + if z.Values[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Values[za0001].Msgsize() + } + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *spanEventArrayAttributeValue) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "type": + { + var zb0002 int32 + zb0002, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = spanEventArrayAttributeValueType(zb0002) + } + case "string_value": + z.StringValue, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "StringValue") + return + } + case "bool_value": + z.BoolValue, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "BoolValue") + return + } + case "int_value": + z.IntValue, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + case "double_value": + z.DoubleValue, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "DoubleValue") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *spanEventArrayAttributeValue) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 5 bits */ + _ = zb0001Mask + if 
z.StringValue == "" { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.BoolValue == false { + zb0001Len-- + zb0001Mask |= 0x4 + } + if z.IntValue == 0 { + zb0001Len-- + zb0001Mask |= 0x8 + } + if z.DoubleValue == 0 { + zb0001Len-- + zb0001Mask |= 0x10 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "type" + err = en.Append(0xa4, 0x74, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteInt32(int32(z.Type)) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + if (zb0001Mask & 0x2) == 0 { // if not omitted + // write "string_value" + err = en.Append(0xac, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteString(z.StringValue) + if err != nil { + err = msgp.WrapError(err, "StringValue") + return + } + } + if (zb0001Mask & 0x4) == 0 { // if not omitted + // write "bool_value" + err = en.Append(0xaa, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteBool(z.BoolValue) + if err != nil { + err = msgp.WrapError(err, "BoolValue") + return + } + } + if (zb0001Mask & 0x8) == 0 { // if not omitted + // write "int_value" + err = en.Append(0xa9, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.IntValue) + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + } + if (zb0001Mask & 0x10) == 0 { // if not omitted + // write "double_value" + err = en.Append(0xac, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteFloat64(z.DoubleValue) + if err != nil { + err = msgp.WrapError(err, "DoubleValue") + return + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *spanEventArrayAttributeValue) Msgsize() (s int) { + s = 1 + 5 + msgp.Int32Size + 13 + msgp.StringPrefixSize + len(z.StringValue) + 11 + msgp.BoolSize + 10 + msgp.Int64Size + 13 + msgp.Float64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *spanEventArrayAttributeValueType) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 int32 + zb0001, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = spanEventArrayAttributeValueType(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z spanEventArrayAttributeValueType) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteInt32(int32(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z spanEventArrayAttributeValueType) Msgsize() (s int) { + s = msgp.Int32Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *spanEventAttribute) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "type": + { + var zb0002 int32 + zb0002, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = spanEventAttributeType(zb0002) + } + case "string_value": 
+ z.StringValue, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "StringValue") + return + } + case "bool_value": + z.BoolValue, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "BoolValue") + return + } + case "int_value": + z.IntValue, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + case "double_value": + z.DoubleValue, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "DoubleValue") + return + } + case "array_value": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "ArrayValue") + return + } + z.ArrayValue = nil + } else { + if z.ArrayValue == nil { + z.ArrayValue = new(spanEventArrayAttribute) + } + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "ArrayValue") + return + } + for zb0003 > 0 { + zb0003-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "ArrayValue") + return + } + switch msgp.UnsafeString(field) { + case "values": + var zb0004 uint32 + zb0004, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "ArrayValue", "Values") + return + } + if cap(z.ArrayValue.Values) >= int(zb0004) { + z.ArrayValue.Values = (z.ArrayValue.Values)[:zb0004] + } else { + z.ArrayValue.Values = make([]*spanEventArrayAttributeValue, zb0004) + } + for za0001 := range z.ArrayValue.Values { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "ArrayValue", "Values", za0001) + return + } + z.ArrayValue.Values[za0001] = nil + } else { + if z.ArrayValue.Values[za0001] == nil { + z.ArrayValue.Values[za0001] = new(spanEventArrayAttributeValue) + } + err = z.ArrayValue.Values[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "ArrayValue", "Values", za0001) + return + } + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "ArrayValue") + return + } + } + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *spanEventAttribute) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(6) + var zb0001Mask uint8 /* 6 bits */ + _ = zb0001Mask + if z.StringValue == "" { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.BoolValue == false { + zb0001Len-- + zb0001Mask |= 0x4 + } + if z.IntValue == 0 { + zb0001Len-- + zb0001Mask |= 0x8 + } + if z.DoubleValue == 0 { + zb0001Len-- + zb0001Mask |= 0x10 + } + if z.ArrayValue == nil { + zb0001Len-- + zb0001Mask |= 0x20 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "type" + err = en.Append(0xa4, 0x74, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteInt32(int32(z.Type)) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + if (zb0001Mask & 0x2) == 0 { // if not omitted + // write "string_value" + err = en.Append(0xac, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteString(z.StringValue) + if err != nil { + err = msgp.WrapError(err, "StringValue") + return + } + } + if (zb0001Mask & 0x4) == 0 { // if not omitted + // write "bool_value" + err = en.Append(0xaa, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = 
en.WriteBool(z.BoolValue) + if err != nil { + err = msgp.WrapError(err, "BoolValue") + return + } + } + if (zb0001Mask & 0x8) == 0 { // if not omitted + // write "int_value" + err = en.Append(0xa9, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.IntValue) + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + } + if (zb0001Mask & 0x10) == 0 { // if not omitted + // write "double_value" + err = en.Append(0xac, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteFloat64(z.DoubleValue) + if err != nil { + err = msgp.WrapError(err, "DoubleValue") + return + } + } + if (zb0001Mask & 0x20) == 0 { // if not omitted + // write "array_value" + err = en.Append(0xab, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + if z.ArrayValue == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + // map header, size 1 + // write "values" + err = en.Append(0x81, 0xa6, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.ArrayValue.Values))) + if err != nil { + err = msgp.WrapError(err, "ArrayValue", "Values") + return + } + for za0001 := range z.ArrayValue.Values { + if z.ArrayValue.Values[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.ArrayValue.Values[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "ArrayValue", "Values", za0001) + return + } + } + } + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *spanEventAttribute) Msgsize() (s int) { + s = 1 + 5 + msgp.Int32Size + 13 + msgp.StringPrefixSize + len(z.StringValue) + 11 + msgp.BoolSize + 10 + msgp.Int64Size + 13 + msgp.Float64Size + 12 + if z.ArrayValue == nil { + s += msgp.NilSize + } else { + s += 1 + 7 + msgp.ArrayHeaderSize + for za0001 := range z.ArrayValue.Values { + if z.ArrayValue.Values[za0001] == nil { + s += msgp.NilSize + } else { + s += z.ArrayValue.Values[za0001].Msgsize() + } + } + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *spanEventAttributeType) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 int32 + zb0001, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = spanEventAttributeType(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z spanEventAttributeType) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteInt32(int32(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z spanEventAttributeType) Msgsize() (s int) { + s = msgp.Int32Size + return +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_link_msgp.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_link_msgp.go new file mode 100644 index 00000000..a56f603f --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_link_msgp.go @@ -0,0 +1,223 @@ +package tracer + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *SpanLink) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "trace_id": + z.TraceID, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "TraceID") + return + } + case "trace_id_high": + z.TraceIDHigh, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "TraceIDHigh") + return + } + case "span_id": + z.SpanID, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "SpanID") + return + } + case "attributes": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + if z.Attributes == nil { + z.Attributes = make(map[string]string, zb0002) + } else if len(z.Attributes) > 0 { + for key := range z.Attributes { + delete(z.Attributes, key) + } + } + for zb0002 > 0 { + zb0002-- + var za0001 string + var za0002 string + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + za0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Attributes", za0001) + return + } + z.Attributes[za0001] = za0002 + } + case "tracestate": + z.Tracestate, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Tracestate") + return + } + case "flags": + z.Flags, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "Flags") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *SpanLink) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(6) + var zb0001Mask uint8 /* 6 bits */ + _ = zb0001Mask + if z.TraceIDHigh == 0 { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.Attributes == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + if z.Tracestate == "" { + zb0001Len-- + zb0001Mask |= 0x10 + } + if z.Flags == 0 { + zb0001Len-- + zb0001Mask |= 0x20 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "trace_id" + err = en.Append(0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.TraceID) + if err != nil { + err = msgp.WrapError(err, "TraceID") + return + } + if (zb0001Mask & 0x2) == 0 { // if not omitted + // write "trace_id_high" + err = en.Append(0xad, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x68, 0x69, 0x67, 0x68) + if err != nil { + return + } + err = en.WriteUint64(z.TraceIDHigh) + if err != nil { + err = msgp.WrapError(err, "TraceIDHigh") + return + } + } + // write "span_id" + err = en.Append(0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.SpanID) + if err != nil { + err = msgp.WrapError(err, "SpanID") + return + } + if (zb0001Mask & 0x8) == 0 { // if not omitted + // write "attributes" + err = en.Append(0xaa, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.Attributes))) + if err != nil { + err = msgp.WrapError(err, 
"Attributes") + return + } + for za0001, za0002 := range z.Attributes { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Attributes", za0001) + return + } + } + } + if (zb0001Mask & 0x10) == 0 { // if not omitted + // write "tracestate" + err = en.Append(0xaa, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Tracestate) + if err != nil { + err = msgp.WrapError(err, "Tracestate") + return + } + } + if (zb0001Mask & 0x20) == 0 { // if not omitted + // write "flags" + err = en.Append(0xa5, 0x66, 0x6c, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteUint32(z.Flags) + if err != nil { + err = msgp.WrapError(err, "Flags") + return + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SpanLink) Msgsize() (s int) { + s = 1 + 9 + msgp.Uint64Size + 14 + msgp.Uint64Size + 8 + msgp.Uint64Size + 11 + msgp.MapHeaderSize + if z.Attributes != nil { + for za0001, za0002 := range z.Attributes { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += 11 + msgp.StringPrefixSize + len(z.Tracestate) + 6 + msgp.Uint32Size + return +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_msgp.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_msgp.go new file mode 100644 index 00000000..5e575db1 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span_msgp.go @@ -0,0 +1,713 @@ +package tracer + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *Span) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "name": + z.name, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "name") + return + } + case "service": + z.service, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "service") + return + } + case "resource": + z.resource, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "resource") + return + } + case "type": + z.spanType, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "spanType") + return + } + case "start": + z.start, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "start") + return + } + case "duration": + z.duration, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "duration") + return + } + case "meta": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "meta") + return + } + if z.meta == nil { + z.meta = make(map[string]string, zb0002) + } else if len(z.meta) > 0 { + for key := range z.meta { + delete(z.meta, key) + } + } + for zb0002 > 0 { + zb0002-- + var za0001 string + var za0002 string + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "meta") + return + } + za0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "meta", za0001) + return + } + z.meta[za0001] = za0002 + } + case 
"meta_struct": + err = z.metaStruct.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "metaStruct") + return + } + case "metrics": + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "metrics") + return + } + if z.metrics == nil { + z.metrics = make(map[string]float64, zb0003) + } else if len(z.metrics) > 0 { + for key := range z.metrics { + delete(z.metrics, key) + } + } + for zb0003 > 0 { + zb0003-- + var za0003 string + var za0004 float64 + za0003, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "metrics") + return + } + za0004, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "metrics", za0003) + return + } + z.metrics[za0003] = za0004 + } + case "span_id": + z.spanID, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "spanID") + return + } + case "trace_id": + z.traceID, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "traceID") + return + } + case "parent_id": + z.parentID, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "parentID") + return + } + case "error": + z.error, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "error") + return + } + case "span_links": + var zb0004 uint32 + zb0004, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "spanLinks") + return + } + if cap(z.spanLinks) >= int(zb0004) { + z.spanLinks = (z.spanLinks)[:zb0004] + } else { + z.spanLinks = make([]SpanLink, zb0004) + } + for za0005 := range z.spanLinks { + err = z.spanLinks[za0005].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "spanLinks", za0005) + return + } + } + case "span_events": + var zb0005 uint32 + zb0005, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "spanEvents") + return + } + if cap(z.spanEvents) >= int(zb0005) { + z.spanEvents = (z.spanEvents)[:zb0005] + } else { + z.spanEvents = make([]spanEvent, zb0005) + } + for za0006 := range z.spanEvents { + err = z.spanEvents[za0006].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "spanEvents", za0006) + return + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Span) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(15) + var zb0001Mask uint16 /* 15 bits */ + _ = zb0001Mask + if z.meta == nil { + zb0001Len-- + zb0001Mask |= 0x40 + } + if z.metrics == nil { + zb0001Len-- + zb0001Mask |= 0x100 + } + if z.spanLinks == nil { + zb0001Len-- + zb0001Mask |= 0x2000 + } + if z.spanEvents == nil { + zb0001Len-- + zb0001Mask |= 0x4000 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "name" + err = en.Append(0xa4, 0x6e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.name) + if err != nil { + err = msgp.WrapError(err, "name") + return + } + // write "service" + err = en.Append(0xa7, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteString(z.service) + if err != nil { + err = msgp.WrapError(err, "service") + return + } + // write "resource" + err = en.Append(0xa8, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteString(z.resource) + if err != nil { + err = msgp.WrapError(err, "resource") + return + } + // 
write "type" + err = en.Append(0xa4, 0x74, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteString(z.spanType) + if err != nil { + err = msgp.WrapError(err, "spanType") + return + } + // write "start" + err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) + if err != nil { + return + } + err = en.WriteInt64(z.start) + if err != nil { + err = msgp.WrapError(err, "start") + return + } + // write "duration" + err = en.Append(0xa8, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteInt64(z.duration) + if err != nil { + err = msgp.WrapError(err, "duration") + return + } + if (zb0001Mask & 0x40) == 0 { // if not omitted + // write "meta" + err = en.Append(0xa4, 0x6d, 0x65, 0x74, 0x61) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.meta))) + if err != nil { + err = msgp.WrapError(err, "meta") + return + } + for za0001, za0002 := range z.meta { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "meta") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "meta", za0001) + return + } + } + } + // write "meta_struct" + err = en.Append(0xab, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74) + if err != nil { + return + } + err = z.metaStruct.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "metaStruct") + return + } + if (zb0001Mask & 0x100) == 0 { // if not omitted + // write "metrics" + err = en.Append(0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.metrics))) + if err != nil { + err = msgp.WrapError(err, "metrics") + return + } + for za0003, za0004 := range z.metrics { + err = en.WriteString(za0003) + if err != nil { + err = msgp.WrapError(err, "metrics") + return + } + err = en.WriteFloat64(za0004) + if err != nil { + err = msgp.WrapError(err, "metrics", za0003) + return + } + } + } + // write "span_id" + err = en.Append(0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.spanID) + if err != nil { + err = msgp.WrapError(err, "spanID") + return + } + // write "trace_id" + err = en.Append(0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.traceID) + if err != nil { + err = msgp.WrapError(err, "traceID") + return + } + // write "parent_id" + err = en.Append(0xa9, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.parentID) + if err != nil { + err = msgp.WrapError(err, "parentID") + return + } + // write "error" + err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72) + if err != nil { + return + } + err = en.WriteInt32(z.error) + if err != nil { + err = msgp.WrapError(err, "error") + return + } + if (zb0001Mask & 0x2000) == 0 { // if not omitted + // write "span_links" + err = en.Append(0xaa, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.spanLinks))) + if err != nil { + err = msgp.WrapError(err, "spanLinks") + return + } + for za0005 := range z.spanLinks { + err = z.spanLinks[za0005].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "spanLinks", za0005) + return + } + } + } + if (zb0001Mask & 0x4000) == 0 { // if not omitted + // write "span_events" + err = en.Append(0xab, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73) + if err != nil { + return + } + err = 
en.WriteArrayHeader(uint32(len(z.spanEvents))) + if err != nil { + err = msgp.WrapError(err, "spanEvents") + return + } + for za0006 := range z.spanEvents { + err = z.spanEvents[za0006].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "spanEvents", za0006) + return + } + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Span) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.name) + 8 + msgp.StringPrefixSize + len(z.service) + 9 + msgp.StringPrefixSize + len(z.resource) + 5 + msgp.StringPrefixSize + len(z.spanType) + 6 + msgp.Int64Size + 9 + msgp.Int64Size + 5 + msgp.MapHeaderSize + if z.meta != nil { + for za0001, za0002 := range z.meta { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += 12 + z.metaStruct.Msgsize() + 8 + msgp.MapHeaderSize + if z.metrics != nil { + for za0003, za0004 := range z.metrics { + _ = za0004 + s += msgp.StringPrefixSize + len(za0003) + msgp.Float64Size + } + } + s += 8 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.Uint64Size + 6 + msgp.Int32Size + 11 + msgp.ArrayHeaderSize + for za0005 := range z.spanLinks { + s += z.spanLinks[za0005].Msgsize() + } + s += 12 + msgp.ArrayHeaderSize + for za0006 := range z.spanEvents { + s += z.spanEvents[za0006].Msgsize() + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *errorConfig) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "noDebugStack": + z.noDebugStack, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "noDebugStack") + return + } + case "stackFrames": + z.stackFrames, err = dc.ReadUint() + if err != nil { + err = msgp.WrapError(err, "stackFrames") + return + } + case "stackSkip": + z.stackSkip, err = dc.ReadUint() + if err != nil { + err = msgp.WrapError(err, "stackSkip") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z errorConfig) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 + // write "noDebugStack" + err = en.Append(0x83, 0xac, 0x6e, 0x6f, 0x44, 0x65, 0x62, 0x75, 0x67, 0x53, 0x74, 0x61, 0x63, 0x6b) + if err != nil { + return + } + err = en.WriteBool(z.noDebugStack) + if err != nil { + err = msgp.WrapError(err, "noDebugStack") + return + } + // write "stackFrames" + err = en.Append(0xab, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteUint(z.stackFrames) + if err != nil { + err = msgp.WrapError(err, "stackFrames") + return + } + // write "stackSkip" + err = en.Append(0xa9, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x53, 0x6b, 0x69, 0x70) + if err != nil { + return + } + err = en.WriteUint(z.stackSkip) + if err != nil { + err = msgp.WrapError(err, "stackSkip") + return + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z errorConfig) Msgsize() (s int) { + s = 1 + 13 + msgp.BoolSize + 12 + msgp.UintSize + 10 + msgp.UintSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *spanList) DecodeMsg(dc *msgp.Reader) (err error) { + var 
zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if cap((*z)) >= int(zb0002) { + (*z) = (*z)[:zb0002] + } else { + (*z) = make(spanList, zb0002) + } + for zb0001 := range *z { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + (*z)[zb0001] = nil + } else { + if (*z)[zb0001] == nil { + (*z)[zb0001] = new(Span) + } + err = (*z)[zb0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z spanList) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteArrayHeader(uint32(len(z))) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0003 := range z { + if z[zb0003] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z[zb0003].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, zb0003) + return + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z spanList) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + for zb0003 := range z { + if z[zb0003] == nil { + s += msgp.NilSize + } else { + s += z[zb0003].Msgsize() + } + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *spanLists) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if cap((*z)) >= int(zb0003) { + (*z) = (*z)[:zb0003] + } else { + (*z) = make(spanLists, zb0003) + } + for zb0001 := range *z { + var zb0004 uint32 + zb0004, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + if cap((*z)[zb0001]) >= int(zb0004) { + (*z)[zb0001] = ((*z)[zb0001])[:zb0004] + } else { + (*z)[zb0001] = make(spanList, zb0004) + } + for zb0002 := range (*z)[zb0001] { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, zb0001, zb0002) + return + } + (*z)[zb0001][zb0002] = nil + } else { + if (*z)[zb0001][zb0002] == nil { + (*z)[zb0001][zb0002] = new(Span) + } + err = (*z)[zb0001][zb0002].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, zb0001, zb0002) + return + } + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z spanLists) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteArrayHeader(uint32(len(z))) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0005 := range z { + err = en.WriteArrayHeader(uint32(len(z[zb0005]))) + if err != nil { + err = msgp.WrapError(err, zb0005) + return + } + for zb0006 := range z[zb0005] { + if z[zb0005][zb0006] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z[zb0005][zb0006].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, zb0005, zb0006) + return + } + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z spanLists) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + for zb0005 := range z { + s += msgp.ArrayHeaderSize + for zb0006 := range z[zb0005] { + if z[zb0005][zb0006] == nil { + s += msgp.NilSize + } else { + s += z[zb0005][zb0006].Msgsize() + } + } + } + return +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/spancontext.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/spancontext.go new file mode 100644 index 00000000..b9fe3d5b --- /dev/null +++ 
b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/spancontext.go
@@ -0,0 +1,730 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracer
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/DataDog/dd-trace-go/v2/ddtrace"
+	"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+	"github.com/DataDog/dd-trace-go/v2/ddtrace/internal/tracerstats"
+	sharedinternal "github.com/DataDog/dd-trace-go/v2/internal"
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+	"github.com/DataDog/dd-trace-go/v2/internal/processtags"
+	"github.com/DataDog/dd-trace-go/v2/internal/samplernames"
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry"
+)
+
+const TraceIDZero string = "00000000000000000000000000000000"
+
+var _ ddtrace.SpanContext = (*SpanContext)(nil)
+
+type traceID [16]byte // traceID in big endian, i.e. <upper><lower>
+
+var emptyTraceID traceID
+
+func (t *traceID) HexEncoded() string {
+	return hex.EncodeToString(t[:])
+}
+
+func (t *traceID) Lower() uint64 {
+	return binary.BigEndian.Uint64(t[8:])
+}
+
+func (t *traceID) Upper() uint64 {
+	return binary.BigEndian.Uint64(t[:8])
+}
+
+func (t *traceID) SetLower(i uint64) {
+	binary.BigEndian.PutUint64(t[8:], i)
+}
+
+func (t *traceID) SetUpper(i uint64) {
+	binary.BigEndian.PutUint64(t[:8], i)
+}
+
+func (t *traceID) SetUpperFromHex(s string) error {
+	u, err := strconv.ParseUint(s, 16, 64)
+	if err != nil {
+		return fmt.Errorf("malformed %q: %s", s, err)
+	}
+	t.SetUpper(u)
+	return nil
+}
+
+func (t *traceID) Empty() bool {
+	return *t == emptyTraceID
+}
+
+func (t *traceID) HasUpper() bool {
+	for _, b := range t[:8] {
+		if b != 0 {
+			return true
+		}
+	}
+	return false
+}
+
+func (t *traceID) UpperHex() string {
+	return hex.EncodeToString(t[:8])
+}
+
+// SpanContext represents a span state that can propagate to descendant spans
+// and across process boundaries. It contains all the information needed to
+// spawn a direct descendant of the span that it belongs to. It can be used
+// to create distributed traces by propagating it using the provided interfaces.
+type SpanContext struct {
+	updated bool // updated tracks changes to priority / origin / x-datadog-tags
+
+	// the below group should propagate only locally
+
+	trace  *trace       // reference to the trace that this span belongs to
+	span   *Span        // reference to the span that hosts this context
+	errors atomic.Int32 // number of spans with errors in this trace
+
+	// The 16-character hex string of the last seen Datadog Span ID;
+	// this value will be added as the _dd.parent_id tag to spans
+	// created from this spanContext.
+	// This value is extracted from the `p` sub-key within the tracestate.
+	// The backend will use the _dd.parent_id tag to reparent spans in
+	// distributed traces if they were missing their parent span.
+	// A missing parent span can occur when a W3C-compliant tracer
+	// propagated this context but didn't send any spans to Datadog.
+	reparentID string
+	isRemote   bool
+
+	// the below group should propagate cross-process
+
+	traceID traceID
+	spanID  uint64
+
+	mu         sync.RWMutex // guards below fields
+	baggage    map[string]string
+	hasBaggage uint32 // atomic flag for quickly checking the presence of baggage: 0 means no baggage, any other value means baggage exists
+	origin     string // e.g.
"synthetics" + + spanLinks []SpanLink // links to related spans in separate|external|disconnected traces + baggageOnly bool // when true, indicates this context only propagates baggage items and should not be used for distributed tracing fields +} + +// Private interface for converting v1 span contexts to v2 ones. +type spanContextV1Adapter interface { + SamplingDecision() uint32 + Origin() string + Priority() *float64 + PropagatingTags() map[string]string + Tags() map[string]string +} + +// FromGenericCtx converts a ddtrace.SpanContext to a *SpanContext, which can be used +// to start child spans. +func FromGenericCtx(c ddtrace.SpanContext) *SpanContext { + var sc SpanContext + sc.traceID = c.TraceIDBytes() + sc.spanID = c.SpanID() + sc.baggage = make(map[string]string) + c.ForeachBaggageItem(func(k, v string) bool { + sc.hasBaggage = 1 + sc.baggage[k] = v + return true + }) + ctx, ok := c.(spanContextV1Adapter) + if !ok { + return &sc + } + sc.origin = ctx.Origin() + sc.trace = newTrace() + sc.trace.priority = ctx.Priority() + sc.trace.samplingDecision = samplingDecision(ctx.SamplingDecision()) + sc.trace.tags = ctx.Tags() + sc.trace.propagatingTags = ctx.PropagatingTags() + return &sc +} + +// newSpanContext creates a new SpanContext to serve as context for the given +// span. If the provided parent is not nil, the context will inherit the trace, +// baggage and other values from it. This method also pushes the span into the +// new context's trace and as a result, it should not be called multiple times +// for the same span. +func newSpanContext(span *Span, parent *SpanContext) *SpanContext { + context := &SpanContext{ + spanID: span.spanID, + span: span, + } + + context.traceID.SetLower(span.traceID) + if parent != nil { + if !parent.baggageOnly { + context.traceID.SetUpper(parent.traceID.Upper()) + context.trace = parent.trace + context.origin = parent.origin + context.errors.Store(parent.errors.Load()) + } + parent.ForeachBaggageItem(func(k, v string) bool { + context.setBaggageItem(k, v) + return true + }) + } else if sharedinternal.BoolEnv("DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED", true) { + // add 128 bit trace id, if enabled, formatted as big-endian: + // <32-bit unix seconds> <32 bits of zero> <64 random bits> + id128 := time.Duration(span.start) / time.Second + // casting from int64 -> uint32 should be safe since the start time won't be + // negative, and the seconds should fit within 32-bits for the foreseeable future. + // (We only want 32 bits of time, then the rest is zero) + tUp := uint64(uint32(id128)) << 32 // We need the time at the upper 32 bits of the uint + context.traceID.SetUpper(tUp) + } + if context.trace == nil { + context.trace = newTrace() + } + if context.trace.root == nil { + // first span in the trace can safely be assumed to be the root + context.trace.root = span + } + // put span in context's trace + context.trace.push(span) + // setting context.updated to false here is necessary to distinguish + // between initializing properties of the span (priority) + // and updating them after extracting context through propagators + context.updated = false + return context +} + +// SpanID implements ddtrace.SpanContext. +func (c *SpanContext) SpanID() uint64 { + if c == nil { + return 0 + } + return c.spanID +} + +// TraceID implements ddtrace.SpanContext. +func (c *SpanContext) TraceID() string { + if c == nil { + return TraceIDZero + } + return c.traceID.HexEncoded() +} + +// TraceIDBytes implements ddtrace.SpanContext. 
+func (c *SpanContext) TraceIDBytes() [16]byte { + if c == nil { + return emptyTraceID + } + return c.traceID +} + +// TraceIDLower implements ddtrace.SpanContext. +func (c *SpanContext) TraceIDLower() uint64 { + if c == nil { + return 0 + } + return c.traceID.Lower() +} + +// TraceIDUpper implements ddtrace.SpanContext. +func (c *SpanContext) TraceIDUpper() uint64 { + if c == nil { + return 0 + } + return c.traceID.Upper() +} + +// SpanLinks implements ddtrace.SpanContext +func (c *SpanContext) SpanLinks() []SpanLink { + cp := make([]SpanLink, len(c.spanLinks)) + copy(cp, c.spanLinks) + return cp +} + +// ForeachBaggageItem implements ddtrace.SpanContext. +func (c *SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { + if c == nil { + return + } + if atomic.LoadUint32(&c.hasBaggage) == 0 { + return + } + c.mu.RLock() + defer c.mu.RUnlock() + for k, v := range c.baggage { + if !handler(k, v) { + break + } + } +} + +// sets the sampling priority and decision maker (based on `sampler`). +func (c *SpanContext) setSamplingPriority(p int, sampler samplernames.SamplerName) { + if c.trace == nil { + c.trace = newTrace() + } + if c.trace.setSamplingPriority(p, sampler) { + // the trace's sampling priority or sampler was updated: mark this as updated + c.updated = true + } +} + +func (c *SpanContext) SamplingPriority() (p int, ok bool) { + if c == nil || c.trace == nil { + return 0, false + } + return c.trace.samplingPriority() +} + +func (c *SpanContext) setBaggageItem(key, val string) { + c.mu.Lock() + defer c.mu.Unlock() + if c.baggage == nil { + atomic.StoreUint32(&c.hasBaggage, 1) + c.baggage = make(map[string]string, 1) + } + c.baggage[key] = val +} + +func (c *SpanContext) baggageItem(key string) string { + if atomic.LoadUint32(&c.hasBaggage) == 0 { + return "" + } + c.mu.RLock() + defer c.mu.RUnlock() + return c.baggage[key] +} + +// finish marks this span as finished in the trace. +func (c *SpanContext) finish() { c.trace.finishedOne(c.span) } + +// safeDebugString returns a safe string representation of the SpanContext for debug logging. +// It excludes potentially sensitive data like baggage contents while preserving useful debugging information. +func (c *SpanContext) safeDebugString() string { + if c == nil { + return "" + } + + hasBaggage := atomic.LoadUint32(&c.hasBaggage) != 0 + var baggageCount int + if hasBaggage { + c.mu.RLock() + baggageCount = len(c.baggage) + c.mu.RUnlock() + } + + return fmt.Sprintf("SpanContext{traceID=%s, spanID=%d, hasBaggage=%t, baggageCount=%d, origin=%q, updated=%t, isRemote=%t, baggageOnly=%t}", + c.TraceID(), c.SpanID(), hasBaggage, baggageCount, c.origin, c.updated, c.isRemote, c.baggageOnly) +} + +// samplingDecision is the decision to send a trace to the agent or not. +type samplingDecision uint32 + +const ( + // decisionNone is the default state of a trace. + // If no decision is made about the trace, the trace won't be sent to the agent. + decisionNone samplingDecision = iota + // decisionDrop prevents the trace from being sent to the agent. + decisionDrop + // decisionKeep ensures the trace will be sent to the agent. + decisionKeep +) + +// trace contains shared context information about a trace, such as sampling +// priority, the root reference and a buffer of the spans which are part of the +// trace, if these exist. 
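+//
+// The sampling decision is one-way: keep() and drop() promote
+// samplingDecision from decisionNone to decisionKeep or decisionDrop via an
+// atomic compare-and-swap, so once a decision has been recorded, subsequent
+// keep()/drop() calls are no-ops.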
+type trace struct { + mu sync.RWMutex // guards below fields + spans []*Span // all the spans that are part of this trace + tags map[string]string // trace level tags + propagatingTags map[string]string // trace level tags that will be propagated across service boundaries + finished int // the number of finished spans + full bool // signifies that the span buffer is full + priority *float64 // sampling priority + locked bool // specifies if the sampling priority can be altered + samplingDecision samplingDecision // samplingDecision indicates whether to send the trace to the agent. + + // root specifies the root of the trace, if known; it is nil when a span + // context is extracted from a carrier, at which point there are no spans in + // the trace yet. + root *Span +} + +var ( + // traceStartSize is the initial size of our trace buffer, + // by default we allocate for a handful of spans within the trace, + // which is reasonable since a span itself is much bigger, and it avoids + // re-allocating over and over. Could be fine-tuned at runtime. + traceStartSize = 10 + // traceMaxSize is the maximum number of spans we keep in memory for a + // single trace. This is to avoid memory leaks. If more spans than this + // are added to a trace, then the trace is dropped and the spans are + // discarded. Adding additional spans after a trace is dropped does + // nothing. + traceMaxSize = int(1e5) +) + +// newTrace creates a new, empty trace with a span buffer of the default +// initial size. +func newTrace() *trace { + return &trace{spans: make([]*Span, 0, traceStartSize)} +} + +func (t *trace) samplingPriorityLocked() (p int, ok bool) { + if t.priority == nil { + return 0, false + } + return int(*t.priority), true +} + +func (t *trace) samplingPriority() (p int, ok bool) { + t.mu.RLock() + defer t.mu.RUnlock() + return t.samplingPriorityLocked() +} + +// setSamplingPriority sets the sampling priority and the decision maker +// and returns true if it was modified. +func (t *trace) setSamplingPriority(p int, sampler samplernames.SamplerName) bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.setSamplingPriorityLocked(p, sampler) +} + +func (t *trace) keep() { + atomic.CompareAndSwapUint32((*uint32)(&t.samplingDecision), uint32(decisionNone), uint32(decisionKeep)) +} + +func (t *trace) drop() { + atomic.CompareAndSwapUint32((*uint32)(&t.samplingDecision), uint32(decisionNone), uint32(decisionDrop)) +} + +func (t *trace) setTag(key, value string) { + t.mu.Lock() + defer t.mu.Unlock() + t.setTagLocked(key, value) +} + +func (t *trace) setTagLocked(key, value string) { + if t.tags == nil { + t.tags = make(map[string]string, 1) + } + t.tags[key] = value +} + +func samplerToDM(sampler samplernames.SamplerName) string { + return "-" + strconv.Itoa(int(sampler)) +} + +func (t *trace) setSamplingPriorityLocked(p int, sampler samplernames.SamplerName) bool { + if t.locked { + return false + } + + updatedPriority := t.priority == nil || *t.priority != float64(p) + + if t.priority == nil { + t.priority = new(float64) + } + *t.priority = float64(p) + curDM, existed := t.propagatingTags[keyDecisionMaker] + if p > 0 && sampler != samplernames.Unknown { + // We have a positive priority and the sampling mechanism isn't set. + // Send nothing when sampler is `Unknown` for RFC compliance. + // If a global sampling rate is set, it was always applied first. And this call can be + // triggered again by applying a rule sampler. The sampling priority will be the same, but + // the decision maker will be different.
So we compare the decision makers as well. + // Note that once global rate sampling is deprecated, we no longer need to compare + // the DMs. Sampling priority is sufficient to distinguish a change in DM. + dm := samplerToDM(sampler) + updatedDM := !existed || dm != curDM + if updatedDM { + t.setPropagatingTagLocked(keyDecisionMaker, dm) + return true + } + } + if p <= 0 && existed { + delete(t.propagatingTags, keyDecisionMaker) + } + + return updatedPriority +} + +func (t *trace) isLocked() bool { + t.mu.RLock() + defer t.mu.RUnlock() + return t.locked +} + +func (t *trace) setLocked(locked bool) { + t.mu.Lock() + defer t.mu.Unlock() + t.locked = locked +} + +// push pushes a new span into the trace. If the buffer is already full, the +// span is discarded; once capacity is reached, the whole trace is dropped. +func (t *trace) push(sp *Span) { + t.mu.Lock() + defer t.mu.Unlock() + if t.full { + return + } + tr := getGlobalTracer() + if len(t.spans) >= traceMaxSize { + // capacity is reached, we will not be able to complete this trace. + t.full = true + t.spans = nil // allow our spans to be collected by GC. + log.Error("trace buffer full (%d spans), dropping trace", traceMaxSize) + if tr != nil { + tracerstats.Signal(tracerstats.TracesDropped, 1) + } + return + } + if v, ok := sp.metrics[keySamplingPriority]; ok { + t.setSamplingPriorityLocked(int(v), samplernames.Unknown) + } + t.spans = append(t.spans, sp) + if tr != nil { + tracerstats.Signal(tracerstats.SpanStarted, 1) + } +} + +// setTraceTags sets all "trace level" tags on the provided span. +// t must already be locked. +func (t *trace) setTraceTags(s *Span) { + for k, v := range t.tags { + s.setMeta(k, v) + } + for k, v := range t.propagatingTags { + s.setMeta(k, v) + } + for k, v := range sharedinternal.GetTracerGitMetadataTags() { + s.setMeta(k, v) + } + if s.context != nil && s.context.traceID.HasUpper() { + s.setMeta(keyTraceID128, s.context.traceID.UpperHex()) + } + if pTags := processtags.GlobalTags().String(); pTags != "" { + s.setMeta(keyProcessTags, pTags) + } +} + +// finishedOne acknowledges that another span in the trace has finished, and checks +// if the trace is complete, in which case the finished spans are flushed as a chunk. +// It also triggers a partial flush if partial flushing is enabled and the total number +// of finished spans is greater than or equal to the partial flush limit. +// The provided span must be locked. +func (t *trace) finishedOne(s *Span) { + t.mu.Lock() + defer t.mu.Unlock() + s.finished = true + if t.full { + // capacity has been reached, the buffer is no longer tracking + // all the spans in the trace, so the below conditions will not + // be accurate and would trigger a premature flush, exposing us + // to a race condition where spans can be modified while flushing. + // + // TODO(partialFlush): should we do a partial flush in this scenario? + return + } + t.finished++ + tr := getGlobalTracer() + if tr == nil { + return + } + tc := tr.TracerConf() + setPeerService(s, tc.PeerServiceDefaults, tc.PeerServiceMappings) + + // attach the _dd.base_service tag only when the globally configured service name is different from the + // span service name. + if s.service != "" && !strings.EqualFold(s.service, tc.ServiceTag) { + s.meta[keyBaseService] = tc.ServiceTag + } + if s == t.root && t.priority != nil { + // after the root has finished we lock down the priority; + // we won't be able to make changes to a span after finishing + // without causing a race condition.
+ t.root.setMetric(keySamplingPriority, *t.priority) + t.locked = true + } + if len(t.spans) > 0 && s == t.spans[0] { + // first span in chunk finished, lock down the tags + // + // TODO(barbayar): make sure this doesn't happen in vain when switching to + // the new wire format. We won't need to set the tags on the first span + // in the chunk there. + t.setTraceTags(s) + } + + // This is here to support the mocktracer. It would be nice to be able to not do this. + // We need to track when any single span is finished. + if mtr, ok := tr.(interface{ FinishSpan(*Span) }); ok { + mtr.FinishSpan(s) + } + + if len(t.spans) == t.finished { // perform a full flush of all spans + if tr, ok := tr.(*tracer); ok { + t.finishChunk(tr, &chunk{ + spans: t.spans, + willSend: decisionKeep == samplingDecision(atomic.LoadUint32((*uint32)(&t.samplingDecision))), + }) + } + t.spans = nil + return + } + + doPartialFlush := tc.PartialFlush && t.finished >= tc.PartialFlushMinSpans + if !doPartialFlush { + return // The trace hasn't completed and partial flushing will not occur + } + log.Debug("Partial flush triggered with %d finished spans", t.finished) + telemetry.Count(telemetry.NamespaceTracers, "trace_partial_flush.count", []string{"reason:large_trace"}).Submit(1) + finishedSpans := make([]*Span, 0, t.finished) + leftoverSpans := make([]*Span, 0, len(t.spans)-t.finished) + for _, s2 := range t.spans { + if s2.finished { + finishedSpans = append(finishedSpans, s2) + } else { + leftoverSpans = append(leftoverSpans, s2) + } + } + telemetry.Distribution(telemetry.NamespaceTracers, "trace_partial_flush.spans_closed", nil).Submit(float64(len(finishedSpans))) + telemetry.Distribution(telemetry.NamespaceTracers, "trace_partial_flush.spans_remaining", nil).Submit(float64(len(leftoverSpans))) + finishedSpans[0].setMetric(keySamplingPriority, *t.priority) + if s != t.spans[0] { + // Make sure the first span in the chunk has the trace-level tags + t.setTraceTags(finishedSpans[0]) + } + if tr, ok := tr.(*tracer); ok { + t.finishChunk(tr, &chunk{ + spans: finishedSpans, + willSend: decisionKeep == samplingDecision(atomic.LoadUint32((*uint32)(&t.samplingDecision))), + }) + } + t.spans = leftoverSpans +} + +func (t *trace) finishChunk(tr *tracer, ch *chunk) { + tr.submitChunk(ch) + t.finished = 0 // important, because a buffer can be used for several flushes +} + +// setPeerService sets the peer.service, _dd.peer.service.source, and _dd.peer.service.remapped_from +// tags as applicable for the given span. 
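// A simplified, self-contained sketch of the source precedence that
// setPeerServiceFromSource (below) applies when deriving peer.service: explicit
// database/messaging/RPC tags outrank the generic network-destination
// fallbacks. The tag names here are illustrative stand-ins, not the exact
// ext.* constants.
package main

import "fmt"

func peerServiceSource(meta map[string]string) (val, source string) {
	// highest to lowest priority, mirroring the switch in the vendored code
	ordered := []string{"db.name", "messaging.kafka.bootstrap.servers", "rpc.service", "network.destination.name", "peer.hostname", "out.host"}
	for _, k := range ordered {
		if v, ok := meta[k]; ok {
			return v, k
		}
	}
	return "", ""
}

func main() {
	meta := map[string]string{"out.host": "10.0.0.5", "db.name": "orders"}
	v, src := peerServiceSource(meta)
	fmt.Println(v, src) // "orders db.name": the DB tag outranks the host fallback
}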
+func setPeerService(s *Span, peerServiceDefaults bool, peerServiceMappings map[string]string) { + if _, ok := s.meta[ext.PeerService]; ok { // peer.service already set on the span + s.setMeta(keyPeerServiceSource, ext.PeerService) + } else { // no peer.service currently set + spanKind := s.meta[ext.SpanKind] + isOutboundRequest := spanKind == ext.SpanKindClient || spanKind == ext.SpanKindProducer + shouldSetDefaultPeerService := isOutboundRequest && peerServiceDefaults + if !shouldSetDefaultPeerService { + return + } + source := setPeerServiceFromSource(s) + if source == "" { + log.Debug("No source tag value could be found for span %q, peer.service not set", s.name) + return + } + s.setMeta(keyPeerServiceSource, source) + } + // Overwrite existing peer.service value if remapped by the user + ps := s.meta[ext.PeerService] + if to, ok := peerServiceMappings[ps]; ok { + s.setMeta(keyPeerServiceRemappedFrom, ps) + s.setMeta(ext.PeerService, to) + } +} + +// setPeerServiceFromSource sets peer.service from the sources determined +// by the tags on the span. It returns the source tag name that it used for +// the peer.service value, or the empty string if no valid source tag was available. +func setPeerServiceFromSource(s *Span) string { + has := func(tag string) bool { + _, ok := s.meta[tag] + return ok + } + var sources []string + useTargetHost := true + switch { + // order of the cases and their sources matters here. These are in priority order (highest to lowest) + case has("aws_service"): + sources = []string{ + "queuename", + "topicname", + "streamname", + "tablename", + "bucketname", + } + case s.meta[ext.DBSystem] == ext.DBSystemCassandra: + sources = []string{ + ext.CassandraContactPoints, + } + useTargetHost = false + case has(ext.DBSystem): + sources = []string{ + ext.DBName, + ext.DBInstance, + } + case has(ext.MessagingSystem): + sources = []string{ + ext.KafkaBootstrapServers, + } + case has(ext.RPCSystem): + sources = []string{ + ext.RPCService, + } + } + // network destination tags will be used as fallback unless there are higher priority sources already set. + if useTargetHost { + sources = append(sources, []string{ + ext.NetworkDestinationName, + ext.PeerHostname, + ext.TargetHost, + }...) + } + for _, source := range sources { + if val, ok := s.meta[source]; ok { + s.setMeta(ext.PeerService, val) + return source + } + } + return "" +} + +const hexEncodingDigits = "0123456789abcdef" + +// spanIDHexEncoded returns the hex encoded string of the given span ID `u` +// with the given padding. +// +// Code is borrowed from `fmt.fmtInteger` in the standard library. +func spanIDHexEncoded(u uint64, padding int) string { + // The allocated intbuf with a capacity of 68 bytes + // is large enough for integer formatting. + var intbuf [68]byte + buf := intbuf[0:] + if padding > 68 { + buf = make([]byte, padding) + } + // Because printing is easier right-to-left: format u into buf, ending at buf[i]. 
+ i := len(buf) + for u >= 16 { + i-- + buf[i] = hexEncodingDigits[u&0xF] + u >>= 4 + } + i-- + buf[i] = hexEncodingDigits[u] + for i > 0 && padding > len(buf)-i { + i-- + buf[i] = '0' + } + return string(buf[i:]) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/spanlink.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/spanlink.go new file mode 100644 index 00000000..764a8d7d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/spanlink.go @@ -0,0 +1,25 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +// SpanLink represents a reference to a span that exists outside of the trace. +// +//go:generate go run github.com/tinylib/msgp -unexported -marshal=false -o=span_link_msgp.go -tests=false + +type SpanLink struct { + // TraceID represents the low 64 bits of the linked span's trace id. This field is required. + TraceID uint64 `msg:"trace_id" json:"trace_id"` + // TraceIDHigh represents the high 64 bits of the linked span's trace id. This field is only set if the linked span's trace id is 128 bits. + TraceIDHigh uint64 `msg:"trace_id_high,omitempty" json:"trace_id_high"` + // SpanID represents the linked span's span id. + SpanID uint64 `msg:"span_id" json:"span_id"` + // Attributes is a mapping of keys to string values. These values are used to add additional context to the span link. + Attributes map[string]string `msg:"attributes,omitempty" json:"attributes"` + // Tracestate is the tracestate of the linked span. This field is optional. + Tracestate string `msg:"tracestate,omitempty" json:"tracestate"` + // Flags represents the W3C trace flags of the linked span. This field is optional. + Flags uint32 `msg:"flags,omitempty" json:"flags"` +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/sqlcomment.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/sqlcomment.go new file mode 100644 index 00000000..3a949035 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/sqlcomment.go @@ -0,0 +1,299 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "strconv" + "strings" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/samplernames" +) + +// DBMPropagationMode represents the mode of dbm propagation. +// +// Note that enabling sql comment propagation results in potentially confidential data (service names) +// being stored in the databases which can then be accessed by other 3rd parties that have been granted +// access to the database. +type DBMPropagationMode string + +const ( + // DBMPropagationModeUndefined represents the dbm propagation mode not being set. This is the same as DBMPropagationModeDisabled. + DBMPropagationModeUndefined DBMPropagationMode = "" + // DBMPropagationModeDisabled represents the dbm propagation mode where all propagation is disabled. 
+ DBMPropagationModeDisabled DBMPropagationMode = "disabled" + // DBMPropagationModeService represents the dbm propagation mode where only service tags (name, env, version) are propagated to dbm. + DBMPropagationModeService DBMPropagationMode = "service" + // DBMPropagationModeFull represents the dbm propagation mode where both service tags and tracing tags are propagated. Tracing tags include span id, trace id and the sampled flag. + DBMPropagationModeFull DBMPropagationMode = "full" +) + +// Key names for SQL comment tags. +const ( + sqlCommentTraceParent = "traceparent" + sqlCommentParentService = "ddps" + sqlCommentDBService = "dddbs" + sqlCommentParentVersion = "ddpv" + sqlCommentEnv = "dde" + // These keys are for the database we are connecting to, instead of the service we are running in. + // "Peer" is the OpenTelemetry nomenclature for "thing I am talking to" + sqlCommentPeerHostname = "ddh" + sqlCommentPeerDBName = "dddb" + // This is for when peer.service is explicitly set as a tag + sqlCommentPeerService = "ddprs" +) + +// Current trace context version (see https://www.w3.org/TR/trace-context/#version) +const w3cContextVersion = "00" + +// SQLCommentCarrier is a carrier implementation that injects a span context in a SQL query in the form +// of a sqlcommenter formatted comment prepended to the original query text. +// See https://google.github.io/sqlcommenter/spec/ for more details. +type SQLCommentCarrier struct { + Query string + Mode DBMPropagationMode + DBServiceName string + SpanID uint64 + PeerDBHostname string + PeerDBName string + PeerService string +} + +// Inject injects a span context in the carrier's Query field as a comment. +func (c *SQLCommentCarrier) Inject(ctx *SpanContext) error { + c.SpanID = generateSpanID(now()) + tags := make(map[string]string) + switch c.Mode { + case DBMPropagationModeUndefined: + fallthrough + case DBMPropagationModeDisabled: + return nil + case DBMPropagationModeFull: + var sampled int64 + traceID := c.SpanID + if ctx != nil { + if sp, ok := ctx.SamplingPriority(); ok && sp > 0 { + sampled = 1 + } + traceID = ctx.traceID.Lower() + } + tags[sqlCommentTraceParent] = encodeTraceParent(traceID, c.SpanID, sampled) + fallthrough + case DBMPropagationModeService: + if ctx != nil && ctx.span != nil { + if e, ok := getMeta(ctx.span, ext.Environment); ok && e != "" { + tags[sqlCommentEnv] = e + } + if v, ok := getMeta(ctx.span, ext.Version); ok && v != "" { + tags[sqlCommentParentVersion] = v + } + if v, ok := getMeta(ctx.span, ext.PeerService); ok && v != "" { + tags[sqlCommentPeerService] = v + } + } + if c.PeerDBName != "" { + tags[sqlCommentPeerDBName] = c.PeerDBName + } + if c.PeerDBHostname != "" { + tags[sqlCommentPeerHostname] = c.PeerDBHostname + } + if tags[sqlCommentPeerService] == "" && c.PeerService != "" { + tags[sqlCommentPeerService] = c.PeerService + } + if globalconfig.ServiceName() != "" { + tags[sqlCommentParentService] = globalconfig.ServiceName() + } + tags[sqlCommentDBService] = c.DBServiceName + } + c.Query = commentQuery(c.Query, tags) + return nil +} + +// encodeTraceParent encodes trace parent as per the w3c trace context spec (https://www.w3.org/TR/trace-context/#version). 
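// A self-contained sketch of the fixed-width traceparent layout that
// encodeTraceParent (below) produces: "00-<32 hex trace id>-<16 hex span
// id>-0<sampled>", 55 characters total. The IDs are arbitrary example values.
package main

import "fmt"

func main() {
	var traceID uint64 = 0x1234
	var spanID uint64 = 0xabcd
	sampled := int64(1)
	tp := fmt.Sprintf("00-%032x-%016x-0%x", traceID, spanID, sampled)
	fmt.Println(tp)      // 00-00000000000000000000000000001234-000000000000abcd-01
	fmt.Println(len(tp)) // 55
}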
+func encodeTraceParent(traceID uint64, spanID uint64, sampled int64) string { + var b strings.Builder + // traceparent has a fixed length of 55: + // 2 bytes for the version, 32 for the trace id, 16 for the span id, 2 for the sampled flag and 3 for separators + b.Grow(55) + b.WriteString(w3cContextVersion) + b.WriteRune('-') + tid := strconv.FormatUint(traceID, 16) + for i := 0; i < 32-len(tid); i++ { + b.WriteRune('0') + } + b.WriteString(tid) + b.WriteRune('-') + sid := strconv.FormatUint(spanID, 16) + for i := 0; i < 16-len(sid); i++ { + b.WriteRune('0') + } + b.WriteString(sid) + b.WriteRune('-') + b.WriteRune('0') + b.WriteString(strconv.FormatInt(sampled, 16)) + return b.String() +} + +var ( + keyReplacer = strings.NewReplacer(" ", "%20", "!", "%21", "#", "%23", "$", "%24", "%", "%25", "&", "%26", "'", "%27", "(", "%28", ")", "%29", "*", "%2A", "+", "%2B", ",", "%2C", "/", "%2F", ":", "%3A", ";", "%3B", "=", "%3D", "?", "%3F", "@", "%40", "[", "%5B", "]", "%5D") + valueReplacer = strings.NewReplacer(" ", "%20", "!", "%21", "#", "%23", "$", "%24", "%", "%25", "&", "%26", "'", "%27", "(", "%28", ")", "%29", "*", "%2A", "+", "%2B", ",", "%2C", "/", "%2F", ":", "%3A", ";", "%3B", "=", "%3D", "?", "%3F", "@", "%40", "[", "%5B", "]", "%5D", "'", "\\'") +) + +// commentQuery returns the given query with the tags from the SQLCommentCarrier applied to it as a +// prepended SQL comment. The format of the comment follows the sqlcommenter spec. +// See https://google.github.io/sqlcommenter/spec/ for more details. +func commentQuery(query string, tags map[string]string) string { + if len(tags) == 0 { + return "" + } + var b strings.Builder + // the sqlcommenter specification dictates that tags should be sorted. Since we know all injected keys, + // we skip a sorting operation by specifying the order of keys statically + orderedKeys := []string{sqlCommentDBService, sqlCommentEnv, sqlCommentParentService, sqlCommentParentVersion, sqlCommentTraceParent, sqlCommentPeerHostname, sqlCommentPeerDBName, sqlCommentPeerService} + first := true + for _, k := range orderedKeys { + if v, ok := tags[k]; ok { + // we need to URL-encode both keys and values and escape single quotes in values + // https://google.github.io/sqlcommenter/spec/ + key := keyReplacer.Replace(k) + val := valueReplacer.Replace(v) + if first { + b.WriteString("/*") + } else { + b.WriteRune(',') + } + b.WriteString(key) + b.WriteRune('=') + b.WriteRune('\'') + b.WriteString(val) + b.WriteRune('\'') + first = false + } + } + if b.Len() == 0 { + return query + } + b.WriteString("*/") + if query == "" { + return b.String() + } + log.Debug("Injected sql comment: %s", b.String()) + b.WriteRune(' ') + b.WriteString(query) + return b.String() +} + +// Extract parses for key value attributes in a sql query injected with trace information in order to build a span context +func (c *SQLCommentCarrier) Extract() (*SpanContext, error) { + var ctx *SpanContext + // There may be multiple comments within the sql query, so we must identify which one contains trace information. 
+ // We look at each comment until we find one that contains a traceparent + if traceComment, found := findTraceComment(c.Query); found { + var err error + if ctx, err = spanContextFromTraceComment(traceComment); err != nil { + return nil, err + } + } else { + return nil, ErrSpanContextNotFound + } + if ctx.traceID.Empty() || ctx.spanID == 0 { + return nil, ErrSpanContextNotFound + } + return ctx, nil +} + +// spanContextFromTraceComment looks for specific kv pairs in a comment containing trace information. +// It returns a span context with the appropriate attributes +func spanContextFromTraceComment(c string) (*SpanContext, error) { + var ctx SpanContext + kvs := strings.Split(c, ",") + for _, unparsedKV := range kvs { + splitKV := strings.Split(unparsedKV, "=") + if len(splitKV) != 2 { + return nil, ErrSpanContextCorrupted + } + key := splitKV[0] + value := strings.Trim(splitKV[1], "'") + switch key { + case sqlCommentTraceParent: + traceIDLower, traceIDUpper, spanID, sampled, err := decodeTraceParent(value) + if err != nil { + return nil, err + } + ctx.traceID.SetLower(traceIDLower) + ctx.traceID.SetUpper(traceIDUpper) + ctx.spanID = spanID + ctx.setSamplingPriority(sampled, samplernames.Unknown) + default: + } + } + return &ctx, nil +} + +// decodeTraceParent decodes trace parent as per the w3c trace context spec (https://www.w3.org/TR/trace-context/#version). +// this also supports decoding traceparents from open telemetry sql comments which are 128 bit +func decodeTraceParent(traceParent string) (traceIDLower uint64, traceIDUpper uint64, spanID uint64, sampled int, err error) { + if len(traceParent) < 55 { + return 0, 0, 0, 0, ErrSpanContextCorrupted + } + version := traceParent[0:2] + switch version { + case w3cContextVersion: + if traceIDUpper, err = strconv.ParseUint(traceParent[3:19], 16, 64); err != nil { + return 0, 0, 0, 0, ErrSpanContextCorrupted + } + if traceIDLower, err = strconv.ParseUint(traceParent[19:35], 16, 64); err != nil { + return 0, 0, 0, 0, ErrSpanContextCorrupted + } + if spanID, err = strconv.ParseUint(traceParent[36:52], 16, 64); err != nil { + return 0, 0, 0, 0, ErrSpanContextCorrupted + } + if sampled, err = strconv.Atoi(traceParent[53:55]); err != nil { + return 0, 0, 0, 0, ErrSpanContextCorrupted + } + default: + } + return traceIDLower, traceIDUpper, spanID, sampled, err +} + +// findTraceComment looks for a sql comment that contains trace information by looking for the keyword traceparent +func findTraceComment(query string) (traceComment string, found bool) { + startIndex := -1 + containsTrace := false + keyLength := len(sqlCommentTraceParent) + qLength := len(query) + for i := 0; i < qLength-1; { + if query[i] == '/' && query[i+1] == '*' { + // look for leading /* + startIndex = i + i += 2 + containsTrace = false + } else if query[i] == '*' && query[i+1] == '/' { + // look for closing */ + if startIndex == -1 { + // malformed comment, it did not have a leading /* + return "", false + } + if !containsTrace { + // ignore this comment, it was not a trace comment + startIndex = -1 + i += 2 + } else { + // do not return the query with the leading /* or trailing */ + return query[startIndex+2 : i], true + } + } else if !containsTrace && i+keyLength < qLength && query[i:i+keyLength] == sqlCommentTraceParent { + // look for occurrence of keyword in the query if not yet found and make sure we don't go out of range + containsTrace = true + i += keyLength + } else { + i++ + } + } + return "", false +} diff --git 
a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/stats.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/stats.go new file mode 100644 index 00000000..575bfe70 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/stats.go @@ -0,0 +1,245 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "sync" + "sync/atomic" + "time" + + "github.com/DataDog/datadog-agent/pkg/obfuscate" + "github.com/DataDog/datadog-agent/pkg/trace/stats" + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants" + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/processtags" + + "github.com/DataDog/datadog-go/v5/statsd" +) + +// tracerObfuscationVersion indicates which version of stats obfuscation logic we implement +// In the future this can be pulled directly from our obfuscation import. +var tracerObfuscationVersion = 1 + +// defaultStatsBucketSize specifies the default span of time that will be +// covered in one stats bucket. +var defaultStatsBucketSize = (10 * time.Second).Nanoseconds() + +// concentrator aggregates and stores statistics on incoming spans in time buckets, +// flushing them occasionally to the underlying transport located in the given +// tracer config. +type concentrator struct { + // In specifies the channel to be used for feeding data to the concentrator. + // In order for In to have a consumer, the concentrator must be started using + // a call to Start. + In chan *tracerStatSpan + + // stopped reports whether the concentrator is stopped (when non-zero) + stopped uint32 + + spanConcentrator *stats.SpanConcentrator + + aggregationKey stats.PayloadAggregationKey + + wg sync.WaitGroup // waits for any active goroutines + bucketSize int64 // the size of a bucket in nanoseconds + stop chan struct{} // closing this channel triggers shutdown + cfg *config // tracer startup configuration + statsdClient internal.StatsdClient // statsd client for sending metrics. +} + +type tracerStatSpan struct { + statSpan *stats.StatSpan + origin string +} + +// newConcentrator creates a new concentrator using the given tracer +// configuration c. It creates buckets of bucketSize nanoseconds duration. 
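// A self-contained sketch of the time-bucket alignment the concentrator relies
// on (alignTs below): timestamps are truncated to the start of their bucket, so
// spans that fall in the same 10s window aggregate into the same stats bucket.
package main

import (
	"fmt"
	"time"
)

func alignTs(ts, bucketSize int64) int64 { return ts - ts%bucketSize }

func main() {
	bucket := (10 * time.Second).Nanoseconds()
	a := alignTs(25_000_000_000, bucket) // 25s falls into the 20s bucket
	b := alignTs(29_999_999_999, bucket) // still the 20s bucket
	fmt.Println(a == b, a)               // true 20000000000
}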
+func newConcentrator(c *config, bucketSize int64, statsdClient internal.StatsdClient) *concentrator { + sCfg := &stats.SpanConcentratorConfig{ + ComputeStatsBySpanKind: true, + BucketInterval: defaultStatsBucketSize, + } + env := c.agent.defaultEnv + if c.env != "" { + env = c.env + } + if env == "" { + // We do this to avoid a panic in the stats calculation logic when env is empty. + // This should never actually happen, as the agent MUST have an env configured to start up. + // That panic will be removed in a future release, at which point we can remove this workaround. + env = "unknown-env" + log.Debug("No DD Env found, normally the agent should have one") + } + gitCommitSha := "" + if c.ciVisibilityEnabled { + // We only have this data if we're in CI Visibility + gitCommitSha = utils.GetCITags()[constants.GitCommitSHA] + } + aggKey := stats.PayloadAggregationKey{ + Hostname: c.hostname, + Env: env, + Version: c.version, + ContainerID: "", // This is intentionally left empty, as the Agent will attach the container ID only in certain situations. + GitCommitSha: gitCommitSha, + ImageTag: "", + } + spanConcentrator := stats.NewSpanConcentrator(sCfg, time.Now()) + return &concentrator{ + In: make(chan *tracerStatSpan, 10000), + bucketSize: bucketSize, + stopped: 1, + cfg: c, + aggregationKey: aggKey, + spanConcentrator: spanConcentrator, + statsdClient: statsdClient, + } +} + +// alignTs returns the provided timestamp truncated to the bucket size. +// It gives us the start time of the time bucket in which the timestamp falls. +func alignTs(ts, bucketSize int64) int64 { return ts - ts%bucketSize } + +// Start starts the concentrator. A started concentrator needs to be stopped +// in order to gracefully shut down, using Stop. +func (c *concentrator) Start() { + if atomic.SwapUint32(&c.stopped, 0) == 0 { + // already running + log.Warn("(*concentrator).Start called more than once. This is likely a programming error.") + return + } + c.stop = make(chan struct{}) + c.wg.Add(1) + go func() { + defer c.wg.Done() + tick := time.NewTicker(time.Duration(c.bucketSize) * time.Nanosecond) + defer tick.Stop() + c.runFlusher(tick.C) + }() + c.wg.Add(1) + go func() { + defer c.wg.Done() + c.runIngester() + }() +} + +// runFlusher runs the flushing loop which sends stats to the underlying transport. +func (c *concentrator) runFlusher(tick <-chan time.Time) { + for { + select { + case now := <-tick: + c.flushAndSend(now, withoutCurrentBucket) + case <-c.stop: + return + } + } +} + +// statsd returns any tracer configured statsd client, or a no-op. +func (c *concentrator) statsd() internal.StatsdClient { + if c.statsdClient == nil { + return &statsd.NoOpClientDirect{} + } + return c.statsdClient +} + +// runIngester runs the loop which accepts incoming data on the concentrator's In +// channel.
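// A self-contained sketch of the ingest/stop loop pattern that runIngester
// (below) uses: consume work from a channel until the stop channel is closed.
// The sleep is only there to make this toy example deterministic; the real
// code instead drains In during Stop.
package main

import (
	"fmt"
	"time"
)

func main() {
	in := make(chan int, 4)
	stop := make(chan struct{})
	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			select {
			case v := <-in:
				fmt.Println("ingested", v)
			case <-stop:
				return
			}
		}
	}()
	in <- 1
	in <- 2
	time.Sleep(50 * time.Millisecond)
	close(stop)
	<-done
}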
+func (c *concentrator) runIngester() { + for { + select { + case s := <-c.In: + c.statsd().Incr("datadog.tracer.stats.spans_in", nil, 1) + c.add(s) + case <-c.stop: + return + } + } +} + +func (c *concentrator) newTracerStatSpan(s *Span, obfuscator *obfuscate.Obfuscator) (*tracerStatSpan, bool) { + resource := s.resource + if c.shouldObfuscate() { + resource = obfuscatedResource(obfuscator, s.spanType, s.resource) + } + statSpan, ok := c.spanConcentrator.NewStatSpan(s.service, resource, + s.name, s.spanType, s.parentID, s.start, s.duration, s.error, s.meta, s.metrics, c.cfg.agent.peerTags) + if !ok { + return nil, false + } + origin := s.meta[keyOrigin] + return &tracerStatSpan{ + statSpan: statSpan, + origin: origin, + }, true +} + +func (c *concentrator) shouldObfuscate() bool { + // Obfuscate if agent reports an obfuscation version AND our version is at least as new + return c.cfg.agent.obfuscationVersion > 0 && c.cfg.agent.obfuscationVersion <= tracerObfuscationVersion +} + +// add s into the concentrator's internal stats buckets. +func (c *concentrator) add(s *tracerStatSpan) { + c.spanConcentrator.AddSpan(s.statSpan, c.aggregationKey, "", nil, s.origin) +} + +// Stop stops the concentrator and blocks until the operation completes. +func (c *concentrator) Stop() { + if atomic.SwapUint32(&c.stopped, 1) > 0 { + return + } + close(c.stop) + c.wg.Wait() +drain: + for { + select { + case s := <-c.In: + c.statsd().Incr("datadog.tracer.stats.spans_in", nil, 1) + c.add(s) + default: + break drain + } + } + c.flushAndSend(time.Now(), withCurrentBucket) +} + +const ( + withCurrentBucket = true + withoutCurrentBucket = false +) + +// flushAndSend flushes all the stats buckets with the given timestamp and sends them using the transport specified in +// the concentrator config. The current bucket is only included if includeCurrent is true, such as during shutdown. +func (c *concentrator) flushAndSend(timenow time.Time, includeCurrent bool) { + csps := c.spanConcentrator.Flush(timenow.UnixNano(), includeCurrent) + + obfVersion := 0 + if c.shouldObfuscate() { + obfVersion = tracerObfuscationVersion + } else { + log.Debug("Stats Obfuscation was skipped, agent will obfuscate (tracer %d, agent %d)", tracerObfuscationVersion, c.cfg.agent.obfuscationVersion) + } + + if len(csps) == 0 { + // nothing to flush + return + } + c.statsd().Incr("datadog.tracer.stats.flush_payloads", nil, float64(len(csps))) + flushedBuckets := 0 + // Given we use a constant PayloadAggregationKey there should only ever be 1 of these, but to be forward + // compatible in case this ever changes we can just iterate through all of them. + for _, csp := range csps { + csp.ProcessTags = processtags.GlobalTags().String() + flushedBuckets += len(csp.Stats) + if err := c.cfg.transport.sendStats(csp, obfVersion); err != nil { + c.statsd().Incr("datadog.tracer.stats.flush_errors", nil, 1) + log.Error("Error sending stats payload: %s", err.Error()) + } + } + c.statsd().Incr("datadog.tracer.stats.flush_buckets", nil, float64(flushedBuckets)) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/telemetry.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/telemetry.go new file mode 100644 index 00000000..4704efd7 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/telemetry.go @@ -0,0 +1,130 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "fmt" + "strings" + + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" +) + +var additionalConfigs []telemetry.Configuration + +func reportTelemetryOnAppStarted(c telemetry.Configuration) { + additionalConfigs = append(additionalConfigs, c) +} + +// startTelemetry starts the global instrumentation telemetry client with tracer data +// unless instrumentation telemetry is disabled via the DD_INSTRUMENTATION_TELEMETRY_ENABLED +// env var. +// If the telemetry client has already been started by the profiler, then +// an app-product-change event is sent with appsec information and an app-client-configuration-change +// event is sent with tracer config data. +// Note that the tracer is not considered as a standalone product by telemetry so we cannot send +// an app-product-change event for the tracer. +func startTelemetry(c *config) telemetry.Client { + if telemetry.Disabled() { + // Do not do extra work populating config data if instrumentation telemetry is disabled. + return nil + } + + telemetry.ProductStarted(telemetry.NamespaceTracers) + telemetryConfigs := []telemetry.Configuration{ + {Name: "agent_feature_drop_p0s", Value: c.agent.DropP0s}, + {Name: "stats_computation_enabled", Value: c.canComputeStats()}, + {Name: "dogstatsd_port", Value: c.agent.StatsdPort}, + {Name: "lambda_mode", Value: c.logToStdout}, + {Name: "send_retries", Value: c.sendRetries}, + {Name: "retry_interval", Value: c.retryInterval}, + {Name: "trace_startup_logs_enabled", Value: c.logStartup}, + {Name: "service", Value: c.serviceName}, + {Name: "universal_version", Value: c.universalVersion}, + {Name: "env", Value: c.env}, + {Name: "version", Value: c.version}, + {Name: "trace_agent_url", Value: c.agentURL.String()}, + {Name: "agent_hostname", Value: c.hostname}, + {Name: "runtime_metrics_v2_enabled", Value: c.runtimeMetricsV2}, + {Name: "dogstatsd_addr", Value: c.dogstatsdAddr}, + {Name: "debug_stack_enabled", Value: !c.noDebugStack}, + {Name: "profiling_hotspots_enabled", Value: c.profilerHotspots}, + {Name: "profiling_endpoints_enabled", Value: c.profilerEndpoints}, + {Name: "trace_span_attribute_schema", Value: c.spanAttributeSchemaVersion}, + {Name: "trace_peer_service_defaults_enabled", Value: c.peerServiceDefaultsEnabled}, + {Name: "orchestrion_enabled", Value: c.orchestrionCfg.Enabled, Origin: telemetry.OriginCode}, + {Name: "trace_enabled", Value: c.enabled.current, Origin: c.enabled.cfgOrigin}, + {Name: "trace_log_directory", Value: c.logDirectory}, + c.traceSampleRate.toTelemetry(), + c.headerAsTags.toTelemetry(), + c.globalTags.toTelemetry(), + c.traceSampleRules.toTelemetry(), + {Name: "span_sample_rules", Value: c.spanRules}, + } + var peerServiceMapping []string + for key, value := range c.peerServiceMappings { + peerServiceMapping = append(peerServiceMapping, fmt.Sprintf("%s:%s", key, value)) + } + telemetryConfigs = append(telemetryConfigs, + telemetry.Configuration{Name: "trace_peer_service_mapping", Value: strings.Join(peerServiceMapping, ",")}) + + if chained, ok := c.propagator.(*chainedPropagator); ok { + telemetryConfigs = append(telemetryConfigs, + telemetry.Configuration{Name: "trace_propagation_style_inject", Value: chained.injectorNames}) + telemetryConfigs = append(telemetryConfigs, + telemetry.Configuration{Name: 
"trace_propagation_style_extract", Value: chained.extractorsNames}) + } + for k, v := range c.featureFlags { + telemetryConfigs = append(telemetryConfigs, telemetry.Configuration{Name: k, Value: v}) + } + for k, v := range c.serviceMappings { + telemetryConfigs = append(telemetryConfigs, telemetry.Configuration{Name: "service_mapping_" + k, Value: v}) + } + for k, v := range c.globalTags.get() { + telemetryConfigs = append(telemetryConfigs, telemetry.Configuration{Name: "global_tag_" + k, Value: v}) + } + rules := append(c.spanRules, c.traceRules...) + for _, rule := range rules { + var service string + var name string + if rule.Service != nil { + service = rule.Service.String() + } + if rule.Name != nil { + name = rule.Name.String() + } + telemetryConfigs = append(telemetryConfigs, + telemetry.Configuration{Name: fmt.Sprintf("sr_%s_(%s)_(%s)", rule.ruleType.String(), service, name), + Value: fmt.Sprintf("rate:%f_maxPerSecond:%f", rule.Rate, rule.MaxPerSecond)}) + } + if c.orchestrionCfg.Enabled { + telemetryConfigs = append(telemetryConfigs, telemetry.Configuration{Name: "orchestrion_version", Value: c.orchestrionCfg.Metadata.Version, Origin: telemetry.OriginCode}) + } + telemetryConfigs = append(telemetryConfigs, additionalConfigs...) + telemetry.RegisterAppConfigs(telemetryConfigs...) + cfg := telemetry.ClientConfig{ + HTTPClient: c.httpClient, + AgentURL: c.agentURL.String(), + } + if c.logToStdout || c.ciVisibilityAgentless { + cfg.APIKey = env.Get("DD_API_KEY") + } + client, err := telemetry.NewClient(c.serviceName, c.env, c.version, cfg) + if err != nil { + log.Debug("tracer: failed to create telemetry client: %s", err.Error()) + return nil + } + + if c.orchestrionCfg.Enabled { + // If orchestrion is enabled, report it to the back-end via a telemetry metric on every flush. + handle := client.Gauge(telemetry.NamespaceTracers, "orchestrion.enabled", []string{"version:" + c.orchestrionCfg.Metadata.Version}) + client.AddFlushTicker(func(_ telemetry.Client) { handle.Submit(1) }) + } + + telemetry.StartApp(client) + return client +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/textmap.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/textmap.go new file mode 100644 index 00000000..3bb4992d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/textmap.go @@ -0,0 +1,1472 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "sync/atomic" + + "maps" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/samplernames" +) + +// HTTPHeadersCarrier wraps an http.Header as a TextMapWriter and TextMapReader, allowing +// it to be used using the provided Propagator implementation. +type HTTPHeadersCarrier http.Header + +var _ TextMapWriter = (*HTTPHeadersCarrier)(nil) +var _ TextMapReader = (*HTTPHeadersCarrier)(nil) + +// Set implements TextMapWriter. +func (c HTTPHeadersCarrier) Set(key, val string) { + http.Header(c).Set(key, val) +} + +// ForeachKey implements TextMapReader. 
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { + for k, vals := range c { + for _, v := range vals { + if err := handler(k, v); err != nil { + return err + } + } + } + return nil +} + +// TextMapCarrier allows the use of a regular map[string]string as both TextMapWriter +// and TextMapReader, making it compatible with the provided Propagator. +type TextMapCarrier map[string]string + +var _ TextMapWriter = (*TextMapCarrier)(nil) +var _ TextMapReader = (*TextMapCarrier)(nil) + +// Set implements TextMapWriter. +func (c TextMapCarrier) Set(key, val string) { + c[key] = val +} + +// ForeachKey conforms to the TextMapReader interface. +func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { + for k, v := range c { + if err := handler(k, v); err != nil { + return err + } + } + return nil +} + +const ( + headerPropagationStyleInject = "DD_TRACE_PROPAGATION_STYLE_INJECT" + headerPropagationStyleExtract = "DD_TRACE_PROPAGATION_STYLE_EXTRACT" + headerPropagationStyle = "DD_TRACE_PROPAGATION_STYLE" +) + +const ( + // DefaultBaggageHeaderPrefix specifies the prefix that will be used in + // HTTP headers or text maps to prefix baggage keys. + DefaultBaggageHeaderPrefix = "ot-baggage-" + + // DefaultTraceIDHeader specifies the key that will be used in HTTP headers + // or text maps to store the trace ID. + DefaultTraceIDHeader = "x-datadog-trace-id" + + // DefaultParentIDHeader specifies the key that will be used in HTTP headers + // or text maps to store the parent ID. + DefaultParentIDHeader = "x-datadog-parent-id" + + // DefaultPriorityHeader specifies the key that will be used in HTTP headers + // or text maps to store the sampling priority value. + DefaultPriorityHeader = "x-datadog-sampling-priority" + + // DefaultBaggageHeader specifies the key that will be used in HTTP headers + // or text maps to store the baggage value. + DefaultBaggageHeader = "baggage" +) + +// originHeader specifies the name of the header indicating the origin of the trace. +// It is used with the Synthetics product and usually has the value "synthetics". +const originHeader = "x-datadog-origin" + +// traceTagsHeader holds the propagated trace tags +const traceTagsHeader = "x-datadog-tags" + +// propagationExtractMaxSize limits the total size of incoming propagated tags to parse +const propagationExtractMaxSize = 512 + +// PropagatorConfig defines the configuration for initializing a propagator. +type PropagatorConfig struct { + // BaggagePrefix specifies the prefix that will be used to store baggage + // items in a map. It defaults to DefaultBaggageHeaderPrefix. + BaggagePrefix string + + // TraceHeader specifies the map key that will be used to store the trace ID. + // It defaults to DefaultTraceIDHeader. + TraceHeader string + + // ParentHeader specifies the map key that will be used to store the parent ID. + // It defaults to DefaultParentIDHeader. + ParentHeader string + + // PriorityHeader specifies the map key that will be used to store the sampling priority. + // It defaults to DefaultPriorityHeader. + PriorityHeader string + + // MaxTagsHeaderLen specifies the maximum length of trace tags header value. + // It defaults to defaultMaxTagsHeaderLen, a value of 0 disables propagation of tags. + MaxTagsHeaderLen int + + // B3 specifies if B3 headers should be added for trace propagation. + // See https://github.com/openzipkin/b3-propagation + B3 bool + + // BaggageHeader specifies the map key that will be used to store the baggage key-value pairs. 
+ // It defaults to DefaultBaggageHeader. + BaggageHeader string +} + +// NewPropagator returns a new propagator which uses TextMap to inject +// and extract values. It propagates trace and span IDs and baggage. +// To use the defaults, nil may be provided in place of the config. +// +// The inject and extract propagators are determined using environment variables +// with the following order of precedence: +// 1. DD_TRACE_PROPAGATION_STYLE_INJECT / DD_TRACE_PROPAGATION_STYLE_EXTRACT +// 2. DD_TRACE_PROPAGATION_STYLE (applies to both inject and extract) +// 3. If none of the above, use default values +func NewPropagator(cfg *PropagatorConfig, propagators ...Propagator) Propagator { + if cfg == nil { + cfg = new(PropagatorConfig) + } + if cfg.BaggagePrefix == "" { + cfg.BaggagePrefix = DefaultBaggageHeaderPrefix + } + if cfg.TraceHeader == "" { + cfg.TraceHeader = DefaultTraceIDHeader + } + if cfg.ParentHeader == "" { + cfg.ParentHeader = DefaultParentIDHeader + } + if cfg.PriorityHeader == "" { + cfg.PriorityHeader = DefaultPriorityHeader + } + if cfg.BaggageHeader == "" { + cfg.BaggageHeader = DefaultBaggageHeader + } + cp := new(chainedPropagator) + cp.onlyExtractFirst = internal.BoolEnv("DD_TRACE_PROPAGATION_EXTRACT_FIRST", false) + if len(propagators) > 0 { + cp.injectors = propagators + cp.extractors = propagators + return cp + } + injectorsPs := env.Get(headerPropagationStyleInject) + extractorsPs := env.Get(headerPropagationStyleExtract) + cp.injectors, cp.injectorNames = getPropagators(cfg, injectorsPs) + cp.extractors, cp.extractorsNames = getPropagators(cfg, extractorsPs) + return cp +} + +// chainedPropagator implements Propagator and applies a list of injectors and extractors. +// When injecting, all injectors are called to propagate the span context. +// When extracting, it tries each extractor, selecting the first successful one. +type chainedPropagator struct { + injectors []Propagator + extractors []Propagator + injectorNames string + extractorsNames string + onlyExtractFirst bool // value of DD_TRACE_PROPAGATION_EXTRACT_FIRST +} + +// getPropagators returns a list of propagators based on ps, which is a comma separated +// list of propagator names. If the list doesn't contain any valid values, the +// default propagator will be returned. Any invalid values in the list will log +// a warning and be ignored.
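// A self-contained sketch of how a comma separated propagation style value
// (e.g. DD_TRACE_PROPAGATION_STYLE_INJECT="Datadog,traceContext,bogus") is
// normalized and matched, approximating the loop in getPropagators below.
package main

import (
	"fmt"
	"strings"
)

func main() {
	ps := strings.ToLower("Datadog,traceContext,bogus")
	for _, name := range strings.Split(ps, ",") {
		switch name {
		case "datadog", "tracecontext", "baggage", "b3", "b3multi", "b3 single header":
			fmt.Println("recognized:", name)
		default:
			fmt.Println("ignored with a warning:", name) // mirrors the log.Warn fallback
		}
	}
}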
+func getPropagators(cfg *PropagatorConfig, ps string) ([]Propagator, string) { + dd := &propagator{cfg} + defaultPs := []Propagator{dd, &propagatorW3c{}, &propagatorBaggage{}} + defaultPsName := "datadog,tracecontext,baggage" + if cfg.B3 { + defaultPs = append(defaultPs, &propagatorB3{}) + defaultPsName += ",b3" + } + if ps == "" { + if prop := getDDorOtelConfig("propagationStyle"); prop != "" { + ps = prop // use the generic DD_TRACE_PROPAGATION_STYLE if set + } else { + return defaultPs, defaultPsName // no env set, so use default from configuration + } + } + ps = strings.ToLower(ps) + if ps == "none" { + return nil, "" + } + var list []Propagator + var listNames []string + if cfg.B3 { + list = append(list, &propagatorB3{}) + listNames = append(listNames, "b3") + } + for _, v := range strings.Split(ps, ",") { + switch v := strings.ToLower(v); v { + case "datadog": + list = append(list, dd) + listNames = append(listNames, v) + case "tracecontext": + list = append(list, &propagatorW3c{}) + listNames = append(listNames, v) + case "baggage": + list = append(list, &propagatorBaggage{}) + listNames = append(listNames, v) + case "b3", "b3multi": + if !cfg.B3 { + // propagatorB3 hasn't already been added, add a new one. + list = append(list, &propagatorB3{}) + listNames = append(listNames, v) + } + case "b3 single header": + list = append(list, &propagatorB3SingleHeader{}) + listNames = append(listNames, v) + case "none": + log.Warn("Propagator \"none\" has no effect when combined with other propagators. " + + "To disable the propagator, set to `none`") + default: + log.Warn("unrecognized propagator: %s\n", v) + } + } + if len(list) == 0 { + return defaultPs, defaultPsName // no valid propagators, so return default + } + return list, strings.Join(listNames, ",") +} + +// Inject defines the Propagator to propagate SpanContext data +// out of the current process. The implementation propagates the +// TraceID and the current active SpanID, as well as the Span baggage. +func (p *chainedPropagator) Inject(spanCtx *SpanContext, carrier interface{}) error { + if spanCtx == nil { + return ErrInvalidSpanContext + } + for _, v := range p.injectors { + err := v.Inject(spanCtx, carrier) + if err != nil { + return err + } + } + return nil +} + +// Extract implements Propagator. This method will attempt to extract a span context +// based on the precedence order of the propagators. Generally, the first valid +// trace context that could be extracted will be returned. However, the W3C tracestate +// header value will always be extracted and stored in the local trace context even if +// a previous propagator has succeeded so long as the trace-ids match. +// Furthermore, if we have already successfully extracted a trace context and a +// subsequent trace context has conflicting trace information, such information will +// be relayed in the returned SpanContext with a SpanLink. 
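// A simplified, self-contained sketch of the first-match extraction described
// above: try extractors in order, keep the first context found, and treat
// "not found" as non-fatal so later extractors still get a chance. The real
// Extract below additionally merges baggage, tracestate, and span links.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("span context not found")

type extractor func() (string, error)

func extract(extractors []extractor) (string, error) {
	var ctx string
	for _, ex := range extractors {
		got, err := ex()
		if ctx != "" {
			continue // a context was already found; later results only enrich it
		}
		if err != nil && !errors.Is(err, errNotFound) {
			return "", err // a real error before any success is fatal
		}
		ctx = got
	}
	if ctx == "" {
		return "", errNotFound
	}
	return ctx, nil
}

func main() {
	got, err := extract([]extractor{
		func() (string, error) { return "", errNotFound },  // e.g. Datadog headers absent
		func() (string, error) { return "trace-abc", nil }, // e.g. tracecontext present
	})
	fmt.Println(got, err) // trace-abc <nil>
}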
+func (p *chainedPropagator) Extract(carrier interface{}) (*SpanContext, error) { + var ctx *SpanContext + var links []SpanLink + pendingBaggage := make(map[string]string) // used to store baggage items temporarily + + for _, v := range p.extractors { + firstExtract := (ctx == nil) // ctx stores the most recently extracted ctx across iterations; if it's nil, no extractor has run yet + extractedCtx, err := v.Extract(carrier) + + // If this is the baggage propagator, just stash its items into pendingBaggage + if _, isBaggage := v.(*propagatorBaggage); isBaggage { + if extractedCtx != nil && len(extractedCtx.baggage) > 0 { + for k, v := range extractedCtx.baggage { + pendingBaggage[k] = v + } + } + continue + } + + if firstExtract { + if err != nil { + if p.onlyExtractFirst { // Every error is relevant when we are relying on the first extractor + return nil, err + } + if err != ErrSpanContextNotFound { // We don't care about ErrSpanContextNotFound because we could find a span context in a subsequent extractor + return nil, err + } + } + if p.onlyExtractFirst { + return extractedCtx, nil + } + ctx = extractedCtx + } else { // A local trace context has already been extracted + extractedCtx2 := extractedCtx + ctx2 := ctx + + // If the trace IDs match, merge in the W3C tracestate; if they differ, record the conflicting context as a span link below + if extractedCtx2.TraceID() == ctx2.TraceID() { + if pW3C, ok := v.(*propagatorW3c); ok { + pW3C.propagateTracestate(ctx2, extractedCtx2) + // If trace IDs match but span IDs do not, use spanID from `*propagatorW3c` extractedCtx for parenting + if extractedCtx2.SpanID() != ctx2.SpanID() { + var ddCtx *SpanContext + // Grab the datadog-propagated span context again + if ddp := getDatadogPropagator(p); ddp != nil { + if ddSpanCtx, err := ddp.Extract(carrier); err == nil { + ddCtx = ddSpanCtx + } + } + overrideDatadogParentID(ctx2, extractedCtx2, ddCtx) + } + } + } else if extractedCtx2 != nil { // Trace IDs do not match - create span links + link := SpanLink{TraceID: extractedCtx2.TraceIDLower(), SpanID: extractedCtx2.SpanID(), TraceIDHigh: extractedCtx2.TraceIDUpper(), Attributes: map[string]string{"reason": "terminated_context", "context_headers": getPropagatorName(v)}} + if trace := extractedCtx2.trace; trace != nil { + if flags := uint32(*trace.priority); flags > 0 { // Set the flags based on the sampling priority + link.Flags = 1 + } else { + link.Flags = 0 + } + link.Tracestate = extractedCtx2.trace.propagatingTag(tracestateHeader) + } + links = append(links, link) + } + } + } + + if ctx == nil { + if len(pendingBaggage) > 0 { + ctx := &SpanContext{ + baggage: make(map[string]string, len(pendingBaggage)), + baggageOnly: true, + } + maps.Copy(ctx.baggage, pendingBaggage) + atomic.StoreUint32(&ctx.hasBaggage, 1) + return ctx, nil + } + // 0 successful extractions + return nil, ErrSpanContextNotFound + } + if len(pendingBaggage) > 0 { + if ctx.baggage == nil { + ctx.baggage = make(map[string]string, len(pendingBaggage)) + } + for k, v := range pendingBaggage { + ctx.baggage[k] = v + } + atomic.StoreUint32(&ctx.hasBaggage, 1) + } + + if len(links) > 0 { + ctx.spanLinks = links + } + log.Debug("Extracted span context: %s", ctx.safeDebugString()) + return ctx, nil +} + +func getPropagatorName(p Propagator) string { + switch p.(type) { + case *propagator: + return "datadog" + case *propagatorB3: + return "b3multi" + case *propagatorB3SingleHeader: + return "b3" + case *propagatorW3c: + return "tracecontext" + case *propagatorBaggage: + return "baggage" + default: + return "" + } +} + +//
propagateTracestate will add the tracestate propagating tag to the given +// *spanContext. The W3C trace context will be extracted from the provided +// carrier. The trace id of this W3C trace context must match the trace id +// provided by the given *spanContext. If it matches, then the tracestate +// will be re-composed based on the composition of the given *spanContext, +// but will include the non-DD vendors in the W3C trace context's tracestate. +func (p *propagatorW3c) propagateTracestate(ctx *SpanContext, w3cCtx *SpanContext) { + if w3cCtx == nil { + return // It's not valid, so ignore it. + } + if ctx.TraceID() != w3cCtx.TraceID() { + return // The trace-ids must match. + } + if w3cCtx.trace == nil { + return // this shouldn't happen, since it should have a propagating tag already + } + if ctx.trace == nil { + ctx.trace = newTrace() + } + // Get the tracestate header from extracted w3C context, and propagate + // it to the span context that will be returned. + // Note: Other trace context fields like sampling priority, propagated tags, + // and origin will remain unchanged. + ts := w3cCtx.trace.propagatingTag(tracestateHeader) + priority, _ := ctx.SamplingPriority() + setPropagatingTag(ctx, tracestateHeader, composeTracestate(ctx, priority, ts)) + ctx.isRemote = (w3cCtx.isRemote) +} + +// propagator implements Propagator and injects/extracts span contexts +// using datadog headers. Only TextMap carriers are supported. +type propagator struct { + cfg *PropagatorConfig +} + +func (p *propagator) Inject(spanCtx *SpanContext, carrier interface{}) error { + if spanCtx == nil { + return ErrInvalidSpanContext + } + switch c := carrier.(type) { + case TextMapWriter: + return p.injectTextMap(spanCtx, c) + default: + return ErrInvalidCarrier + } +} + +func (p *propagator) injectTextMap(spanCtx *SpanContext, writer TextMapWriter) error { + ctx := spanCtx + if ctx.traceID.Empty() || ctx.spanID == 0 { + return ErrInvalidSpanContext + } + // propagate the TraceID and the current active SpanID + if ctx.traceID.HasUpper() { + setPropagatingTag(ctx, keyTraceID128, ctx.traceID.UpperHex()) + } else if ctx.trace != nil { + ctx.trace.unsetPropagatingTag(keyTraceID128) + } + writer.Set(p.cfg.TraceHeader, strconv.FormatUint(ctx.traceID.Lower(), 10)) + writer.Set(p.cfg.ParentHeader, strconv.FormatUint(ctx.spanID, 10)) + if sp, ok := ctx.SamplingPriority(); ok { + writer.Set(p.cfg.PriorityHeader, strconv.Itoa(sp)) + } + if ctx.origin != "" { + writer.Set(originHeader, ctx.origin) + } + ctx.ForeachBaggageItem(func(k, v string) bool { + // Propagate OpenTracing baggage. 
+ writer.Set(p.cfg.BaggagePrefix+k, v) + return true + }) + if p.cfg.MaxTagsHeaderLen <= 0 { + return nil + } + if s := p.marshalPropagatingTags(ctx); len(s) > 0 { + writer.Set(traceTagsHeader, s) + } + return nil +} + +// marshalPropagatingTags marshals all propagating tags included in ctx to a comma separated string +func (p *propagator) marshalPropagatingTags(ctx *SpanContext) string { + var sb strings.Builder + if ctx.trace == nil { + return "" + } + + var properr string + ctx.trace.iteratePropagatingTags(func(k, v string) bool { + if k == tracestateHeader || k == traceparentHeader { + return true // don't propagate W3C headers with the DD propagator + } + if err := isValidPropagatableTag(k, v); err != nil { + log.Warn("Won't propagate tag %q: %s", k, err.Error()) + properr = "encoding_error" + return true + } + if tagLen := sb.Len() + len(k) + len(v); tagLen > p.cfg.MaxTagsHeaderLen { + sb.Reset() + log.Warn("Won't propagate tag %q: %q length is (%d) which exceeds the maximum len of (%d).", k, v, tagLen, p.cfg.MaxTagsHeaderLen) + properr = "inject_max_size" + return false + } + if sb.Len() > 0 { + sb.WriteByte(',') + } + sb.WriteString(k) + sb.WriteByte('=') + sb.WriteString(v) + return true + }) + if properr != "" { + ctx.trace.setTag(keyPropagationError, properr) + } + return sb.String() +} + +func (p *propagator) Extract(carrier interface{}) (*SpanContext, error) { + switch c := carrier.(type) { + case TextMapReader: + return p.extractTextMap(c) + default: + return nil, ErrInvalidCarrier + } +} + +func (p *propagator) extractTextMap(reader TextMapReader) (*SpanContext, error) { + var ctx SpanContext + err := reader.ForeachKey(func(k, v string) error { + var err error + key := strings.ToLower(k) + switch key { + case p.cfg.TraceHeader: + var lowerTid uint64 + lowerTid, err = parseUint64(v) + if err != nil { + return ErrSpanContextCorrupted + } + ctx.traceID.SetLower(lowerTid) + case p.cfg.ParentHeader: + ctx.spanID, err = parseUint64(v) + if err != nil { + return ErrSpanContextCorrupted + } + case p.cfg.PriorityHeader: + priority, err := strconv.Atoi(v) + if err != nil { + return ErrSpanContextCorrupted + } + ctx.setSamplingPriority(priority, samplernames.Unknown) + case originHeader: + ctx.origin = v + case traceTagsHeader: + unmarshalPropagatingTags(&ctx, v) + default: + if strings.HasPrefix(key, p.cfg.BaggagePrefix) { + ctx.setBaggageItem(strings.TrimPrefix(key, p.cfg.BaggagePrefix), v) + } + } + return nil + }) + if err != nil { + return nil, err + } + if ctx.trace != nil { + tid := ctx.trace.propagatingTag(keyTraceID128) + if err := validateTID(tid); err != nil { + log.Debug("Invalid hex traceID: %s", err.Error()) + ctx.trace.unsetPropagatingTag(keyTraceID128) + } else if err := ctx.traceID.SetUpperFromHex(tid); err != nil { + log.Debug("Attempted to set an invalid hex traceID: %s", err.Error()) + ctx.trace.unsetPropagatingTag(keyTraceID128) + } + } + if ctx.traceID.Empty() || (ctx.spanID == 0 && ctx.origin != "synthetics") { + return nil, ErrSpanContextNotFound + } + return &ctx, nil +} + +func validateTID(tid string) error { + if len(tid) != 16 { + return fmt.Errorf("invalid length: %q", tid) + } + if !isValidID(tid) { + return fmt.Errorf("malformed: %q", tid) + } + return nil +} + +// getDatadogPropagator returns the Datadog Propagator +func getDatadogPropagator(cp *chainedPropagator) *propagator { + for _, e := range cp.extractors { + p, isDatadog := (e).(*propagator) + if isDatadog { + return p + } + } + return nil +} + +// overrideDatadogParentID overrides the span ID of 
a context with the ID extracted from tracecontext headers. +// If the reparenting ID is not set on the context, the span ID from datadog headers is used. +// spanContexts are passed by reference to avoid copying lock value in spanContext type +func overrideDatadogParentID(ctx, w3cCtx, ddCtx *SpanContext) { + if ctx == nil || w3cCtx == nil || ddCtx == nil { + return + } + ctx.spanID = w3cCtx.spanID + if w3cCtx.reparentID != "" { + ctx.reparentID = w3cCtx.reparentID + } else { + // NIT: could be done without using fmt.Sprintf? Is it worth it? + ctx.reparentID = fmt.Sprintf("%016x", ddCtx.SpanID()) + } +} + +// unmarshalPropagatingTags unmarshals tags from v into ctx +func unmarshalPropagatingTags(ctx *SpanContext, v string) { + if ctx.trace == nil { + ctx.trace = newTrace() + } + if len(v) > propagationExtractMaxSize { + log.Warn("Did not extract %s, size limit exceeded: %d. Incoming tags will not be propagated further.", traceTagsHeader, propagationExtractMaxSize) + ctx.trace.setTag(keyPropagationError, "extract_max_size") + return + } + tags, err := parsePropagatableTraceTags(v) + if err != nil { + log.Warn("Did not extract %q: %s. Incoming tags will not be propagated further.", traceTagsHeader, err.Error()) + ctx.trace.setTag(keyPropagationError, "decoding_error") + } + ctx.trace.replacePropagatingTags(tags) +} + +// setPropagatingTag adds the key value pair to the map of propagating tags on the trace, +// creating the map if one is not initialized. +func setPropagatingTag(ctx *SpanContext, k, v string) { + if ctx.trace == nil { + // extractors initialize a new spanContext, so the trace might be nil + ctx.trace = newTrace() + } + ctx.trace.setPropagatingTag(k, v) +} + +const ( + b3TraceIDHeader = "x-b3-traceid" + b3SpanIDHeader = "x-b3-spanid" + b3SampledHeader = "x-b3-sampled" + b3SingleHeader = "b3" +) + +// propagatorB3 implements Propagator and injects/extracts span contexts +// using B3 headers. Only TextMap carriers are supported. 
+type propagatorB3 struct{} + +func (p *propagatorB3) Inject(spanCtx *SpanContext, carrier interface{}) error { + if spanCtx == nil { + return ErrInvalidSpanContext + } + switch c := carrier.(type) { + case TextMapWriter: + return p.injectTextMap(spanCtx, c) + default: + return ErrInvalidCarrier + } +} + +func (*propagatorB3) injectTextMap(spanCtx *SpanContext, writer TextMapWriter) error { + if spanCtx == nil { + return ErrInvalidSpanContext + } + ctx := spanCtx + if ctx.traceID.Empty() || ctx.spanID == 0 { + return ErrInvalidSpanContext + } + if !ctx.traceID.HasUpper() { // 64-bit trace id + writer.Set(b3TraceIDHeader, fmt.Sprintf("%016x", ctx.traceID.Lower())) + } else { // 128-bit trace id + writer.Set(b3TraceIDHeader, ctx.TraceID()) + } + writer.Set(b3SpanIDHeader, fmt.Sprintf("%016x", ctx.spanID)) + if p, ok := ctx.SamplingPriority(); ok { + if p >= ext.PriorityAutoKeep { + writer.Set(b3SampledHeader, "1") + } else { + writer.Set(b3SampledHeader, "0") + } + } + return nil +} + +func (p *propagatorB3) Extract(carrier interface{}) (*SpanContext, error) { + switch c := carrier.(type) { + case TextMapReader: + return p.extractTextMap(c) + default: + return nil, ErrInvalidCarrier + } +} + +func (*propagatorB3) extractTextMap(reader TextMapReader) (*SpanContext, error) { + var ctx SpanContext + err := reader.ForeachKey(func(k, v string) error { + var err error + key := strings.ToLower(k) + switch key { + case b3TraceIDHeader: + if err := extractTraceID128(&ctx, v); err != nil { + return nil + } + case b3SpanIDHeader: + ctx.spanID, err = strconv.ParseUint(v, 16, 64) + if err != nil { + return ErrSpanContextCorrupted + } + case b3SampledHeader: + priority, err := strconv.Atoi(v) + if err != nil { + return ErrSpanContextCorrupted + } + ctx.setSamplingPriority(priority, samplernames.Unknown) + default: + } + return nil + }) + if err != nil { + return nil, err + } + if ctx.traceID.Empty() || ctx.spanID == 0 { + return nil, ErrSpanContextNotFound + } + return &ctx, nil +} + +// propagatorB3 implements Propagator and injects/extracts span contexts +// using B3 headers. Only TextMap carriers are supported. 
+type propagatorB3SingleHeader struct{} + +func (p *propagatorB3SingleHeader) Inject(spanCtx *SpanContext, carrier interface{}) error { + if spanCtx == nil { + return ErrInvalidSpanContext + } + switch c := carrier.(type) { + case TextMapWriter: + return p.injectTextMap(spanCtx, c) + default: + return ErrInvalidCarrier + } +} + +func (*propagatorB3SingleHeader) injectTextMap(spanCtx *SpanContext, writer TextMapWriter) error { + if spanCtx == nil { + return ErrInvalidSpanContext + } + ctx := spanCtx + if ctx.traceID.Empty() || ctx.spanID == 0 { + return ErrInvalidSpanContext + } + sb := strings.Builder{} + var traceID string + if !ctx.traceID.HasUpper() { // 64-bit trace id + traceID = fmt.Sprintf("%016x", ctx.traceID.Lower()) + } else { // 128-bit trace id + traceID = ctx.TraceID() + } + sb.WriteString(fmt.Sprintf("%s-%016x", traceID, ctx.spanID)) + if p, ok := ctx.SamplingPriority(); ok { + if p >= ext.PriorityAutoKeep { + sb.WriteString("-1") + } else { + sb.WriteString("-0") + } + } + writer.Set(b3SingleHeader, sb.String()) + return nil +} + +func (p *propagatorB3SingleHeader) Extract(carrier interface{}) (*SpanContext, error) { + switch c := carrier.(type) { + case TextMapReader: + return p.extractTextMap(c) + default: + return nil, ErrInvalidCarrier + } +} + +func (*propagatorB3SingleHeader) extractTextMap(reader TextMapReader) (*SpanContext, error) { + var ctx SpanContext + err := reader.ForeachKey(func(k, v string) error { + var err error + key := strings.ToLower(k) + switch key { + case b3SingleHeader: + b3Parts := strings.Split(v, "-") + if len(b3Parts) >= 2 { + if err = extractTraceID128(&ctx, b3Parts[0]); err != nil { + return err + } + ctx.spanID, err = strconv.ParseUint(b3Parts[1], 16, 64) + if err != nil { + return ErrSpanContextCorrupted + } + if len(b3Parts) >= 3 { + switch b3Parts[2] { + case "": + break + case "1", "d": // Treat 'debug' traces as priority 1 + ctx.setSamplingPriority(ext.PriorityAutoKeep, samplernames.Unknown) + case "0": + ctx.setSamplingPriority(ext.PriorityAutoReject, samplernames.Unknown) + default: + return ErrSpanContextCorrupted + } + } + } else { + return ErrSpanContextCorrupted + } + default: + } + return nil + }) + if err != nil { + return nil, err + } + if ctx.traceID.Empty() || ctx.spanID == 0 { + return nil, ErrSpanContextNotFound + } + return &ctx, nil +} + +const ( + traceparentHeader = "traceparent" + tracestateHeader = "tracestate" +) + +// propagatorW3c implements Propagator and injects/extracts span contexts +// using W3C tracecontext/traceparent headers. Only TextMap carriers are supported. +type propagatorW3c struct{} + +func (p *propagatorW3c) Inject(spanCtx *SpanContext, carrier interface{}) error { + if spanCtx == nil { + return ErrInvalidSpanContext + } + switch c := carrier.(type) { + case TextMapWriter: + return p.injectTextMap(spanCtx, c) + default: + return ErrInvalidCarrier + } +} + +// injectTextMap propagates span context attributes into the writer, +// in the format of the traceparentHeader and tracestateHeader. +// traceparentHeader encodes W3C Trace Propagation version, 128-bit traceID, +// spanID, and a flags field, which supports 8 unique flags. +// The current specification only supports a single flag called sampled, +// which is equal to 00000001 when no other flag is present. +// tracestateHeader is a comma-separated list of list-members with a = format, +// where each list-member is managed by a vendor or instrumentation library. 
+func (*propagatorW3c) injectTextMap(spanCtx *SpanContext, writer TextMapWriter) error { + if spanCtx == nil { + return ErrInvalidSpanContext + } + ctx := spanCtx + if ctx.traceID.Empty() || ctx.spanID == 0 { + return ErrInvalidSpanContext + } + flags := "" + p, ok := ctx.SamplingPriority() + if ok && p >= ext.PriorityAutoKeep { + flags = "01" + } else { + flags = "00" + } + + var traceID string + if ctx.traceID.HasUpper() { + setPropagatingTag(ctx, keyTraceID128, ctx.traceID.UpperHex()) + traceID = ctx.TraceID() + } else { + traceID = fmt.Sprintf("%032x", ctx.traceID) + if ctx.trace != nil { + ctx.trace.unsetPropagatingTag(keyTraceID128) + } + } + writer.Set(traceparentHeader, fmt.Sprintf("00-%s-%016x-%v", traceID, ctx.spanID, flags)) + // if context priority / origin / tags were updated after extraction, + // or if there is a span on the trace + // or the tracestateHeader doesn't start with `dd=` + // we need to recreate tracestate + if ctx.updated || + (!ctx.isRemote || ctx.isRemote && ctx.trace != nil && ctx.trace.root != nil) || + (ctx.trace != nil && !strings.HasPrefix(ctx.trace.propagatingTag(tracestateHeader), "dd=")) || + ctx.trace.propagatingTagsLen() == 0 { + // compose a new value for the tracestate + writer.Set(tracestateHeader, composeTracestate(ctx, p, ctx.trace.propagatingTag(tracestateHeader))) + } else { + // use a cached value for the tracestate (e.g., no updating p: key) + writer.Set(tracestateHeader, ctx.trace.propagatingTag(tracestateHeader)) + } + return nil +} + +// stringMutator maps characters in a string to new characters. It is a state machine intended +// to replace regex patterns for simple character replacement, including collapsing runs of a +// specific range. +// +// It's designed after the `hash#Hash` interface, and to work with `strings.Map`. +type stringMutator struct { + // n is the current state of the mutator. It is used to track runs of characters that should + // be collapsed. + n bool + // fn is the function that implements the character replacement logic. + // It returns the rune to use as replacement and a bool to tell if next consecutive + // characters must be dropped if they fall in the currently matched character set. + // It's possible to return `-1` to immediately drop the current rune. + // + // This logic allows for: + // - Replace only the current rune: return , false + // - Drop only the current rune: return -1, false + // - Replace the current rune and drop the next consecutive runes if they match the same case: return , true + // - Drop all the consecutive runes matching the same case as the current one: return -1, true + // + // A known limitation is that we can only support a single case of consecutive runes. + fn func(rune) (rune, bool) +} + +// Mutate the mapped string using `strings.Map` and the provided function implementing the character +// replacement logic. +func (sm *stringMutator) Mutate(fn func(rune) (rune, bool), s string) string { + sm.fn = fn + rs := strings.Map(sm.mapping, s) + sm.reset() + + return rs +} + +func (sm *stringMutator) mapping(r rune) rune { + v, dropConsecutiveMatches := sm.fn(r) + if v < 0 { + // We reset the state machine in any match that is not related to a consecutive run + sm.reset() + return -1 + } + if dropConsecutiveMatches { + if !sm.n { + sm.n = true + return v + } + return -1 + } + // We reset the state machine in any match that is not related to a consecutive run + sm.reset() + return v +} + +// reset resets the state of the mutator. 
+func (sm *stringMutator) reset() { + sm.n = false +} + +var ( + // keyDisallowedFn is used to sanitize the keys of the datadog propagating tags. + // Disallowed characters are comma (reserved as a list-member separator), + // equals (reserved for list-member key-value separator), + // space and characters outside the ASCII range 0x20 to 0x7E. + // Disallowed characters must be replaced with the underscore. + // Equivalent to regexp.MustCompile(",|=|[^\\x20-\\x7E]+") + keyDisallowedFn = func(r rune) (rune, bool) { + switch { + case r == ',' || r == '=': + return '_', false + case r < 0x20 || r > 0x7E: + return '_', true + } + return r, false + } + + // valueDisallowedFn is used to sanitize the values of the datadog propagating tags. + // Disallowed characters are comma (reserved as a list-member separator), + // semi-colon (reserved for separator between entries in the dd list-member), + // tilde (reserved, will represent 0x3D (equals) in the encoded tag value, + // and characters outside the ASCII range 0x20 to 0x7E. + // Equals character must be encoded with a tilde. + // Other disallowed characters must be replaced with the underscore. + // Equivalent to regexp.MustCompile(",|;|~|[^\\x20-\\x7E]+") + valueDisallowedFn = func(r rune) (rune, bool) { + switch { + case r == '=': + return '~', false + case r == ',' || r == '~' || r == ';': + return '_', false + case r < 0x20 || r > 0x7E: + return '_', true + } + return r, false + } + + // originDisallowedFn is used to sanitize the value of the datadog origin tag. + // Disallowed characters are comma (reserved as a list-member separator), + // semi-colon (reserved for separator between entries in the dd list-member), + // equals (reserved for list-member key-value separator), + // and characters outside the ASCII range 0x21 to 0x7E. + // Equals character must be encoded with a tilde. + // Other disallowed characters must be replaced with the underscore. + // Equivalent to regexp.MustCompile(",|~|;|[^\\x21-\\x7E]+") + originDisallowedFn = func(r rune) (rune, bool) { + switch { + case r == '=': + return '~', false + case r == ',' || r == '~' || r == ';': + return '_', false + case r < 0x21 || r > 0x7E: + return '_', true + } + return r, false + } +) + +const ( + asciiLowerA = 97 + asciiLowerF = 102 + asciiZero = 48 + asciiNine = 57 +) + +// isValidID is used to verify that the input is a valid hex string. +// This is an equivalent check to the regexp ^[a-f0-9]+$ +// In benchmarks, this function is roughly 10x faster than the equivalent +// regexp, which is why we split it out. +// isValidID is applicable for both trace and span IDs. +func isValidID(id string) bool { + if len(id) == 0 { + return false + } + + for _, c := range id { + ascii := int(c) + if ascii < asciiZero || ascii > asciiLowerF || (ascii > asciiNine && ascii < asciiLowerA) { + return false + } + } + + return true +} + +// composeTracestate creates a tracestateHeader from the spancontext. +// The Datadog tracing library is only responsible for managing the list member with key dd, +// which holds the values of the sampling decision(`s:`), origin(`o:`), +// the last parent ID of a Datadog span (`p:`), +// and propagated tags prefixed with `t.`(e.g. _dd.p.usr.id:usr_id tag will become `t.usr.id:usr_id`). 
+func composeTracestate(ctx *SpanContext, priority int, oldState string) string { + var ( + b strings.Builder + sm = &stringMutator{} + ) + + b.Grow(128) + b.WriteString("dd=s:") + b.WriteString(strconv.Itoa(priority)) + listLength := 1 + + if ctx.origin != "" { + oWithSub := sm.Mutate(originDisallowedFn, ctx.origin) + b.WriteString(";o:") + b.WriteString(oWithSub) + } + + // if the context is remote and there is a reparentID, set p as reparentId + // if the context is remote and there is no reparentID, don't set p + // if the context is not remote, set p as context.spanId + // this ID can be used by downstream tracers to set a _dd.parent_id tag + // to allow the backend to reparent orphaned spans if necessary + if !ctx.isRemote { + b.WriteString(";p:") + b.WriteString(spanIDHexEncoded(ctx.SpanID(), 16)) + } else if ctx.reparentID != "" { + b.WriteString(";p:") + b.WriteString(ctx.reparentID) + } + + ctx.trace.iteratePropagatingTags(func(k, v string) bool { + if !strings.HasPrefix(k, "_dd.p.") { + return true + } + // Datadog propagating tags must be appended to the tracestateHeader + // with the `t.` prefix. Tag value must have all `=` signs replaced with a tilde (`~`). + key := sm.Mutate(keyDisallowedFn, k[len("_dd.p."):]) + value := sm.Mutate(valueDisallowedFn, v) + if b.Len()+len(key)+len(value)+4 > 256 { // the +4 here is to account for the `t.` prefix, the `;` needed between the tags, and the `:` between the key and value + return false + } + b.WriteString(";t.") + b.WriteString(key) + b.WriteString(":") + b.WriteString(value) + return true + }) + // the old state is split by vendors, must be concatenated with a `,` + if len(oldState) == 0 { + return b.String() + } + for _, s := range strings.Split(strings.Trim(oldState, " \t"), ",") { + if strings.HasPrefix(s, "dd=") { + continue + } + listLength++ + // if the resulting tracestateHeader exceeds 32 list-members, + // remove the rightmost list-member(s) + if listLength > 32 { + break + } + b.WriteString(",") + b.WriteString(strings.Trim(s, " \t")) + } + return b.String() +} + +func (p *propagatorW3c) Extract(carrier interface{}) (*SpanContext, error) { + switch c := carrier.(type) { + case TextMapReader: + return p.extractTextMap(c) + default: + return nil, ErrInvalidCarrier + } +} + +func (*propagatorW3c) extractTextMap(reader TextMapReader) (*SpanContext, error) { + var parentHeader string + var stateHeader string + var ctx SpanContext + ctx.isRemote = true + // to avoid parsing tracestate header(s) if traceparent is invalid + if err := reader.ForeachKey(func(k, v string) error { + key := strings.ToLower(k) + switch key { + case traceparentHeader: + if parentHeader != "" { + return ErrSpanContextCorrupted + } + parentHeader = v + case tracestateHeader: + stateHeader = v + default: + if strings.HasPrefix(key, DefaultBaggageHeaderPrefix) { + ctx.setBaggageItem(strings.TrimPrefix(key, DefaultBaggageHeaderPrefix), v) + } + } + return nil + }); err != nil { + return nil, err + } + if err := parseTraceparent(&ctx, parentHeader); err != nil { + return nil, err + } + parseTracestate(&ctx, stateHeader) + return &ctx, nil +} + +// parseTraceparent attempts to parse traceparentHeader which describes the position +// of the incoming request in its trace graph in a portable, fixed-length format. +// The format of the traceparentHeader is `-` separated string with in the +// following format: `version-traceId-spanID-flags`, with an optional `-` if version > 0. 
+// where: +// - version - represents the version of the W3C Tracecontext Propagation format in hex format. +// - traceId - represents the propagated traceID in the format of 32 hex-encoded digits. +// - spanID - represents the propagated spanID (parentID) in the format of 16 hex-encoded digits. +// - flags - represents the propagated flags in the format of 2 hex-encoded digits, and supports 8 unique flags. +// Example value of HTTP `traceparent` header: `00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01`, +// Currently, Go tracer doesn't support 128-bit traceIDs, so the full traceID (32 hex-encoded digits) must be +// stored into a field that is accessible from the span's context. TraceId will be parsed from the least significant 16 +// hex-encoded digits into a 64-bit number. +func parseTraceparent(ctx *SpanContext, header string) error { + nonWordCutset := "_-\t \n" + header = strings.ToLower(strings.Trim(header, "\t -")) + headerLen := len(header) + if headerLen == 0 { + return ErrSpanContextNotFound + } + if headerLen < 55 { + return ErrSpanContextCorrupted + } + parts := strings.SplitN(header, "-", 5) // 5 because we expect 4 required + 1 optional substrings + if len(parts) < 4 { + return ErrSpanContextCorrupted + } + version := strings.Trim(parts[0], nonWordCutset) + if len(version) != 2 { + return ErrSpanContextCorrupted + } + v, err := strconv.ParseUint(version, 16, 64) + if err != nil || v == 255 { + // version 255 (0xff) is invalid + return ErrSpanContextCorrupted + } + if v == 0 && headerLen != 55 { + // The header length in v0 has to be 55. + // It's allowed to be longer in other versions. + return ErrSpanContextCorrupted + } + // parsing traceID + fullTraceID := strings.Trim(parts[1], nonWordCutset) + if len(fullTraceID) != 32 { + return ErrSpanContextCorrupted + } + // checking that the entire TraceID is a valid hex string + if !isValidID(fullTraceID) { + return ErrSpanContextCorrupted + } + if ctx.trace != nil { + // Ensure that the 128-bit trace id tag doesn't propagate + ctx.trace.unsetPropagatingTag(keyTraceID128) + } + if err := extractTraceID128(ctx, fullTraceID); err != nil { + return err + } + // parsing spanID + spanID := strings.Trim(parts[2], nonWordCutset) + if len(spanID) != 16 { + return ErrSpanContextCorrupted + } + if !isValidID(spanID) { + return ErrSpanContextCorrupted + } + if ctx.spanID, err = strconv.ParseUint(spanID, 16, 64); err != nil { + return ErrSpanContextCorrupted + } + if ctx.spanID == 0 { + return ErrSpanContextNotFound + } + // parsing flags + flags := parts[3] + f, err := strconv.ParseInt(flags, 16, 8) + if err != nil { + return ErrSpanContextCorrupted + } + ctx.setSamplingPriority(int(f)&0x1, samplernames.Unknown) + return nil +} + +// parseTracestate attempts to parse tracestateHeader which is a list +// with up to 32 comma-separated (,) list-members. +// An example value would be: `vendorname1=opaqueValue1,vendorname2=opaqueValue2,dd=s:1;o:synthetics`, +// Where `dd` list contains values that would be in x-datadog-tags as well as those needed for propagation information. +// The keys to the "dd" values have been shortened as follows to save space: +// `sampling_priority` = `s` +// `origin` = `o` +// `last parent` = `p` +// `_dd.p.` prefix = `t.` +func parseTracestate(ctx *SpanContext, header string) { + if header == "" { + // The W3C spec says tracestate can be empty but should avoid sending it. 
+ // https://www.w3.org/TR/trace-context-1/#tracestate-header-field-values + return + } + // if multiple headers are present, they must be combined and stored + setPropagatingTag(ctx, tracestateHeader, header) + combined := strings.Split(strings.Trim(header, "\t "), ",") + for _, group := range combined { + if !strings.HasPrefix(group, "dd=") { + continue + } + ddMembers := strings.Split(group[len("dd="):], ";") + dropDM := false + // indicate that backend could reparent this as a root + for _, member := range ddMembers { + keyVal := strings.SplitN(member, ":", 2) + if len(keyVal) != 2 { + continue + } + key, val := keyVal[0], keyVal[1] + if key == "o" { + ctx.origin = strings.ReplaceAll(val, "~", "=") + } else if key == "s" { + stateP, err := strconv.Atoi(val) + if err != nil { + // If the tracestate priority is absent, + // we rely on the traceparent sampled flag + // set in the parseTraceparent function. + continue + } + // The sampling priority and decision maker values are set based on + // the specification in the internal W3C context propagation RFC. + // See the document for more details. + parentP, _ := ctx.SamplingPriority() + if (parentP == 1 && stateP > 0) || (parentP == 0 && stateP <= 0) { + // As extracted from tracestate + ctx.setSamplingPriority(stateP, samplernames.Unknown) + } + if parentP == 1 && stateP <= 0 { + // Auto keep (1) and set the decision maker to default + ctx.setSamplingPriority(ext.PriorityAutoKeep, samplernames.Default) + } + if parentP == 0 && stateP > 0 { + // Auto drop (0) and drop the decision maker + ctx.setSamplingPriority(ext.PriorityAutoReject, samplernames.Unknown) + dropDM = true + } + } else if key == "p" { + ctx.reparentID = val + } else if strings.HasPrefix(key, "t.dm") { + if ctx.trace.hasPropagatingTag(keyDecisionMaker) || dropDM { + continue + } + setPropagatingTag(ctx, keyDecisionMaker, val) + } else if strings.HasPrefix(key, "t.") { + keySuffix := key[len("t."):] + val = strings.ReplaceAll(val, "~", "=") + setPropagatingTag(ctx, "_dd.p."+keySuffix, val) + } + } + } +} + +// extractTraceID128 extracts the trace id from v and populates the traceID +// field, and the traceID128 field (if applicable) of the provided ctx, +// returning an error if v is invalid. 
+func extractTraceID128(ctx *SpanContext, v string) error { + if len(v) > 32 { + v = v[len(v)-32:] + } + v = strings.TrimLeft(v, "0") + var err error + if len(v) <= 16 { // 64-bit trace id + var tid uint64 + tid, err = strconv.ParseUint(v, 16, 64) + ctx.traceID.SetLower(tid) + } else { // 128-bit trace id + idUpper := v[:len(v)-16] + ctx.traceID.SetUpperFromHex(idUpper) + var l uint64 + l, err = strconv.ParseUint(v[len(idUpper):], 16, 64) + ctx.traceID.SetLower(l) + } + if err != nil { + return ErrSpanContextCorrupted + } + return nil +} + +const ( + baggageMaxItems = 64 + baggageMaxBytes = 8192 + safeCharactersKey = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!#$%&'*+-.^_`|~" + safeCharactersValue = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!#$%&'()*+-./:<>?@[]^_`{|}~" +) + +// encodeKey encodes a key with the specified safe characters +func encodeKey(key string) string { + return urlEncode(strings.TrimSpace(key), safeCharactersKey) +} + +// encodeValue encodes a value with the specified safe characters +func encodeValue(value string) string { + return urlEncode(strings.TrimSpace(value), safeCharactersValue) +} + +// urlEncode performs percent-encoding while respecting the safe characters +func urlEncode(input string, safeCharacters string) string { + var encoded strings.Builder + for _, c := range input { + if strings.ContainsRune(safeCharacters, c) { + encoded.WriteRune(c) + } else { + encoded.WriteString(url.QueryEscape(string(c))) + } + } + return encoded.String() +} + +// propagatorBaggage implements Propagator and injects/extracts span contexts +// using baggage headers. +type propagatorBaggage struct{} + +func (p *propagatorBaggage) Inject(spanCtx *SpanContext, carrier interface{}) error { + switch c := carrier.(type) { + case TextMapWriter: + return p.injectTextMap(spanCtx, c) + default: + return ErrInvalidCarrier + } +} + +// injectTextMap propagates baggage items from the span context into the writer, +// in the format of a single HTTP "baggage" header. Baggage consists of key=value pairs, +// separated by commas. This function enforces a maximum number of baggage items and a maximum overall size. +// If either limit is exceeded, excess items or bytes are dropped, and a warning is logged. 
+// +// Example of a single "baggage" header: +// baggage: foo=bar,baz=qux +// +// Each key and value pair is encoded and added to the existing baggage header in = format, +// joined together by commas, +func (*propagatorBaggage) injectTextMap(ctx *SpanContext, writer TextMapWriter) error { + if ctx == nil { + return nil + } + + ctr := 0 + var baggageBuilder strings.Builder + ctx.ForeachBaggageItem(func(k, v string) bool { + if ctr >= baggageMaxItems { + return false + } + + var itemBuilder strings.Builder + if ctr > 0 { + itemBuilder.WriteRune(',') + } + + itemBuilder.WriteString(encodeKey(k)) + itemBuilder.WriteRune('=') + itemBuilder.WriteString(encodeValue(v)) + if itemBuilder.Len()+baggageBuilder.Len() > baggageMaxBytes { + return false + } + baggageBuilder.WriteString(itemBuilder.String()) + ctr++ + return true + }) + if baggageBuilder.Len() > 0 { + writer.Set("baggage", baggageBuilder.String()) + } + return nil +} + +func (p *propagatorBaggage) Extract(carrier interface{}) (*SpanContext, error) { + switch c := carrier.(type) { + case TextMapReader: + return p.extractTextMap(c) + default: + return nil, ErrInvalidCarrier + } +} + +func (*propagatorBaggage) extractTextMap(reader TextMapReader) (*SpanContext, error) { + var baggageHeader string + var ctx SpanContext + err := reader.ForeachKey(func(k, v string) error { + if strings.ToLower(k) == "baggage" { + // Expect only one baggage header, return early + baggageHeader = v + return nil + } + return nil + }) + if err != nil { + return nil, err + } + + if baggageHeader == "" { + return &ctx, nil + } + + parts := strings.Split(baggageHeader, ",") + + // 1) validation & single-trim pass + for i, kv := range parts { + k, v, ok := strings.Cut(kv, "=") + trimmedK := strings.TrimSpace(k) + trimmedV := strings.TrimSpace(v) + if !ok || trimmedK == "" || trimmedV == "" { + log.Warn("invalid baggage item: %q, dropping entire header", kv) + return &ctx, nil + } + // store back the trimmed pair so we don't re-trim below + parts[i] = trimmedK + "=" + trimmedV + } + + // 2) safe to URL-decode & apply + for _, kv := range parts { + rawK, rawV, _ := strings.Cut(kv, "=") + key, _ := url.QueryUnescape(rawK) + val, _ := url.QueryUnescape(rawV) + ctx.setBaggageItem(key, val) + } + + return &ctx, nil +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/time.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time.go rename to vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/time.go diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/time_windows.go similarity index 87% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows.go rename to vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/time_windows.go index f1ecd4f9..a4993c96 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/time_windows.go @@ -10,7 +10,7 @@ import ( "golang.org/x/sys/windows" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/log" ) // This method is more precise than the go1.8 time.Now on Windows @@ -31,7 +31,7 @@ func lowPrecisionNow() int64 { // nil dereference panic. 
var now func() int64 = func() func() int64 { if err := windows.LoadGetSystemTimePreciseAsFileTime(); err != nil { - log.Warn("Unable to load high precison timer, defaulting to time.Now()") + log.Warn("Unable to load high precision timer, defaulting to time.Now()") return lowPrecisionNow } else { return highPrecisionNow @@ -40,7 +40,7 @@ var now func() int64 = func() func() int64 { var nowTime func() time.Time = func() func() time.Time { if err := windows.LoadGetSystemTimePreciseAsFileTime(); err != nil { - log.Warn("Unable to load high precison timer, defaulting to time.Now()") + log.Warn("Unable to load high precision timer, defaulting to time.Now()") return func() time.Time { return time.Unix(0, lowPrecisionNow()) } } else { return func() time.Time { return time.Unix(0, highPrecisionNow()) } diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer.go new file mode 100644 index 00000000..402998b8 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer.go @@ -0,0 +1,1059 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + gocontext "context" + "encoding/binary" + "fmt" + "log/slog" + "math" + "os" + "runtime/pprof" + rt "runtime/trace" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/ddtrace/internal/tracerstats" + globalinternal "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/appsec" + appsecConfig "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/datastreams" + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + "github.com/DataDog/dd-trace-go/v2/internal/llmobs" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/remoteconfig" + "github.com/DataDog/dd-trace-go/v2/internal/samplernames" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + "github.com/DataDog/dd-trace-go/v2/internal/traceprof" + "github.com/DataDog/dd-trace-go/v2/internal/version" + "github.com/google/uuid" + + "github.com/DataDog/datadog-agent/pkg/obfuscate" + "github.com/DataDog/go-runtime-metrics-internal/pkg/runtimemetrics" +) + +type TracerConf struct { //nolint:revive + CanComputeStats bool + CanDropP0s bool + DebugAbandonedSpans bool + Disabled bool + PartialFlush bool + PartialFlushMinSpans int + PeerServiceDefaults bool + PeerServiceMappings map[string]string + EnvTag string + VersionTag string + ServiceTag string + TracingAsTransport bool +} + +// Tracer specifies an implementation of the Datadog tracer which allows starting +// and propagating spans. The official implementation if exposed as functions +// within the "tracer" package. +type Tracer interface { + // StartSpan starts a span with the given operation name and options. + StartSpan(operationName string, opts ...StartSpanOption) *Span + + // Extract extracts a span context from a given carrier. Note that baggage item + // keys will always be lower-cased to maintain consistency. It is impossible to + // maintain the original casing due to MIME header canonicalization standards. 
+ Extract(carrier interface{}) (*SpanContext, error) + + // Inject injects a span context into the given carrier. + Inject(context *SpanContext, carrier interface{}) error + + // TracerConf returns a snapshot of the current configuration of the tracer. + TracerConf() TracerConf + + // Flush flushes any buffered traces. Flush is in effect only if a tracer + // is started. Users do not have to call Flush in order to ensure that + // traces reach Datadog. It is a convenience method dedicated to specific + // use cases. + Flush() + + // Stop stops the tracer. Calls to Stop should be idempotent. + Stop() +} + +var _ Tracer = (*tracer)(nil) + +// tracer creates, buffers and submits Spans which are used to time blocks of +// computation. They are accumulated and streamed into an internal payload, +// which is flushed to the agent whenever its size exceeds a specific threshold +// or when a certain interval of time has passed, whichever happens first. +// +// tracer operates based on a worker loop which responds to various request +// channels. It additionally holds two buffers which accumulates error and trace +// queues to be processed by the payload encoder. +type tracer struct { + config *config + + // stats specifies the concentrator used to compute statistics, when client-side + // stats are enabled. + stats *concentrator + + // traceWriter is responsible for sending finished traces to their + // destination, such as the Trace Agent or Datadog Forwarder. + traceWriter traceWriter + + // out receives chunk with spans to be added to the payload. + out chan *chunk + + // flush receives a channel onto which it will confirm after a flush has been + // triggered and completed. + flush chan chan<- struct{} + + // stop causes the tracer to shut down when closed. + stop chan struct{} + + // stopOnce ensures the tracer is stopped exactly once. + stopOnce sync.Once + + // wg waits for all goroutines to exit when stopping. + wg sync.WaitGroup + + // These maps count the spans started and finished from + // each component, including contribs and "manual" spans. + spansStarted, spansFinished globalinternal.XSyncMapCounterMap + + // Keeps track of the total number of traces dropped for accurate logging. + totalTracesDropped uint32 + + logDroppedTraces *time.Ticker + + // prioritySampling holds an instance of the priority sampler. + prioritySampling *prioritySampler + + // pid of the process + pid int + + // rulesSampling holds an instance of the rules sampler used to apply either trace sampling, + // or single span sampling rules on spans. These are user-defined + // rules for applying a sampling rate to spans that match the designated service + // or operation name. + rulesSampling *rulesSampler + + // obfuscator holds the obfuscator used to obfuscate resources in aggregated stats. + // obfuscator may be nil if disabled. + obfuscator *obfuscate.Obfuscator + + // statsd is used for tracking metrics associated with the runtime and the tracer. + statsd globalinternal.StatsdClient + + // dataStreams processes data streams monitoring information + dataStreams *datastreams.Processor + + // abandonedSpansDebugger specifies where and how potentially abandoned spans are stored + // when abandoned spans debugging is enabled. 
+ abandonedSpansDebugger *abandonedSpansDebugger + + // logFile contains a pointer to the file for writing tracer logs along with helper functionality for closing the file + // logFile is closed when tracer stops + // by default, tracer logs to stderr and this setting is unused + logFile *log.ManagedFile + + // runtimeMetrics is submitting runtime metrics to the agent using statsd. + runtimeMetrics *runtimemetrics.Emitter + + // telemetry is the telemetry client for the tracer. + telemetry telemetry.Client +} + +const ( + // flushInterval is the interval at which the payload contents will be flushed + // to the transport. + flushInterval = 2 * time.Second + + // payloadMaxLimit is the maximum payload size allowed and should indicate the + // maximum size of the package that the agent can receive. + payloadMaxLimit = 9.5 * 1024 * 1024 // 9.5 MB + + // payloadSizeLimit specifies the maximum allowed size of the payload before + // it will trigger a flush to the transport. + payloadSizeLimit = payloadMaxLimit / 2 + + // concurrentConnectionLimit specifies the maximum number of concurrent outgoing + // connections allowed. + concurrentConnectionLimit = 100 +) + +// statsInterval is the interval at which health metrics will be sent with the +// statsd client; replaced in tests. +var statsInterval = 10 * time.Second + +// startStopMu ensures that calling Start and Stop concurrently doesn't leak +// goroutines. In particular, without this lock TestTracerCleanStop will leak +// goroutines from the internal telemetry client. +// +// TODO: The entire Start/Stop code should be refactored, it's pretty gnarly. +var startStopMu sync.Mutex + +// Start starts the tracer with the given set of options. It will stop and replace +// any running tracer, meaning that calling it several times will result in a restart +// of the tracer by replacing the current instance with a new one. +func Start(opts ...StartOption) error { + startStopMu.Lock() + defer startStopMu.Unlock() + + defer func(now time.Time) { + telemetry.Distribution(telemetry.NamespaceGeneral, "init_time", nil).Submit(float64(time.Since(now).Milliseconds())) + }(time.Now()) + t, err := newTracer(opts...) + if err != nil { + return err + } + if !t.config.enabled.current { + // TODO: instrumentation telemetry client won't get started + // if tracing is disabled, but we still want to capture this + // telemetry information. Will be fixed when the tracer and profiler + // share control of the global telemetry client. + t.Stop() + return nil + } + setGlobalTracer(t) + if t.dataStreams != nil { + t.dataStreams.Start() + } + if t.config.ciVisibilityAgentless { + // CI Visibility agentless mode doesn't require remote configuration. + + // start instrumentation telemetry unless it is disabled through the + // DD_INSTRUMENTATION_TELEMETRY_ENABLED env var + t.telemetry = startTelemetry(t.config) + + globalinternal.SetTracerInitialized(true) + return nil + } + + // Start AppSec with remote configuration + cfg := remoteconfig.DefaultClientConfig() + cfg.AgentURL = t.config.agentURL.String() + cfg.AppVersion = t.config.version + cfg.Env = t.config.env + cfg.HTTP = t.config.httpClient + cfg.ServiceName = t.config.serviceName + if err := t.startRemoteConfig(cfg); err != nil { + log.Warn("Remote config startup error: %s", err.Error()) + } + + // appsec.Start() may use the telemetry client to report activation, so it is + // important this happens _AFTER_ startTelemetry() has been called, so the + // client is appropriately configured. 
+ appsecopts := make([]appsecConfig.StartOption, 0, len(t.config.appsecStartOptions)+1) + appsecopts = append(appsecopts, t.config.appsecStartOptions...) + appsecopts = append(appsecopts, appsecConfig.WithRCConfig(cfg), appsecConfig.WithMetaStructAvailable(t.config.agent.metaStructAvailable)) + + appsec.Start(appsecopts...) + + if t.config.llmobs.Enabled { + if err := llmobs.Start(t.config.llmobs, &llmobsTracerAdapter{}); err != nil { + return fmt.Errorf("failed to start llmobs: %w", err) + } + } + if t.config.logStartup { + logStartup(t) + } + + // start instrumentation telemetry unless it is disabled through the + // DD_INSTRUMENTATION_TELEMETRY_ENABLED env var + t.telemetry = startTelemetry(t.config) + + // store the configuration in an in-memory file, allowing it to be read to + // determine if the process is instrumented with a tracer and to retrive + // relevant tracing information. + storeConfig(t.config) + + globalinternal.SetTracerInitialized(true) + return nil +} + +func storeConfig(c *config) { + uuid, _ := uuid.NewRandom() + name := fmt.Sprintf("datadog-tracer-info-%s", uuid.String()[0:8]) + + metadata := Metadata{ + SchemaVersion: 1, + RuntimeID: globalconfig.RuntimeID(), + Language: "go", + Version: version.Tag, + Hostname: c.hostname, + ServiceName: c.serviceName, + ServiceEnvironment: c.env, + ServiceVersion: c.version, + } + + data, _ := metadata.MarshalMsg(nil) + _, err := globalinternal.CreateMemfd(name, data) + if err != nil { + log.Error("failed to store the configuration: %s", err.Error()) + } +} + +// Stop stops the started tracer. Subsequent calls are valid but become no-op. +func Stop() { + startStopMu.Lock() + defer startStopMu.Unlock() + + llmobs.Stop() + setGlobalTracer(&NoopTracer{}) + globalinternal.SetTracerInitialized(false) + log.Flush() +} + +// StartSpan starts a new span with the given operation name and set of options. +// If the tracer is not started, calling this function is a no-op. +func StartSpan(operationName string, opts ...StartSpanOption) *Span { + return getGlobalTracer().StartSpan(operationName, opts...) +} + +// Extract extracts a SpanContext from the carrier. The carrier is expected +// to implement TextMapReader, otherwise an error is returned. +// If the tracer is not started, calling this function is a no-op. +func Extract(carrier interface{}) (*SpanContext, error) { + return getGlobalTracer().Extract(carrier) +} + +// Inject injects the given SpanContext into the carrier. The carrier is +// expected to implement TextMapWriter, otherwise an error is returned. +// If the tracer is not started, calling this function is a no-op. +func Inject(ctx *SpanContext, carrier interface{}) error { + return getGlobalTracer().Inject(ctx, carrier) +} + +// SetUser associates user information to the current trace which the +// provided span belongs to. The options can be used to tune which user +// bit of information gets monitored. In case of distributed traces, +// the user id can be propagated across traces using the WithPropagation() option. +// See https://docs.datadoghq.com/security_platform/application_security/setup_and_configure/?tab=set_user#add-user-information-to-traces +func SetUser(s *Span, id string, opts ...UserMonitoringOption) { + if s == nil { + return + } + s.SetUser(id, opts...) +} + +// payloadQueueSize is the buffer size of the trace channel. +const payloadQueueSize = 1000 + +func newUnstartedTracer(opts ...StartOption) (t *tracer, err error) { + c, err := newConfig(opts...) 
+ if err != nil { + return nil, err + } + sampler := newPrioritySampler() + statsd, err := newStatsdClient(c) + if err != nil { + log.Error("Runtime and health metrics disabled: %s", err.Error()) + return nil, fmt.Errorf("could not initialize statsd client: %s", err.Error()) + } + defer func() { + if err != nil { + statsd.Close() + } + }() + var writer traceWriter + if c.ciVisibilityEnabled { + writer = newCiVisibilityTraceWriter(c) + } else if c.logToStdout { + writer = newLogTraceWriter(c, statsd) + } else { + writer = newAgentTraceWriter(c, sampler, statsd) + } + traces, spans, err := samplingRulesFromEnv() + if err != nil { + log.Warn("DIAGNOSTICS Error(s) parsing sampling rules: found errors: %s", err.Error()) + return nil, fmt.Errorf("found errors when parsing sampling rules: %w", err) + } + if traces != nil { + c.traceRules = traces + } + if spans != nil { + c.spanRules = spans + } + + rulesSampler := newRulesSampler(c.traceRules, c.spanRules, c.globalSampleRate, c.traceRateLimitPerSecond) + c.traceSampleRate = newDynamicConfig("trace_sample_rate", c.globalSampleRate, rulesSampler.traces.setGlobalSampleRate, equal[float64]) + // If globalSampleRate returns NaN, it means the environment variable was not set or valid. + // We could always set the origin to "env_var" inconditionally, but then it wouldn't be possible + // to distinguish between the case where the environment variable was not set and the case where + // it default to NaN. + if !math.IsNaN(c.globalSampleRate) { + c.traceSampleRate.cfgOrigin = telemetry.OriginEnvVar + } + c.traceSampleRules = newDynamicConfig("trace_sample_rules", c.traceRules, + rulesSampler.traces.setTraceSampleRules, EqualsFalseNegative) + var dataStreamsProcessor *datastreams.Processor + if c.dataStreamsMonitoringEnabled { + dataStreamsProcessor = datastreams.NewProcessor(statsd, c.env, c.serviceName, c.version, c.agentURL, c.httpClient) + } + var logFile *log.ManagedFile + if v := c.logDirectory; v != "" { + logFile, err = log.OpenFileAtPath(v) + if err != nil { + log.Warn("%s", err.Error()) + c.logDirectory = "" + } + } + t = &tracer{ + config: c, + traceWriter: writer, + out: make(chan *chunk, payloadQueueSize), + stop: make(chan struct{}), + flush: make(chan chan<- struct{}), + rulesSampling: rulesSampler, + prioritySampling: sampler, + pid: os.Getpid(), + logDroppedTraces: time.NewTicker(1 * time.Second), + stats: newConcentrator(c, defaultStatsBucketSize, statsd), + spansStarted: *globalinternal.NewXSyncMapCounterMap(), + spansFinished: *globalinternal.NewXSyncMapCounterMap(), + obfuscator: obfuscate.NewObfuscator(obfuscate.Config{ + SQL: obfuscate.SQLConfig{ + TableNames: c.agent.HasFlag("table_names"), + ReplaceDigits: c.agent.HasFlag("quantize_sql_tables") || c.agent.HasFlag("replace_sql_digits"), + KeepSQLAlias: c.agent.HasFlag("keep_sql_alias"), + DollarQuotedFunc: c.agent.HasFlag("dollar_quoted_func"), + }, + }), + statsd: statsd, + dataStreams: dataStreamsProcessor, + logFile: logFile, + } + return t, nil +} + +// newTracer creates a new tracer and starts it. +// NOTE: This function does NOT set the global tracer, which is required for +// most finish span/flushing operations to work as expected. If you are calling +// span.Finish and/or expecting flushing to work, you must call +// setGlobalTracer(...) with the tracer provided by this function. +func newTracer(opts ...StartOption) (*tracer, error) { + t, err := newUnstartedTracer(opts...) 
+ if err != nil { + return nil, err + } + c := t.config + t.statsd.Incr("datadog.tracer.started", nil, 1) + if c.runtimeMetrics { + log.Debug("Runtime metrics enabled.") + t.wg.Add(1) + go func() { + defer t.wg.Done() + t.reportRuntimeMetrics(defaultMetricsReportInterval) + }() + } + if c.runtimeMetricsV2 { + l := slog.New(slogHandler{}) + opts := &runtimemetrics.Options{Logger: l} + if t.runtimeMetrics, err = runtimemetrics.NewEmitter(t.statsd, opts); err == nil { + l.Debug("Runtime metrics v2 enabled.") + } else { + l.Error("Failed to enable runtime metrics v2", "err", err.Error()) + } + } + if c.debugAbandonedSpans { + log.Info("Abandoned spans logs enabled.") + t.abandonedSpansDebugger = newAbandonedSpansDebugger() + t.abandonedSpansDebugger.Start(t.config.spanTimeout) + } + t.wg.Add(1) + go func() { + defer t.wg.Done() + tick := t.config.tickChan + if tick == nil { + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + tick = ticker.C + } + t.worker(tick) + }() + t.wg.Add(1) + go func() { + defer t.wg.Done() + t.reportHealthMetricsAtInterval(statsInterval) + }() + t.stats.Start() + return t, nil +} + +// Flush flushes any buffered traces. Flush is in effect only if a tracer +// is started. Users do not have to call Flush in order to ensure that +// traces reach Datadog. It is a convenience method dedicated to a specific +// use case described below. +// +// Flush is of use in Lambda environments, where starting and stopping +// the tracer on each invocation may create too much latency. In this +// scenario, a tracer may be started and stopped by the parent process +// whereas the invocation can make use of Flush to ensure any created spans +// reach the agent. +func Flush() { + if t := getGlobalTracer(); t != nil { + t.Flush() + } + llmobs.Flush() +} + +// Flush triggers a flush and waits for it to complete. +func (t *tracer) Flush() { + done := make(chan struct{}) + t.flush <- done + <-done + if t.dataStreams != nil { + t.dataStreams.Flush() + } +} + +// worker receives finished traces to be added into the payload, as well +// as periodically flushes traces to the transport. +func (t *tracer) worker(tick <-chan time.Time) { + for { + select { + case trace := <-t.out: + t.sampleChunk(trace) + if len(trace.spans) > 0 { + t.traceWriter.add(trace.spans) + } + case <-tick: + t.statsd.Incr("datadog.tracer.flush_triggered", []string{"reason:scheduled"}, 1) + t.traceWriter.flush() + + case done := <-t.flush: + t.statsd.Incr("datadog.tracer.flush_triggered", []string{"reason:invoked"}, 1) + t.traceWriter.flush() + t.statsd.Flush() + if !t.config.tracingAsTransport { + t.stats.flushAndSend(time.Now(), withCurrentBucket) + } + // TODO(x): In reality, the traceWriter.flush() call is not synchronous + // when using the agent traceWriter. However, this functionality is used + // in Lambda so for that purpose this mechanism should suffice. + done <- struct{}{} + + case <-t.stop: + loop: + // the loop ensures that the payload channel is fully drained + // before the final flush to ensure no traces are lost (see #526) + for { + select { + case trace := <-t.out: + t.sampleChunk(trace) + if len(trace.spans) > 0 { + t.traceWriter.add(trace.spans) + } + default: + break loop + } + } + return + } + } +} + +// chunk holds information about a trace chunk to be flushed, including its spans. +// The chunk may be a fully finished local trace chunk, or only a portion of the local trace chunk in the case of +// partial flushing. +// +// It's exported for supporting `mocktracer`. 
+type chunk struct { + spans []*Span + willSend bool // willSend indicates whether the trace will be sent to the agent. +} + +// sampleChunk applies single-span sampling to the provided trace. +func (t *tracer) sampleChunk(c *chunk) { + if len(c.spans) > 0 { + if p, ok := c.spans[0].context.SamplingPriority(); ok && p > 0 { + // The trace is kept, no need to run single span sampling rules. + return + } + } + var kept []*Span + if t.rulesSampling.HasSpanRules() { + // Apply sampling rules to individual spans in the trace. + for _, span := range c.spans { + if t.rulesSampling.SampleSpan(span) { + kept = append(kept, span) + } + } + if len(kept) > 0 && len(kept) < len(c.spans) { + // Some spans in the trace were kept, so a partial trace will be sent. + tracerstats.Signal(tracerstats.PartialTraces, 1) + } + } + tracerstats.Signal(tracerstats.DroppedP0Spans, uint32(len(c.spans)-len(kept))) + if !c.willSend { + if len(kept) == 0 { + tracerstats.Signal(tracerstats.DroppedP0Traces, 1) + } + c.spans = kept + } +} + +func (t *tracer) pushChunk(trace *chunk) { + tracerstats.Signal(tracerstats.SpansFinished, uint32(len(trace.spans))) + select { + case <-t.stop: + return + default: + } + select { + case t.out <- trace: + default: + log.Debug("payload queue full, trace dropped %d spans", len(trace.spans)) + atomic.AddUint32(&t.totalTracesDropped, 1) + } + select { + case <-t.logDroppedTraces.C: + if t := atomic.SwapUint32(&t.totalTracesDropped, 0); t > 0 { + log.Error("%d traces dropped through payload queue", t) + } + default: + } +} + +func spanStart(operationName string, options ...StartSpanOption) *Span { + var opts StartSpanConfig + for _, fn := range options { + if fn == nil { + continue + } + fn(&opts) + } + var startTime int64 + if opts.StartTime.IsZero() { + startTime = now() + } else { + startTime = opts.StartTime.UnixNano() + } + var context *SpanContext + // The default pprof context is taken from the start options and is + // not nil when using StartSpanFromContext() + pprofContext := opts.Context + if opts.Parent != nil { + context = opts.Parent + if pprofContext == nil && context.span != nil { + // Inherit the context.Context from parent span if it was propagated + // using ChildOf() rather than StartSpanFromContext(), see + // applyPPROFLabels() below. + context.span.mu.RLock() + pprofContext = context.span.pprofCtxActive + context.span.mu.RUnlock() + } + } + if pprofContext == nil { + // For root span's without context, there is no pprofContext, but we need + // one to avoid a panic() in pprof.WithLabels(). Using context.Background() + // is not ideal here, as it will cause us to remove all labels from the + // goroutine when the span finishes. However, the alternatives of not + // applying labels for such spans or to leave the endpoint/hotspot labels + // on the goroutine after it finishes are even less appealing. We'll have + // to properly document this for users. + pprofContext = gocontext.Background() + } + id := opts.SpanID + if id == 0 { + id = generateSpanID(startTime) + } + // span defaults + span := &Span{ + name: operationName, + service: "", + resource: operationName, + spanID: id, + traceID: id, + start: startTime, + integration: "manual", + } + + span.spanLinks = append(span.spanLinks, opts.SpanLinks...) 
+ + if context != nil && !context.baggageOnly { + // this is a child span + span.traceID = context.traceID.Lower() + span.parentID = context.spanID + if p, ok := context.SamplingPriority(); ok { + span.setMetric(keySamplingPriority, float64(p)) + } + if context.span != nil { + // local parent, inherit service + context.span.mu.RLock() + span.service = context.span.service + context.span.mu.RUnlock() + } else { + // remote parent + if context.origin != "" { + // mark origin + span.setMeta(keyOrigin, context.origin) + } + } + + if context.reparentID != "" { + span.setMeta(keyReparentID, context.reparentID) + } + + } + span.context = newSpanContext(span, context) + if pprofContext != nil { + setLLMObsPropagatingTags(pprofContext, span.context) + } + span.setMeta("language", "go") + // add tags from options + for k, v := range opts.Tags { + span.SetTag(k, v) + } + isRootSpan := context == nil || context.span == nil + if isRootSpan { + traceprof.SetProfilerRootTags(span) + } + if isRootSpan || context.span.service != span.service { + // The span is the local root span. + span.setMetric(keyTopLevel, 1) + // all top level spans are measured. So the measured tag is redundant. + delete(span.metrics, keyMeasured) + } + pprofContext, span.taskEnd = startExecutionTracerTask(pprofContext, span) + span.pprofCtxRestore = pprofContext + return span +} + +// StartSpan creates, starts, and returns a new Span with the given `operationName`. +func (t *tracer) StartSpan(operationName string, options ...StartSpanOption) *Span { + if !t.config.enabled.current { + return nil + } + span := spanStart(operationName, options...) + if span.service == "" { + span.service = t.config.serviceName + } + span.noDebugStack = t.config.noDebugStack + if t.config.hostname != "" { + span.setMeta(keyHostname, t.config.hostname) + } + span.supportsEvents = t.config.agent.spanEventsAvailable + + // add global tags + for k, v := range t.config.globalTags.get() { + span.SetTag(k, v) + } + if t.config.serviceMappings != nil { + if newSvc, ok := t.config.serviceMappings[span.service]; ok { + span.service = newSvc + } + } + if t.config.version != "" { + if t.config.universalVersion || (!t.config.universalVersion && span.service == t.config.serviceName) { + span.setMeta(ext.Version, t.config.version) + } + } + if t.config.env != "" { + span.setMeta(ext.Environment, t.config.env) + } + if _, ok := span.context.SamplingPriority(); !ok { + // if not already sampled or a brand new trace, sample it + t.sample(span) + } + if t.config.serviceMappings != nil { + if newSvc, ok := t.config.serviceMappings[span.service]; ok { + span.service = newSvc + } + } + if log.DebugEnabled() { + // avoid allocating the ...interface{} argument if debug logging is disabled + log.Debug("Started Span: %v, Operation: %s, Resource: %s, Tags: %v, %v", //nolint:gocritic // Debug logging needs full span representation + span, span.name, span.resource, span.meta, span.metrics) + } + if t.config.profilerHotspots || t.config.profilerEndpoints { + t.applyPPROFLabels(span.pprofCtxRestore, span) + } else { + span.pprofCtxRestore = nil + } + if t.config.debugAbandonedSpans { + select { + case t.abandonedSpansDebugger.In <- newAbandonedSpanCandidate(span, false): + // ok + default: + log.Error("Abandoned spans channel full, disregarding span.") + } + } + if span.metrics[keyTopLevel] == 1 { + // The span is the local root span. 
+ span.setMetric(keySpanAttributeSchemaVersion, float64(t.config.spanAttributeSchemaVersion)) + } + span.setMetric(ext.Pid, float64(t.pid)) + t.spansStarted.Inc(span.integration) + + return span +} + +// applyPPROFLabels applies pprof labels for the profiler's code hotspots and +// endpoint filtering feature to span. When span finishes, any pprof labels +// found in ctx are restored. Additionally, this func informs the profiler how +// many times each endpoint is called. +func (t *tracer) applyPPROFLabels(ctx gocontext.Context, span *Span) { + // Important: The label keys are ordered alphabetically to take advantage of + // an upstream optimization that landed in go1.24. This results in ~10% + // better performance on BenchmarkStartSpan. See + // https://go-review.googlesource.com/c/go/+/574516 for more information. + labels := make([]string, 0, 3*2 /* 3 key value pairs */) + localRootSpan := span.Root() + if t.config.profilerHotspots && localRootSpan != nil { + localRootSpan.mu.RLock() + labels = append(labels, traceprof.LocalRootSpanID, strconv.FormatUint(localRootSpan.spanID, 10)) + localRootSpan.mu.RUnlock() + } + if t.config.profilerHotspots { + labels = append(labels, traceprof.SpanID, strconv.FormatUint(span.spanID, 10)) + } + if t.config.profilerEndpoints && localRootSpan != nil { + localRootSpan.mu.RLock() + if spanResourcePIISafe(localRootSpan) { + labels = append(labels, traceprof.TraceEndpoint, localRootSpan.resource) + if span == localRootSpan { + // Inform the profiler of endpoint hits. This is used for the unit of + // work feature. We can't use APM stats for this since the stats don't + // have enough cardinality (e.g. runtime-id tags are missing). + traceprof.GlobalEndpointCounter().Inc(localRootSpan.resource) + } + } + localRootSpan.mu.RUnlock() + } + if len(labels) > 0 { + span.pprofCtxRestore = ctx + span.pprofCtxActive = pprof.WithLabels(ctx, pprof.Labels(labels...)) + pprof.SetGoroutineLabels(span.pprofCtxActive) + } +} + +// spanResourcePIISafe returns true if s.resource can be considered to not +// include PII with reasonable confidence. E.g. SQL queries may contain PII, +// but http, rpc or custom (s.spanType == "") span resource names generally do not. +func spanResourcePIISafe(s *Span) bool { + return s.spanType == ext.SpanTypeWeb || s.spanType == ext.AppTypeRPC || s.spanType == "" +} + +// Stop stops the tracer. +func (t *tracer) Stop() { + t.stopOnce.Do(func() { + close(t.stop) + t.statsd.Incr("datadog.tracer.stopped", nil, 1) + }) + t.abandonedSpansDebugger.Stop() + t.stats.Stop() + t.wg.Wait() + t.traceWriter.stop() + if t.runtimeMetrics != nil { + t.runtimeMetrics.Stop() + } + t.statsd.Close() + if t.dataStreams != nil { + t.dataStreams.Stop() + } + appsec.Stop() + remoteconfig.Stop() + // Close log file last to account for any logs from the above calls + if t.logFile != nil { + t.logFile.Close() + } + if t.telemetry != nil { + t.telemetry.Close() + } + t.config.httpClient.CloseIdleConnections() +} + +// Inject uses the configured or default TextMap Propagator. 
+func (t *tracer) Inject(ctx *SpanContext, carrier interface{}) error {
+	if !t.config.enabled.current {
+		return nil
+	}
+
+	if t.config.tracingAsTransport {
+		// in tracing as transport mode, only propagate when there is an upstream appsec event
+		if ctx.trace != nil &&
+			!globalinternal.VerifyTraceSourceEnabled(ctx.trace.propagatingTag(keyPropagatedTraceSource), globalinternal.ASMTraceSource) {
+			return nil
+		}
+	}
+
+	t.updateSampling(ctx)
+	return t.config.propagator.Inject(ctx, carrier)
+}
+
+// updateSampling runs trace sampling rules on the context, since properties like resource / tags
+// could change and impact the result of sampling. This must be done once before context is propagated.
+func (t *tracer) updateSampling(ctx *SpanContext) {
+	if ctx == nil {
+		return
+	}
+	// without this check some mock span tests fail
+	if t.rulesSampling == nil || ctx.trace == nil || ctx.trace.root == nil {
+		return
+	}
+	// We want to avoid locking the entire trace from a span for long.
+	// If SampleTrace successfully samples the trace,
+	// it will lock the span and the trace mutexes in span.setSamplingPriorityLocked
+	// and trace.setSamplingPriority respectively, so we can't rely on those mutexes.
+	if ctx.trace.isLocked() {
+		// trace sampling decision already taken and locked, no re-sampling shall occur
+		return
+	}
+
+	// The span was sampled with ManualKeep; rules shouldn't override that decision.
+	if ctx.trace.propagatingTag(keyDecisionMaker) == "-4" {
+		return
+	}
+	// if sampling was successful, we need to lock the trace to prevent further re-sampling
+	if t.rulesSampling.SampleTrace(ctx.trace.root) {
+		ctx.trace.setLocked(true)
+	}
+}
+
+// Extract uses the configured or default TextMap Propagator.
+func (t *tracer) Extract(carrier interface{}) (*SpanContext, error) {
+	if !t.config.enabled.current {
+		return nil, nil
+	}
+	ctx, err := t.config.propagator.Extract(carrier)
+	if t.config.tracingAsTransport && ctx != nil {
+		// in tracing as transport mode, reset the upstream sampling decision to make sure we keep 1 trace/minute
+		if ctx.trace != nil &&
+			!globalinternal.VerifyTraceSourceEnabled(ctx.trace.propagatingTag(keyPropagatedTraceSource), globalinternal.ASMTraceSource) {
+			ctx.trace.priority = nil
+		}
+	}
+	if ctx != nil && ctx.trace != nil {
+		if _, ok := ctx.trace.samplingPriority(); ok {
+			// ensure that the trace isn't resampled
+			ctx.trace.setLocked(true)
+		}
+	}
+	return ctx, err
+}
+
+func (t *tracer) TracerConf() TracerConf {
+	return TracerConf{
+		CanComputeStats:      t.config.canComputeStats(),
+		CanDropP0s:           t.config.canDropP0s(),
+		DebugAbandonedSpans:  t.config.debugAbandonedSpans,
+		Disabled:             !t.config.enabled.current,
+		PartialFlush:         t.config.partialFlushEnabled,
+		PartialFlushMinSpans: t.config.partialFlushMinSpans,
+		PeerServiceDefaults:  t.config.peerServiceDefaultsEnabled,
+		PeerServiceMappings:  t.config.peerServiceMappings,
+		EnvTag:               t.config.env,
+		VersionTag:           t.config.version,
+		ServiceTag:           t.config.serviceName,
+		TracingAsTransport:   t.config.tracingAsTransport,
+	}
+}
+
+func (t *tracer) submit(s *Span) {
+	if !t.config.enabled.current {
+		return
+	}
+	// we have an active tracer
+	if !t.config.canDropP0s() {
+		return
+	}
+	statSpan, shouldCalc := t.stats.newTracerStatSpan(s, t.obfuscator)
+	if !shouldCalc {
+		return
+	}
+	// the agent supports computed stats
+	select {
+	case t.stats.In <- statSpan:
+		// ok
+	default:
+		log.Error("Stats channel full, disregarding span.")
+	}
+}
+
+func (t *tracer) submitAbandonedSpan(s *Span, finished bool) {
+	select {
+	case t.abandonedSpansDebugger.In <- newAbandonedSpanCandidate(s, finished):
+		// ok
+	default:
+		log.Error("Abandoned spans channel full, disregarding span.")
+	}
+}
+
+func (t *tracer) submitChunk(c *chunk) {
+	t.pushChunk(c)
+}
+
+// sampleRateMetricKey is the metric key holding the applied sample rate. Has to be the same as in the Agent.
+const sampleRateMetricKey = "_sample_rate"
+
+// Sample samples a span with the internal sampler.
+func (t *tracer) sample(span *Span) {
+	if _, ok := span.context.SamplingPriority(); ok {
+		// sampling decision was already made
+		return
+	}
+	sampler := t.config.sampler
+	if !sampler.Sample(span) {
+		span.context.trace.drop()
+		span.context.trace.setSamplingPriority(ext.PriorityAutoReject, samplernames.RuleRate)
+		return
+	}
+	if sampler.Rate() < 1 {
+		span.setMetric(sampleRateMetricKey, sampler.Rate())
+	}
+	if t.rulesSampling.SampleTrace(span) {
+		return
+	}
+	if t.rulesSampling.SampleTraceGlobalRate(span) {
+		return
+	}
+	t.prioritySampling.apply(span)
+}
+
+func startExecutionTracerTask(ctx gocontext.Context, span *Span) (gocontext.Context, func()) {
+	if !rt.IsEnabled() {
+		return ctx, func() {}
+	}
+	span.goExecTraced = true
+	// Task name is the resource (operationName) of the span, e.g.
+	// "POST /foo/bar" (http) or "/foo/pkg.Method" (grpc).
+	taskName := span.resource
+	// If the resource could contain PII (e.g. an SQL query that's not using bind
+	// arguments), play it safe and just use the span type as the taskName,
+	// e.g. "sql".
+	if !spanResourcePIISafe(span) {
+		taskName = span.spanType
+	}
+	// The task name is an arbitrary string from the user. If it's too
+	// large, like a big SQL query, the execution tracer can crash when we
+	// create the task. Cap it at an arbitrary length; for "normal" task
+	// names this leaves plenty of the name for debugging.
+	taskName = taskName[:min(128, len(taskName))]
+	end := noopTaskEnd
+	if !globalinternal.IsExecutionTraced(ctx) {
+		var task *rt.Task
+		ctx, task = rt.NewTask(ctx, taskName)
+		end = task.End
+	} else {
+		// We only want to skip task creation for this particular span,
+		// not necessarily for child spans which can come from different
+		// integrations. So update this context to be "not" execution
+		// traced so that derived contexts used by child spans don't get
+		// skipped.
+		ctx = globalinternal.WithExecutionNotTraced(ctx)
+	}
+	var b [8]byte
+	binary.LittleEndian.PutUint64(b[:], span.spanID)
+	// TODO: can we make string(b[:]) not allocate? e.g. with unsafe
+	// shenanigans? rt.Log won't retain the message string, though perhaps
+	// we can't assume that will always be the case.
+	rt.Log(ctx, "datadog.uint64_span_id", string(b[:]))
+	return ctx, end
+}
+
+func noopTaskEnd() {}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer_metadata.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer_metadata.go
new file mode 100644
index 00000000..cf0bb8d9
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer_metadata.go
@@ -0,0 +1,27 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+package tracer
+
+// Metadata represents the configuration of the tracer.
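+//
+// A hedged usage sketch: MarshalMsg is provided by the msgp-generated code in
+// tracer_metadata_msgp.go below; the field values are placeholders.
+//
+//	md := Metadata{SchemaVersion: 1, RuntimeID: "f00d", Language: "go"}
+//	buf, err := md.MarshalMsg(nil) // msgpack-encodes md into a fresh slice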
+//
+//go:generate go run github.com/tinylib/msgp -unexported -marshal=true -o=tracer_metadata_msgp.go -tests=false
+type Metadata struct {
+	// Version of the schema.
+	SchemaVersion uint8 `msg:"schema_version"`
+	// Runtime UUID.
+	RuntimeID string `msg:"runtime_id"`
+	// Programming language of the tracer.
+	Language string `msg:"tracer_language"`
+	// Version of the tracer.
+	Version string `msg:"tracer_version"`
+	// Identifier of the machine running the process.
+	Hostname string `msg:"hostname"`
+	// Name of the service being instrumented.
+	ServiceName string `msg:"service_name"`
+	// Environment of the service being instrumented.
+	ServiceEnvironment string `msg:"service_env"`
+	// Version of the service being instrumented.
+	ServiceVersion string `msg:"service_version"`
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer_metadata_msgp.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer_metadata_msgp.go
new file mode 100644
index 00000000..9fb356e1
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer_metadata_msgp.go
@@ -0,0 +1,285 @@
+package tracer
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+	"github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *Metadata) DecodeMsg(dc *msgp.Reader) (err error) {
+	var field []byte
+	_ = field
+	var zb0001 uint32
+	zb0001, err = dc.ReadMapHeader()
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	for zb0001 > 0 {
+		zb0001--
+		field, err = dc.ReadMapKeyPtr()
+		if err != nil {
+			err = msgp.WrapError(err)
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "schema_version":
+			z.SchemaVersion, err = dc.ReadUint8()
+			if err != nil {
+				err = msgp.WrapError(err, "SchemaVersion")
+				return
+			}
+		case "runtime_id":
+			z.RuntimeID, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "RuntimeID")
+				return
+			}
+		case "tracer_language":
+			z.Language, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "Language")
+				return
+			}
+		case "tracer_version":
+			z.Version, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "Version")
+				return
+			}
+		case "hostname":
+			z.Hostname, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "Hostname")
+				return
+			}
+		case "service_name":
+			z.ServiceName, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "ServiceName")
+				return
+			}
+		case "service_env":
+			z.ServiceEnvironment, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "ServiceEnvironment")
+				return
+			}
+		case "service_version":
+			z.ServiceVersion, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "ServiceVersion")
+				return
+			}
+		default:
+			err = dc.Skip()
+			if err != nil {
+				err = msgp.WrapError(err)
+				return
+			}
+		}
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *Metadata) EncodeMsg(en *msgp.Writer) (err error) {
+	// map header, size 8
+	// write "schema_version"
+	err = en.Append(0x88, 0xae, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint8(z.SchemaVersion)
+	if err != nil {
+		err = msgp.WrapError(err, "SchemaVersion")
+		return
+	}
+	// write "runtime_id"
+	err = en.Append(0xaa, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x64)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.RuntimeID)
+	if err != nil {
+		err = msgp.WrapError(err, "RuntimeID")
+		return
+	}
+	// write "tracer_language"
err = en.Append(0xaf, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Language) + if err != nil { + err = msgp.WrapError(err, "Language") + return + } + // write "tracer_version" + err = en.Append(0xae, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Version) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + // write "hostname" + err = en.Append(0xa8, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Hostname) + if err != nil { + err = msgp.WrapError(err, "Hostname") + return + } + // write "service_name" + err = en.Append(0xac, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.ServiceName) + if err != nil { + err = msgp.WrapError(err, "ServiceName") + return + } + // write "service_env" + err = en.Append(0xab, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x76) + if err != nil { + return + } + err = en.WriteString(z.ServiceEnvironment) + if err != nil { + err = msgp.WrapError(err, "ServiceEnvironment") + return + } + // write "service_version" + err = en.Append(0xaf, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.ServiceVersion) + if err != nil { + err = msgp.WrapError(err, "ServiceVersion") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Metadata) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 8 + // string "schema_version" + o = append(o, 0x88, 0xae, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint8(o, z.SchemaVersion) + // string "runtime_id" + o = append(o, 0xaa, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x64) + o = msgp.AppendString(o, z.RuntimeID) + // string "tracer_language" + o = append(o, 0xaf, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65) + o = msgp.AppendString(o, z.Language) + // string "tracer_version" + o = append(o, 0xae, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.Version) + // string "hostname" + o = append(o, 0xa8, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Hostname) + // string "service_name" + o = append(o, 0xac, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.ServiceName) + // string "service_env" + o = append(o, 0xab, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x76) + o = msgp.AppendString(o, z.ServiceEnvironment) + // string "service_version" + o = append(o, 0xaf, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.ServiceVersion) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Metadata) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case 
"schema_version": + z.SchemaVersion, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "SchemaVersion") + return + } + case "runtime_id": + z.RuntimeID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RuntimeID") + return + } + case "tracer_language": + z.Language, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Language") + return + } + case "tracer_version": + z.Version, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "hostname": + z.Hostname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hostname") + return + } + case "service_name": + z.ServiceName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ServiceName") + return + } + case "service_env": + z.ServiceEnvironment, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ServiceEnvironment") + return + } + case "service_version": + z.ServiceVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ServiceVersion") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Metadata) Msgsize() (s int) { + s = 1 + 15 + msgp.Uint8Size + 11 + msgp.StringPrefixSize + len(z.RuntimeID) + 16 + msgp.StringPrefixSize + len(z.Language) + 15 + msgp.StringPrefixSize + len(z.Version) + 9 + msgp.StringPrefixSize + len(z.Hostname) + 13 + msgp.StringPrefixSize + len(z.ServiceName) + 12 + msgp.StringPrefixSize + len(z.ServiceEnvironment) + 16 + msgp.StringPrefixSize + len(z.ServiceVersion) + return +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/transport.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/transport.go new file mode 100644 index 00000000..1046392d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/transport.go @@ -0,0 +1,233 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package tracer + +import ( + "bytes" + "fmt" + "io" + "net" + "net/http" + "runtime" + "strconv" + "strings" + "time" + + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/internal/tracerstats" + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/version" + + "github.com/tinylib/msgp/msgp" +) + +const ( + // headerComputedTopLevel specifies that the client has marked top-level spans, when set. + // Any non-empty value will mean 'yes'. 
+	headerComputedTopLevel = "Datadog-Client-Computed-Top-Level"
+)
+
+func defaultDialer(timeout time.Duration) *net.Dialer {
+	return &net.Dialer{
+		Timeout:   timeout,
+		KeepAlive: 30 * time.Second,
+		DualStack: true,
+	}
+}
+
+func defaultHTTPClient(timeout time.Duration, disableKeepAlives bool) *http.Client {
+	if timeout == 0 {
+		timeout = defaultHTTPTimeout
+	}
+	return &http.Client{
+		Transport: &http.Transport{
+			Proxy:                 http.ProxyFromEnvironment,
+			DialContext:           defaultDialer(timeout).DialContext,
+			MaxIdleConns:          100,
+			IdleConnTimeout:       90 * time.Second,
+			TLSHandshakeTimeout:   10 * time.Second,
+			ExpectContinueTimeout: 1 * time.Second,
+			DisableKeepAlives:     disableKeepAlives,
+		},
+		Timeout: timeout,
+	}
+}
+
+const (
+	defaultHostname          = "localhost"
+	defaultPort              = "8126"
+	defaultAddress           = defaultHostname + ":" + defaultPort
+	defaultURL               = "http://" + defaultAddress
+	defaultHTTPTimeout       = 10 * time.Second             // the timeout before giving up on a send attempt
+	traceCountHeader         = "X-Datadog-Trace-Count"      // header containing the number of traces in the payload
+	obfuscationVersionHeader = "Datadog-Obfuscation-Version" // header containing the version of obfuscation used, if any
+
+	tracesAPIPath = "/v0.4/traces"
+	statsAPIPath  = "/v0.6/stats"
+)
+
+// transport is an interface for communicating data to the agent.
+type transport interface {
+	// send sends the payload p to the agent using the transport set up.
+	// It returns a non-nil response body when no error occurred.
+	send(p payload) (body io.ReadCloser, err error)
+	// sendStats sends the given stats payload to the agent.
+	// tracerObfuscationVersion is the version of obfuscation applied (0 if none was applied)
+	sendStats(s *pb.ClientStatsPayload, tracerObfuscationVersion int) error
+	// endpoint returns the URL to which the transport will send traces.
+	endpoint() string
+}
+
+type httpTransport struct {
+	traceURL string            // the delivery URL for traces
+	statsURL string            // the delivery URL for stats
+	client   *http.Client      // the HTTP client used in the POST
+	headers  map[string]string // the Transport headers
+}
+
+// newTransport returns a new Transport implementation that sends traces to a
+// trace agent at the given url, using a given *http.Client.
+//
+// In general, using this method is only necessary if you have a trace agent
+// running on a non-default port, if it's located on another machine, or when
+// otherwise needing to customize the transport layer, for instance when using
+// a unix domain socket.
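+//
+// For example (an illustrative sketch built only from identifiers defined in
+// this file):
+//
+//	client := defaultHTTPClient(0, false) // 0 falls back to defaultHTTPTimeout
+//	tr := newHTTPTransport(defaultURL, client)
+//	_ = tr.endpoint() // "http://localhost:8126/v0.4/traces"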
+func newHTTPTransport(url string, client *http.Client) *httpTransport {
+	// initialize the default set of headers sent with every request
+	defaultHeaders := map[string]string{
+		"Datadog-Meta-Lang":             "go",
+		"Datadog-Meta-Lang-Version":     strings.TrimPrefix(runtime.Version(), "go"),
+		"Datadog-Meta-Lang-Interpreter": runtime.Compiler + "-" + runtime.GOARCH + "-" + runtime.GOOS,
+		"Datadog-Meta-Tracer-Version":   version.Tag,
+		"Content-Type":                  "application/msgpack",
+	}
+	if cid := internal.ContainerID(); cid != "" {
+		defaultHeaders["Datadog-Container-ID"] = cid
+	}
+	if eid := internal.EntityID(); eid != "" {
+		defaultHeaders["Datadog-Entity-ID"] = eid
+	}
+	if extEnv := internal.ExternalEnvironment(); extEnv != "" {
+		defaultHeaders["Datadog-External-Env"] = extEnv
+	}
+	return &httpTransport{
+		traceURL: fmt.Sprintf("%s%s", url, tracesAPIPath),
+		statsURL: fmt.Sprintf("%s%s", url, statsAPIPath),
+		client:   client,
+		headers:  defaultHeaders,
+	}
+}
+
+func (t *httpTransport) sendStats(p *pb.ClientStatsPayload, tracerObfuscationVersion int) error {
+	var buf bytes.Buffer
+	if err := msgp.Encode(&buf, p); err != nil {
+		return err
+	}
+	req, err := http.NewRequest("POST", t.statsURL, &buf)
+	if err != nil {
+		return err
+	}
+	for header, value := range t.headers {
+		req.Header.Set(header, value)
+	}
+	if tracerObfuscationVersion > 0 {
+		req.Header.Set(obfuscationVersionHeader, strconv.Itoa(tracerObfuscationVersion))
+	}
+	resp, err := t.client.Do(req)
+	if err != nil {
+		reportAPIErrorsMetric(resp, err, statsAPIPath)
+		return err
+	}
+	defer resp.Body.Close()
+	if code := resp.StatusCode; code >= 400 {
+		reportAPIErrorsMetric(resp, err, statsAPIPath)
+		// error, check the body for context information and
+		// return a nice error.
+		msg := make([]byte, 1000)
+		n, _ := resp.Body.Read(msg)
+		resp.Body.Close()
+		txt := http.StatusText(code)
+		if n > 0 {
+			return fmt.Errorf("%s (Status: %s)", msg[:n], txt)
+		}
+		return fmt.Errorf("%s", txt)
+	}
+	return nil
+}
+
+func (t *httpTransport) send(p payload) (body io.ReadCloser, err error) {
+	req, err := http.NewRequest("POST", t.traceURL, p)
+	if err != nil {
+		return nil, fmt.Errorf("cannot create http request: %s", err.Error())
+	}
+	stats := p.stats()
+	req.ContentLength = int64(stats.size)
+	for header, value := range t.headers {
+		req.Header.Set(header, value)
+	}
+	req.Header.Set(traceCountHeader, strconv.Itoa(stats.itemCount))
+	req.Header.Set(headerComputedTopLevel, "yes")
+	if t := getGlobalTracer(); t != nil {
+		tc := t.TracerConf()
+		if tc.TracingAsTransport || tc.CanComputeStats {
+			// tracingAsTransport uses this header to disable the trace agent's stats computation
+			// while making canComputeStats() always false to also disable client stats computation.
+ req.Header.Set("Datadog-Client-Computed-Stats", "yes") + } + droppedTraces := int(tracerstats.Count(tracerstats.AgentDroppedP0Traces)) + partialTraces := int(tracerstats.Count(tracerstats.PartialTraces)) + droppedSpans := int(tracerstats.Count(tracerstats.AgentDroppedP0Spans)) + if tt, ok := t.(*tracer); ok { + if stats := tt.statsd; stats != nil { + stats.Count("datadog.tracer.dropped_p0_traces", int64(droppedTraces), + []string{fmt.Sprintf("partial:%s", strconv.FormatBool(partialTraces > 0))}, 1) + stats.Count("datadog.tracer.dropped_p0_spans", int64(droppedSpans), nil, 1) + } + } + req.Header.Set("Datadog-Client-Dropped-P0-Traces", strconv.Itoa(droppedTraces)) + req.Header.Set("Datadog-Client-Dropped-P0-Spans", strconv.Itoa(droppedSpans)) + } + response, err := t.client.Do(req) + if err != nil { + reportAPIErrorsMetric(response, err, tracesAPIPath) + return nil, err + } + if code := response.StatusCode; code >= 400 { + reportAPIErrorsMetric(response, err, tracesAPIPath) + // error, check the body for context information and + // return a nice error. + msg := make([]byte, 1000) + n, _ := response.Body.Read(msg) + response.Body.Close() + txt := http.StatusText(code) + if n > 0 { + return nil, fmt.Errorf("%s (Status: %s)", msg[:n], txt) + } + return nil, fmt.Errorf("%s", txt) + } + return response.Body, nil +} + +func reportAPIErrorsMetric(response *http.Response, err error, endpoint string) { + if t, ok := getGlobalTracer().(*tracer); ok { + var reason string + if err != nil { + reason = "network_failure" + } + if response != nil { + reason = fmt.Sprintf("server_response_%d", response.StatusCode) + } + tags := []string{"reason:" + reason, "endpoint:" + endpoint} + t.statsd.Incr("datadog.tracer.api.errors", tags, 1) + } else { + return + } +} + +func (t *httpTransport) endpoint() string { + return t.traceURL +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/util.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/util.go new file mode 100644 index 00000000..ac8402b7 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/util.go @@ -0,0 +1,129 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
+ +package tracer + +import ( + "fmt" + "strconv" + "strings" + + "github.com/DataDog/dd-trace-go/v2/internal/samplernames" +) + +// parseUint64 parses a uint64 from either an unsigned 64 bit base-10 string +// or a signed 64 bit base-10 string representing an unsigned integer +func parseUint64(str string) (uint64, error) { + if strings.HasPrefix(str, "-") { + id, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return 0, err + } + return uint64(id), nil + } + return strconv.ParseUint(str, 10, 64) +} + +func isValidPropagatableTag(k, v string) error { + if len(k) == 0 { + return fmt.Errorf("key length must be greater than zero") + } + for _, ch := range k { + if ch < 32 || ch > 126 || ch == ' ' || ch == '=' || ch == ',' { + return fmt.Errorf("key contains an invalid character %d", ch) + } + } + if len(v) == 0 { + return fmt.Errorf("value length must be greater than zero") + } + for _, ch := range v { + if ch < 32 || ch > 126 || ch == ',' { + return fmt.Errorf("value contains an invalid character %d", ch) + } + } + return nil +} + +func parsePropagatableTraceTags(s string) (map[string]string, error) { + if len(s) == 0 { + return nil, nil + } + tags := make(map[string]string) + searchingKey, start := true, 0 + var key string + for i, ch := range s { + switch ch { + case '=': + if searchingKey { + if i-start == 0 { + return nil, fmt.Errorf("invalid format") + } + key = s[start:i] + searchingKey, start = false, i+1 + } + case ',': + if searchingKey || i-start == 0 { + return nil, fmt.Errorf("invalid format") + } + tags[key] = s[start:i] + searchingKey, start = true, i+1 + } + } + if searchingKey || len(s)-start == 0 { + return nil, fmt.Errorf("invalid format") + } + tags[key] = s[start:] + return tags, nil +} + +func dereference(value any) any { + // Falling into one of the cases will dereference the pointer and return the + // value of the pointer. It adds one allocation due to casting. + switch v := value.(type) { + case *bool: + return dereferenceGeneric(v) + case *string: + return dereferenceGeneric(v) + // Supported type by toFloat64 + case *byte: + return dereferenceGeneric(v) + case *float32: + return dereferenceGeneric(v) + case *float64: + return dereferenceGeneric(v) + case *int: + return dereferenceGeneric(v) + case *int8: + return dereferenceGeneric(v) + case *int16: + return dereferenceGeneric(v) + case *int32: + return dereferenceGeneric(v) + case *int64: + return dereferenceGeneric(v) + case *uint: + return dereferenceGeneric(v) + case *uint16: + return dereferenceGeneric(v) + case *uint32: + return dereferenceGeneric(v) + case *uint64: + return dereferenceGeneric(v) + case *samplernames.SamplerName: + if v == nil { + return samplernames.Unknown + } + return *v + } + return value +} + +func dereferenceGeneric[T any](value *T) T { + if value == nil { + var v T + return v + } + return *value +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/writer.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/writer.go new file mode 100644 index 00000000..ea5c1c55 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/writer.go @@ -0,0 +1,387 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
+ +package tracer + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "math" + "os" + "strconv" + "sync" + "sync/atomic" + "time" + + globalinternal "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +type traceWriter interface { + // add adds traces to be sent by the writer. + add([]*Span) + + // flush causes the writer to send any buffered traces. + flush() + + // stop gracefully shuts down the writer. + stop() +} + +type agentTraceWriter struct { + // config holds the tracer configuration + config *config + + // mu synchronizes access to payload operations + mu sync.Mutex + + // payload encodes and buffers traces in msgpack format + payload payload + + // climit limits the number of concurrent outgoing connections + climit chan struct{} + + // wg waits for all uploads to finish + wg sync.WaitGroup + + // prioritySampling is the prioritySampler into which agentTraceWriter will + // read sampling rates sent by the agent + prioritySampling *prioritySampler + + // statsd is used to send metrics + statsd globalinternal.StatsdClient + + tracesQueued uint32 +} + +func newAgentTraceWriter(c *config, s *prioritySampler, statsdClient globalinternal.StatsdClient) *agentTraceWriter { + tw := &agentTraceWriter{ + config: c, + climit: make(chan struct{}, concurrentConnectionLimit), + prioritySampling: s, + statsd: statsdClient, + } + tw.payload = tw.newPayload() + return tw +} + +func (h *agentTraceWriter) add(trace []*Span) { + h.mu.Lock() + stats, err := h.payload.push(trace) + if err != nil { + h.mu.Unlock() + h.statsd.Incr("datadog.tracer.traces_dropped", []string{"reason:encoding_error"}, 1) + log.Error("Error encoding msgpack: %s", err.Error()) + return + } + // TODO: This does not differentiate between complete traces and partial chunks + atomic.AddUint32(&h.tracesQueued, 1) + + needsFlush := stats.size > payloadSizeLimit + h.mu.Unlock() + + if needsFlush { + h.statsd.Incr("datadog.tracer.flush_triggered", []string{"reason:size"}, 1) + h.flush() + } +} + +func (h *agentTraceWriter) stop() { + h.statsd.Incr("datadog.tracer.flush_triggered", []string{"reason:shutdown"}, 1) + h.flush() + h.wg.Wait() +} + +// newPayload returns a new payload based on the trace protocol. +func (h *agentTraceWriter) newPayload() payload { + return newPayload(h.config.traceProtocol) +} + +// flush will push any currently buffered traces to the server. +func (h *agentTraceWriter) flush() { + h.mu.Lock() + oldp := h.payload + // Check after acquiring lock + if oldp.itemCount() == 0 { + h.mu.Unlock() + return + } + h.payload = h.newPayload() + h.mu.Unlock() + + h.climit <- struct{}{} + h.wg.Add(1) + go func(p payload) { + defer func(start time.Time) { + // Once the payload has been used, clear the buffer for garbage + // collection to avoid a memory leak when references to this object + // may still be kept by faulty transport implementations or the + // standard library. 
See dd-trace-go#976
+			h.statsd.Count("datadog.tracer.queue.enqueued.traces", int64(atomic.SwapUint32(&h.tracesQueued, 0)), nil, 1)
+			p.clear()
+
+			<-h.climit
+			h.statsd.Timing("datadog.tracer.flush_duration", time.Since(start), nil, 1)
+			h.wg.Done()
+		}(time.Now())
+
+		stats := p.stats()
+		var err error
+		for attempt := 0; attempt <= h.config.sendRetries; attempt++ {
+			log.Debug("Attempt to send payload: size: %d traces: %d\n", stats.size, stats.itemCount)
+			var rc io.ReadCloser
+			rc, err = h.config.transport.send(p)
+			if err == nil {
+				log.Debug("sent traces after %d attempts", attempt+1)
+				h.statsd.Count("datadog.tracer.flush_bytes", int64(stats.size), nil, 1)
+				h.statsd.Count("datadog.tracer.flush_traces", int64(stats.itemCount), nil, 1)
+				if err := h.prioritySampling.readRatesJSON(rc); err != nil {
+					h.statsd.Incr("datadog.tracer.decode_error", nil, 1)
+				}
+				return
+			}
+
+			if (attempt+1)%5 == 0 {
+				log.Error("failure sending traces (attempt %d of %d): %v", attempt+1, h.config.sendRetries+1, err.Error())
+			}
+			p.reset()
+			time.Sleep(h.config.retryInterval)
+		}
+		h.statsd.Count("datadog.tracer.traces_dropped", int64(stats.itemCount), []string{"reason:send_failed"}, 1)
+		log.Error("lost %d traces: %v", stats.itemCount, err.Error())
+	}(oldp)
+}
+
+// logWriter specifies the output target of the logTraceWriter; replaced in tests.
+var logWriter io.Writer = os.Stdout
+
+// logTraceWriter encodes traces into a format understood by the Datadog Forwarder
+// (https://github.com/DataDog/datadog-serverless-functions/tree/master/aws/logs_monitoring)
+// and writes them to os.Stdout. This is used to send traces from an AWS Lambda environment.
+type logTraceWriter struct {
+	config    *config
+	buf       bytes.Buffer
+	hasTraces bool
+	w         io.Writer
+	statsd    globalinternal.StatsdClient
+}
+
+func newLogTraceWriter(c *config, statsdClient globalinternal.StatsdClient) *logTraceWriter {
+	w := &logTraceWriter{
+		config: c,
+		w:      logWriter,
+		statsd: statsdClient,
+	}
+	w.resetBuffer()
+	return w
+}
+
+const (
+	// maxFloatLength is the maximum length that a string encoded by encodeFloat will be.
+	maxFloatLength = 24
+
+	// logBufferSuffix is the final string that the trace writer has to append to a buffer to close
+	// the JSON.
+	logBufferSuffix = "]}\n"
+
+	// logBufferLimit is the maximum size log line allowed by cloudwatch
+	logBufferLimit = 256 * 1024
+)
+
+func (h *logTraceWriter) resetBuffer() {
+	h.buf.Reset()
+	h.buf.WriteString(`{"traces": [`)
+	h.hasTraces = false
+}
+
+// encodeFloat correctly encodes float64 into the JSON format followed by ES6.
+// This code is reworked from Go's encoding/json package
+// (https://github.com/golang/go/blob/go1.15/src/encoding/json/encode.go#L573)
+//
+// One important departure from encoding/json is that infinities and nans are encoded
+// as null rather than signalling an error.
+func encodeFloat(p []byte, f float64) []byte {
+	if math.IsInf(f, 0) || math.IsNaN(f) {
+		return append(p, "null"...)
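+		// Hedged illustrations of the resulting encoding: math.NaN() takes the
+		// branch above and yields "null"; 1e-9 yields "1e-9" after the exponent
+		// cleanup below.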
+ } + abs := math.Abs(f) + if abs != 0 && (abs < 1e-6 || abs >= 1e21) { + p = strconv.AppendFloat(p, f, 'e', -1, 64) + // clean up e-09 to e-9 + n := len(p) + if n >= 4 && p[n-4] == 'e' && p[n-3] == '-' && p[n-2] == '0' { + p[n-2] = p[n-1] + p = p[:n-1] + } + } else { + p = strconv.AppendFloat(p, f, 'f', -1, 64) + } + return p +} + +func (h *logTraceWriter) encodeSpan(s *Span) { + var scratch [maxFloatLength]byte + h.buf.WriteString(`{"trace_id":"`) + h.buf.Write(strconv.AppendUint(scratch[:0], uint64(s.traceID), 16)) + h.buf.WriteString(`","span_id":"`) + h.buf.Write(strconv.AppendUint(scratch[:0], uint64(s.spanID), 16)) + h.buf.WriteString(`","parent_id":"`) + h.buf.Write(strconv.AppendUint(scratch[:0], uint64(s.parentID), 16)) + h.buf.WriteString(`","name":`) + h.marshalString(s.name) + h.buf.WriteString(`,"resource":`) + h.marshalString(s.resource) + h.buf.WriteString(`,"error":`) + h.buf.Write(strconv.AppendInt(scratch[:0], int64(s.error), 10)) + h.buf.WriteString(`,"meta":{`) + first := true + for k, v := range s.meta { + if first { + first = false + } else { + h.buf.WriteString(`,`) + } + h.marshalString(k) + h.buf.WriteString(":") + h.marshalString(v) + } + // We cannot pack messagepack into JSON, so we need to marshal the meta struct as JSON, and send them through the `meta` field + for k, v := range s.metaStruct { + if first { + first = false + } else { + h.buf.WriteString(`,`) + } + h.marshalString(k) + h.buf.WriteString(":") + jsonValue, err := json.Marshal(v) + if err != nil { + log.Error("Error marshaling value %q: %v", v, err.Error()) + continue + } + h.marshalString(string(jsonValue)) + } + h.buf.WriteString(`},"metrics":{`) + first = true + for k, v := range s.metrics { + if math.IsNaN(v) || math.IsInf(v, 0) { + // The trace forwarder does not support infinity or nan, so we do not send metrics with those values. + continue + } + if first { + first = false + } else { + h.buf.WriteString(`,`) + } + h.marshalString(k) + h.buf.WriteString(`:`) + h.buf.Write(encodeFloat(scratch[:0], v)) + } + h.buf.WriteString(`},"start":`) + h.buf.Write(strconv.AppendInt(scratch[:0], s.start, 10)) + h.buf.WriteString(`,"duration":`) + h.buf.Write(strconv.AppendInt(scratch[:0], s.duration, 10)) + h.buf.WriteString(`,"service":`) + h.marshalString(s.service) + h.buf.WriteString(`}`) +} + +// marshalString marshals the string str as JSON into the writer's buffer. +// Should be used whenever writing non-constant string data to ensure correct sanitization. +func (h *logTraceWriter) marshalString(str string) { + m, err := json.Marshal(str) + if err != nil { + log.Error("Error marshaling value %q: %v", str, err.Error()) + } else { + h.buf.Write(m) + } +} + +type encodingError struct { + cause error + dropReason string +} + +// writeTrace makes an effort to write the trace into the current buffer. It returns +// the number of spans (n) that it wrote and an error (err), if one occurred. +// n may be less than len(trace), meaning that only the first n spans of the trace +// fit into the current buffer. Once the buffer is flushed, the remaining spans +// from the trace can be retried. +// An error, if one is returned, indicates that a span in the trace is too large +// to fit in one buffer, and the trace cannot be written. 
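+//
+// For example (a hedged illustration of the contract): with a nearly full
+// buffer, writeTrace may return n == 2 for a 5-span trace; the caller then
+// flushes and retries with trace[2:], exactly as add below does.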
+func (h *logTraceWriter) writeTrace(trace []*Span) (n int, err *encodingError) {
+	startn := h.buf.Len()
+	if !h.hasTraces {
+		h.buf.WriteByte('[')
+	} else {
+		h.buf.WriteString(", [")
+	}
+	written := 0
+	for i, s := range trace {
+		n := h.buf.Len()
+		if i > 0 {
+			h.buf.WriteByte(',')
+		}
+		h.encodeSpan(s)
+		if h.buf.Len() > logBufferLimit-len(logBufferSuffix) {
+			// This span is too big to fit in the current buffer.
+			if i == 0 {
+				// This was the first span in this trace. This means we should truncate
+				// everything we wrote in writeTrace
+				h.buf.Truncate(startn)
+				if !h.hasTraces {
+					// This is the first span of the first trace in the buffer and it's too big.
+					// We will never be able to send this trace, so we will drop it.
+					return 0, &encodingError{cause: errors.New("span too large for buffer"), dropReason: "trace_too_large"}
+				}
+				return 0, nil
+			}
+			// This span was too big, but it might fit in the next buffer.
+			// We can finish this trace and try again with an empty buffer (see *logTraceWriter.add)
+			h.buf.Truncate(n)
+			break
+		}
+		written++
+	}
+	h.buf.WriteByte(']')
+	h.hasTraces = true
+	return written, nil
+}
+
+// add adds a trace to the writer's buffer.
+func (h *logTraceWriter) add(trace []*Span) {
+	// Try adding traces to the buffer until we flush them all or encounter an error.
+	for len(trace) > 0 {
+		n, err := h.writeTrace(trace)
+		if err != nil {
+			log.Error("Lost a trace: %s", err.cause)
+			h.statsd.Count("datadog.tracer.traces_dropped", 1, []string{"reason:" + err.dropReason}, 1)
+			return
+		}
+		trace = trace[n:]
+		// If there are traces left that didn't fit into the buffer, flush the buffer and loop to
+		// write the remaining spans.
+		if len(trace) > 0 {
+			h.flush()
+		}
+	}
+}
+
+func (h *logTraceWriter) stop() {
+	h.statsd.Incr("datadog.tracer.flush_triggered", []string{"reason:shutdown"}, 1)
+	h.flush()
+}
+
+// flush will write any buffered traces to standard output.
+func (h *logTraceWriter) flush() {
+	if !h.hasTraces {
+		return
+	}
+	h.buf.WriteString(logBufferSuffix)
+	h.w.Write(h.buf.Bytes())
+	h.resetBuffer()
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/v1.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/v1.go
new file mode 100644
index 00000000..225a74c6
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/v1.go
@@ -0,0 +1,28 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package ddtrace
+
+import (
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+	"github.com/DataDog/dd-trace-go/v2/internal/version"
+)
+
+func init() {
+	checkV1NonTransitional()
+}
+
+func checkV1NonTransitional() {
+	version, transitional, found := version.FindV1Version()
+	if !found {
+		// No v1 version detected
+		return
+	}
+	if transitional {
+		// v1 version is transitional
+		return
+	}
+	log.Warn("Detected %q non-transitional version of dd-trace-go. This version is not compatible with v2 - please upgrade to v1.74.0 or later", version)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/operation.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo/operation.go
similarity index 89%
rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/operation.go
rename to vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo/operation.go
index b5c0a168..ae2ab084 100644
--- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/operation.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo/operation.go
@@ -22,10 +22,11 @@ package dyngo

import (
	"context"
+	"runtime"
	"sync"
	"sync/atomic"

-	"gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion"
+	"github.com/DataDog/dd-trace-go/v2/internal/orchestrion"
)

// LogError is the function used to log errors in the dyngo package.
@@ -76,8 +77,8 @@ var rootOperation atomic.Pointer[Operation]
// SwapRootOperation allows to atomically swap the current root operation with
// the given new one. Concurrent uses of the old root operation on already
// existing and running operation are still valid.
-func SwapRootOperation(new Operation) {
-	rootOperation.Swap(&new)
+func SwapRootOperation(newOp Operation) {
+	rootOperation.Swap(&newOp)
	// Note: calling Finish(old, ...) could result into mem leaks because
	// some finish event listeners, possibly releasing memory and resources,
	// wouldn't be called anymore (because Finish() disables the operation and
@@ -89,7 +90,7 @@
// bubble-up the operation stack, which allows listening to future events that
// might happen in the operation lifetime.
type operation struct {
-	parent *operation
+	parent Operation

	eventRegister
	dataBroadcaster
@@ -146,11 +147,7 @@ func NewOperation(parent Operation) Operation {
			parent = *ptr
		}
	}
-	var parentOp *operation
-	if parent != nil {
-		parentOp = parent.unwrap()
-	}
-	return &operation{parent: parentOp}
+	return &operation{parent: parent}
}

// FromContext looks into the given context (or the GLS if orchestrion is enabled) for a parent Operation and returns it.
@@ -164,13 +161,33 @@
	return op, ok
}

+// FindOperation walks up the operation tree, starting from the operation found
+// in the given context, and returns the first operation matching the given type.
+func FindOperation[T any, O interface {
+	Operation
+	*T
+}](ctx context.Context) (*T, bool) {
+	op, found := FromContext(ctx)
+	if !found {
+		return nil, false
+	}
+
+	for current := op; current != nil; current = current.unwrap().parent {
+		if o, ok := current.(O); ok {
+			return o, true
+		}
+	}
+
+	return nil, false
+}
+
// StartOperation starts a new operation along with its arguments and emits a
// start event with the operation arguments.
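+// A hedged sketch of the intended call pattern; MyOp and MyArgs are
+// hypothetical types where MyOp embeds Operation and MyArgs implements
+// ArgOf[*MyOp]:
+//
+//	op := &MyOp{Operation: NewOperation(parent)}
+//	StartOperation(op, MyArgs{})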
func StartOperation[O Operation, E ArgOf[O]](op O, args E) {
	// Bubble-up the start event starting from the parent operation as you can't
	// listen for your own start event
-	for current := op.unwrap().parent; current != nil; current = current.parent {
-		emitEvent(&current.eventRegister, op, args)
+	for current := op.unwrap().parent; current != nil; current = current.Parent() {
+		emitEvent(&current.unwrap().eventRegister, op, args)
	}
}

@@ -205,8 +222,9 @@ func FinishOperation[O Operation, E ResultOf[O]](op O, results E) {
		return
	}

-	for current := o; current != nil; current = current.parent {
-		emitEvent(&current.eventRegister, op, results)
+	var current Operation = op
+	for ; current != nil; current = current.Parent() {
+		emitEvent(&current.unwrap().eventRegister, op, results)
	}
}

@@ -274,8 +292,8 @@ func EmitData[T any](op Operation, data T) {
	// Bubble up the data to the stack of operations. Contrary to events,
	// we also send the data to ourselves since SDK operations are leaf operations
	// that both emit and listen for data (errors).
-	for current := o; current != nil; current = current.parent {
-		emitData(&current.dataBroadcaster, data)
+	for current := op; current != nil; current = current.Parent() {
+		emitData(&current.unwrap().dataBroadcaster, data)
	}
}

@@ -313,16 +331,12 @@ func addDataListener[T any](b *dataBroadcaster, l DataListener[T]) {
	b.listeners[key] = append(b.listeners[key], l)
}

-func (b *dataBroadcaster) clear() {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	b.listeners = nil
-}
-
func emitData[T any](b *dataBroadcaster, v T) {
	defer func() {
		if r := recover(); r != nil {
-			LogError("appsec: recovered from an unexpected panic from an event listener: %+v", r)
+			var buf [4_096]byte
+			n := runtime.Stack(buf[:], false)
+			LogError("appsec: recovered from an unexpected panic from a data listener (for %T): %+v\n%s", v, r, string(buf[:n]))
		}
	}()
	b.mu.RLock()
@@ -353,7 +367,9 @@ func (r *eventRegister) clear() {
func emitEvent[O Operation, T any](r *eventRegister, op O, v T) {
	defer func() {
		if r := recover(); r != nil {
-			LogError("appsec: recovered from an unexpected panic from an event listener: %+v", r)
+			var buf [4_096]byte
+			n := runtime.Stack(buf[:], false)
+			LogError("appsec: recovered from an unexpected panic from an event listener (%T > %T): %+v\n%s", op, v, r, string(buf[:n]))
		}
	}()
	r.mu.RLock()
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/README.md b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/README.md
new file mode 100644
index 00000000..c350c3c3
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/README.md
@@ -0,0 +1,25 @@
+## GraphQL Threat Monitoring
+
+This package provides `dyngo` support for GraphQL operations, which are listened
+to according to the following sequence diagram:
+
+```mermaid
+sequenceDiagram
+    participant Root
+    participant Request
+    participant Execution
+    participant Field
+
+    Root ->>+ Request: graphqlsec.StartRequest(...)
+
+    Request ->>+ Execution: graphqlsec.StartExecution(...)
+
+    par for each field
+        Execution ->>+ Field: graphqlsec.StartField(...)
+        Field -->>- Execution: field.Finish(...)
+    end
+
+    Execution -->>- Request: execution.Finish(...)
+
+    Request -->>- Root: request.Finish(...)
+``` diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/execution.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/execution.go new file mode 100644 index 00000000..06b6981b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/execution.go @@ -0,0 +1,62 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package graphqlsec + +import ( + "context" + + "github.com/DataDog/dd-trace-go/v2/internal/log" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" +) + +type ( + ExecutionOperation struct { + dyngo.Operation + } + + // ExecutionOperationArgs describes arguments passed to a GraphQL query operation. + ExecutionOperationArgs struct { + // Variables is the user-provided variables object for the query. + Variables map[string]any + // Query is the query that is being executed. + Query string + // OperationName is the user-provided operation name for the query. + OperationName string + } + + ExecutionOperationRes struct { + // Data is the data returned from processing the GraphQL operation. + Data any + // Error is the error returned by processing the GraphQL Operation, if any. + Error error + } +) + +// Finish the GraphQL query operation, along with the given results, and emit a finish event up in +// the operation stack. +func (q *ExecutionOperation) Finish(res ExecutionOperationRes) { + dyngo.FinishOperation(q, res) +} + +func (ExecutionOperationArgs) IsArgOf(*ExecutionOperation) {} +func (ExecutionOperationRes) IsResultOf(*ExecutionOperation) {} + +// StartExecutionOperation starts a new GraphQL query operation, along with the given arguments, and +// emits a start event up in the operation stack. The operation is tracked on the returned context, +// and can be extracted later on using FromContext. +func StartExecutionOperation(ctx context.Context, args ExecutionOperationArgs) (context.Context, *ExecutionOperation) { + parent, ok := dyngo.FromContext(ctx) + if !ok { + log.Debug("appsec: StartExecutionOperation: no parent operation found in context") + } + + op := &ExecutionOperation{ + Operation: dyngo.NewOperation(parent), + } + + return dyngo.StartAndRegisterOperation(ctx, op, args), op +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/request.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/request.go new file mode 100644 index 00000000..e1f363b9 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/request.go @@ -0,0 +1,74 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package graphqlsec is the GraphQL instrumentation API and contract for AppSec +// defining an abstract run-time representation of AppSec middleware. GraphQL +// integrations must use this package to enable AppSec features for GraphQL, +// which listens to this package's operation events. 
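+//
+// A hedged end-to-end sketch using the constructors defined in this package
+// (ctx and span are assumed to come from the integration):
+//
+//	ctx, req := StartRequestOperation(ctx, span, RequestOperationArgs{RawQuery: query})
+//	ctx, exec := StartExecutionOperation(ctx, ExecutionOperationArgs{Query: query})
+//	ctx, field := StartResolveOperation(ctx, ResolveOperationArgs{FieldName: "user"})
+//	field.Finish(ResolveOperationRes{})
+//	exec.Finish(ExecutionOperationRes{})
+//	req.Finish(RequestOperationRes{})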
+package graphqlsec
+
+import (
+	"context"
+
+	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo"
+	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace"
+	"github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf"
+)
+
+type (
+	RequestOperation struct {
+		dyngo.Operation
+		// used in case we don't have a parent operation
+		*waf.ContextOperation
+
+		// wafContextOwner indicates if the waf.ContextOperation was started by us or not and if we need to close it.
+		wafContextOwner bool
+	}
+
+	// RequestOperationArgs describes arguments passed to a GraphQL request.
+	RequestOperationArgs struct {
+		RawQuery      string         // The raw, not-yet-parsed GraphQL query
+		OperationName string         // The user-provided operation name for the query
+		Variables     map[string]any // The user-provided variables object for this request
+	}
+
+	RequestOperationRes struct {
+		// Data is the data returned from processing the GraphQL operation.
+		Data any
+		// Error is the error returned by processing the GraphQL Operation, if any.
+		Error error
+	}
+)
+
+// Finish the GraphQL query operation, along with the given results, and emit a finish event up in
+// the operation stack.
+func (op *RequestOperation) Finish(res RequestOperationRes) {
+	dyngo.FinishOperation(op, res)
+	if op.wafContextOwner {
+		op.ContextOperation.Finish()
+	}
+}
+
+func (RequestOperationArgs) IsArgOf(*RequestOperation)   {}
+func (RequestOperationRes) IsResultOf(*RequestOperation) {}
+
+// StartRequestOperation starts a new GraphQL request operation, along with the given arguments, and
+// emits a start event up in the operation stack. The operation is usually linked to the global root
+// operation. The operation is tracked on the returned context, and can be extracted later on using
+// FromContext.
+func StartRequestOperation(ctx context.Context, span trace.TagSetter, args RequestOperationArgs) (context.Context, *RequestOperation) {
+	wafOp, found := dyngo.FindOperation[waf.ContextOperation](ctx)
+	if !found { // Usually we can find the HTTP Handler Operation as the parent, but it's technically optional
+		wafOp, ctx = waf.StartContextOperation(ctx, span)
+	}
+
+	op := &RequestOperation{
+		Operation:        dyngo.NewOperation(wafOp),
+		ContextOperation: wafOp,
+		wafContextOwner:  !found, // If we started the parent operation, we finish it, otherwise we don't
+	}
+
+	return dyngo.StartAndRegisterOperation(ctx, op, args), op
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/resolve.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/resolve.go
new file mode 100644
index 00000000..fd1b1b63
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec/resolve.go
@@ -0,0 +1,63 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package graphqlsec
+
+import (
+	"context"
+
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+
+	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo"
+)
+
+type (
+	ResolveOperation struct {
+		dyngo.Operation
+	}
+
+	// ResolveOperationArgs describes arguments passed to a GraphQL field operation.
+	ResolveOperationArgs struct {
+		// TypeName is the name of the field's type
+		TypeName string
+		// FieldName is the name of the field
+		FieldName string
+		// Arguments is the arguments provided to the field resolver
+		Arguments map[string]any
+		// Trivial determines whether the resolution is trivial or not. Leave as false if undetermined.
+		Trivial bool
+	}
+
+	ResolveOperationRes struct {
+		// Data is the data returned from processing the GraphQL operation.
+		Data any
+		// Error is the error returned by processing the GraphQL Operation, if any.
+		Error error
+	}
+)
+
+// Finish the GraphQL Field operation, along with the given results, and emit a finish event up in
+// the operation stack.
+func (q *ResolveOperation) Finish(res ResolveOperationRes) {
+	dyngo.FinishOperation(q, res)
+}
+
+func (ResolveOperationArgs) IsArgOf(*ResolveOperation)   {}
+func (ResolveOperationRes) IsResultOf(*ResolveOperation) {}
+
+// StartResolveOperation starts a new GraphQL Resolve operation, along with the given arguments, and
+// emits a start event up in the operation stack. The operation is tracked on the returned context,
+// and can be extracted later on using FromContext.
+func StartResolveOperation(ctx context.Context, args ResolveOperationArgs) (context.Context, *ResolveOperation) {
+	parent, ok := dyngo.FromContext(ctx)
+	if !ok {
+		log.Debug("appsec: StartResolveOperation: no parent operation found in context")
+	}
+
+	op := &ResolveOperation{
+		Operation: dyngo.NewOperation(parent),
+	}
+	return dyngo.StartAndRegisterOperation(ctx, op, args), op
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/grpcsec/grpc.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/grpcsec/grpc.go
new file mode 100644
index 00000000..f6b6691f
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/grpcsec/grpc.go
@@ -0,0 +1,125 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package grpcsec is the gRPC instrumentation API and contract for AppSec
+// defining an abstract run-time representation of gRPC handlers.
+// gRPC integrations must use this package to enable AppSec features for gRPC,
+// which listens to this package's operation events.
+//
+// The abstract gRPC server handler operation definitions are based on two
+// operations that allow describing every type of RPC: the HandlerOperation type,
+// which represents the RPC handler, and the ReceiveOperation type, which
+// represents the messages the RPC handler receives during its lifetime.
+// This means that the ReceiveOperation(s) will happen within the
+// HandlerOperation.
+// Unary, client-streaming, server-streaming, and bidirectional-streaming RPCs
+// can all be represented with a HandlerOperation having one or several
+// ReceiveOperations.
+// The send operation is not required for now and therefore not defined, which
+// means that server and bidirectional streaming RPCs currently have the same
+// run-time representation as unary and client streaming RPCs.
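+//
+// A hedged handler-side sketch (ctx, span and msg are assumed to come from
+// the gRPC integration):
+//
+//	ctx, op, blockPtr := StartHandlerOperation(ctx, span, HandlerOperationArgs{Method: "/pkg.Svc/Do"})
+//	_ = MonitorRequestMessage(ctx, msg)
+//	op.Finish(HandlerOperationRes{StatusCode: 0})
+//	_ = blockPtr.Load() // non-nil if a blocking action was emitted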
+package grpcsec + +import ( + "context" + "sync/atomic" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf" +) + +type ( + // HandlerOperation represents a gRPC server handler operation. + // It must be created with StartHandlerOperation() and finished with its + // Finish() method. + // Security events observed during the operation lifetime should be added + // to the operation using its AddSecurityEvent() method. + HandlerOperation struct { + dyngo.Operation + *waf.ContextOperation + + // wafContextOwner indicates if the waf.ContextOperation was started by us or not and if we need to close it. + wafContextOwner bool + } + + // HandlerOperationArgs is the grpc handler arguments. + HandlerOperationArgs struct { + // Method is the gRPC method name. + // Corresponds to the address `grpc.server.method`. + Method string + + // RPC metadata received by the gRPC handler. + // Corresponds to the address `grpc.server.request.metadata`. + Metadata map[string][]string + + // RemoteAddr is the IP address of the client that initiated the gRPC request. + // May be used as the address `http.client_ip`. + RemoteAddr string + } + + // HandlerOperationRes is the grpc handler results. Empty as of today. + HandlerOperationRes struct { + // Raw gRPC status code. + // Corresponds to the address `grpc.server.response.status`. + StatusCode int + } +) + +func (HandlerOperationArgs) IsArgOf(*HandlerOperation) {} +func (HandlerOperationRes) IsResultOf(*HandlerOperation) {} + +// StartHandlerOperation starts an gRPC server handler operation, along with the +// given arguments and parent operation, and emits a start event up in the +// operation stack. When parent is nil, the operation is linked to the global +// root operation. +func StartHandlerOperation(ctx context.Context, span trace.TagSetter, args HandlerOperationArgs) (context.Context, *HandlerOperation, *atomic.Pointer[actions.BlockGRPC]) { + wafOp, found := dyngo.FindOperation[waf.ContextOperation](ctx) + if !found { + wafOp, ctx = waf.StartContextOperation(ctx, span) + } + op := &HandlerOperation{ + Operation: dyngo.NewOperation(wafOp), + ContextOperation: wafOp, + wafContextOwner: !found, + } + + var block atomic.Pointer[actions.BlockGRPC] + dyngo.OnData(op, func(err *actions.BlockGRPC) { + block.Store(err) + }) + + return dyngo.StartAndRegisterOperation(ctx, op, args), op, &block +} + +// MonitorRequestMessage monitors the gRPC request message body as the WAF address `grpc.server.request.message`. +func MonitorRequestMessage(ctx context.Context, msg any) error { + return waf.RunSimple(ctx, + addresses.NewAddressesBuilder(). + WithGRPCRequestMessage(msg). + Build(), + "appsec: failed to monitor gRPC request message body") +} + +// MonitorResponseMessage monitors the gRPC response message body as the WAF address `grpc.server.response.message`. +func MonitorResponseMessage(ctx context.Context, msg any) error { + return waf.RunSimple(ctx, + addresses.NewAddressesBuilder(). + WithGRPCResponseMessage(msg). + Build(), + "appsec: failed to monitor gRPC response message body") + +} + +// Finish the gRPC handler operation, along with the given results, and emit a +// finish event up in the operation stack. 
+func (op *HandlerOperation) Finish(res HandlerOperationRes) { + dyngo.FinishOperation(op, res) + if op.wafContextOwner { + op.ContextOperation.Finish() + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/config.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/config.go new file mode 100644 index 00000000..e61acd0d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/config.go @@ -0,0 +1,30 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package httpsec + +import ( + "net/http" +) + +type Config struct { + // Framework is the name of the framework or library being used (optional). + Framework string + // OnBlock is a list of callbacks to be invoked when a block decision is made. + OnBlock []func() + // ResponseHeaderCopier provides a way to access response headers for reading + // purposes (the value may be provided by copy). This allows customers to + // apply synchronization if they allow http.ResponseWriter objects to be + // accessed by multiple goroutines. + ResponseHeaderCopier func(http.ResponseWriter) http.Header + // Route is the route name to be used for the request. + Route string + // RouteParams is a map of route parameters to be used for the request. + RouteParams map[string]string +} + +var defaultWrapHandlerConfig = &Config{ + ResponseHeaderCopier: func(w http.ResponseWriter) http.Header { return w.Header() }, +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/http.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/http.go new file mode 100644 index 00000000..63a985df --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/http.go @@ -0,0 +1,292 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package httpsec is the HTTP instrumentation API and contract for +// AppSec. It defines an abstract representation of HTTP handlers, along with +// helper functions to wrap (aka. instrument) standard net/http handlers. +// HTTP integrations must use this package to enable AppSec features for HTTP, +// which listens to this package's operation events. +package httpsec + +import ( + "context" + // Blank import needed to use embed for the default blocked response payloads + _ "embed" + "net/http" + "sync/atomic" + + "github.com/DataDog/dd-trace-go/v2/appsec/events" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + telemetrylog "github.com/DataDog/dd-trace-go/v2/internal/telemetry/log" +) + +// HandlerOperation type representing an HTTP operation.
It must be created with +// StartOperation() and finished with its Finish(). +type ( + HandlerOperation struct { + dyngo.Operation + *waf.ContextOperation + + // wafContextOwner indicates if the waf.ContextOperation was started by us or not and if we need to close it. + wafContextOwner bool + + // framework is the name of the framework or library that started the operation. + framework string + // method is the HTTP method for the current handler operation. + method string + // route is the HTTP route for the current handler operation (or the URL if no route is available). + route string + } + + // HandlerOperationArgs is the HTTP handler operation arguments. + HandlerOperationArgs struct { + Framework string // Optional: name of the framework or library being used + Method string + RequestURI string + RequestRoute string + Host string + RemoteAddr string + Headers map[string][]string + Cookies map[string][]string + QueryParams map[string][]string + PathParams map[string]string + } + + // HandlerOperationRes is the HTTP handler operation results. + HandlerOperationRes struct { + Headers map[string][]string + StatusCode int + } + + // EarlyBlock is used to trigger an early block before the handler is executed. + EarlyBlock struct{} +) + +func (HandlerOperationArgs) IsArgOf(*HandlerOperation) {} +func (HandlerOperationRes) IsResultOf(*HandlerOperation) {} + +func StartOperation(ctx context.Context, args HandlerOperationArgs, span trace.TagSetter) (*HandlerOperation, *atomic.Pointer[actions.BlockHTTP], context.Context) { + wafOp, found := dyngo.FindOperation[waf.ContextOperation](ctx) + if !found { + wafOp, ctx = waf.StartContextOperation(ctx, span) + } + + op := &HandlerOperation{ + Operation: dyngo.NewOperation(wafOp), + ContextOperation: wafOp, + wafContextOwner: !found, // If we started the parent operation, we finish it, otherwise we don't + framework: args.Framework, + method: args.Method, + route: args.RequestRoute, + } + + // We need to use an atomic pointer to store the action because the action may be created asynchronously in the future + var action atomic.Pointer[actions.BlockHTTP] + dyngo.OnData(op, func(a *actions.BlockHTTP) { + action.Store(a) + }) + + return op, &action, dyngo.StartAndRegisterOperation(ctx, op, args) +} + +// Framework returns the name of the framework or library that started the operation. +func (op *HandlerOperation) Framework() string { + return op.framework +} + +// Method returns the HTTP method for the current handler operation. +func (op *HandlerOperation) Method() string { + return op.method +} + +// Route returns the HTTP route for the current handler operation. +func (op *HandlerOperation) Route() string { + return op.route +} + +// Finish the HTTP handler operation and its children operations and write everything to the service entry span. 
+func (op *HandlerOperation) Finish(res HandlerOperationRes) { + dyngo.FinishOperation(op, res) + if op.wafContextOwner { + op.ContextOperation.Finish() + } +} + +const ( + monitorParsedBodyErrorLog = ` +appsec: parsed http body monitoring ignored: could not find the http handler instrumentation metadata in the request context: + the request handler is not being monitored by a middleware function or the provided context is not the expected request context +` + monitorResponseBodyErrorLog = ` +appsec: http response body monitoring ignored: could not find the http handler instrumentation metadata in the request context: + the request handler is not being monitored by a middleware function or the provided context is not the expected request context +` +) + +// MonitorParsedBody starts and finishes the SDK body operation. +// This function should not be called when AppSec is disabled in order to +// get more accurate error logs. +func MonitorParsedBody(ctx context.Context, body any) error { + return waf.RunSimple(ctx, + addresses.NewAddressesBuilder(). + WithRequestBody(body). + Build(), + monitorParsedBodyErrorLog, + ) +} + +// MonitorResponseBody gets the response body through the in-app WAF. +// This function should not be called when AppSec is disabled in order to get +// more accurate error logs. +func MonitorResponseBody(ctx context.Context, body any) error { + return waf.RunSimple(ctx, + addresses.NewAddressesBuilder(). + WithResponseBody(body). + Build(), + monitorResponseBodyErrorLog, + ) +} + +// makeCookies returns the map of parsed cookies, if any, following the specification of +// the rule address `server.request.cookies`. +func makeCookies(parsed []*http.Cookie) map[string][]string { + if len(parsed) == 0 { + return nil + } + cookies := make(map[string][]string, len(parsed)) + for _, c := range parsed { + cookies[c.Name] = append(cookies[c.Name], c.Value) + } + return cookies +} + +// RouteMatched can be called when BeforeHandle is started too early in the http request lifecycle, e.g. +// before the router has matched the request to a route. This can happen when the HTTP handler is wrapped +// using http.NewServeMux instead of http.WrapHandler. In that case the route and the path parameters are +// initially empty and are filled in later by calling RouteMatched with the actual route. +// If RouteMatched returns an error, the request should be considered blocked and the error should be reported. +func RouteMatched(ctx context.Context, route string, routeParams map[string]string) error { + op, ok := dyngo.FindOperation[HandlerOperation](ctx) + if !ok { + log.Debug("appsec: RouteMatched called without an active HandlerOperation in the context, ignoring") + telemetrylog.Warn("appsec: RouteMatched called without an active HandlerOperation in the context, ignoring", telemetry.WithTags([]string{"product:appsec"})) + return nil + } + + // Overwrite the previous route that was created using a quantization algorithm + op.route = route + + var err error + dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { + err = e + }) + + // Call the WAF with this new data + op.Run(op, addresses.NewAddressesBuilder(). + WithPathParams(routeParams). + Build(), + ) + + return err +} + +// BeforeHandle contains the appsec functionality that should be executed before a http.Handler runs.
+// It returns the modified http.ResponseWriter and http.Request, an additional afterHandle function +// that should be executed after the Handler runs, and a handled bool that instructs if the request has been handled +// or not - in case it was handled, the original handler should not run. +func BeforeHandle( + w http.ResponseWriter, + r *http.Request, + span trace.TagSetter, + opts *Config, +) (http.ResponseWriter, *http.Request, func(), bool) { + if opts == nil { + opts = defaultWrapHandlerConfig + } + if opts.ResponseHeaderCopier == nil { + opts.ResponseHeaderCopier = defaultWrapHandlerConfig.ResponseHeaderCopier + } + + op, blockAtomic, ctx := StartOperation(r.Context(), HandlerOperationArgs{ + Framework: opts.Framework, + Method: r.Method, + RequestURI: r.RequestURI, + RequestRoute: opts.Route, + Host: r.Host, + RemoteAddr: r.RemoteAddr, + Headers: r.Header, + Cookies: makeCookies(r.Cookies()), + QueryParams: r.URL.Query(), + PathParams: opts.RouteParams, + }, span) + tr := r.WithContext(ctx) + + afterHandle := func() { + var statusCode int + if res, ok := w.(interface{ Status() int }); ok { + statusCode = res.Status() + } + op.Finish(HandlerOperationRes{ + Headers: opts.ResponseHeaderCopier(w), + StatusCode: statusCode, + }) + + // Execute the onBlock functions to make sure blocking works properly + // in case we are instrumenting the Gin framework + if blockPtr := blockAtomic.Load(); blockPtr != nil { + for _, f := range opts.OnBlock { + f() + } + + if blockPtr.Handler != nil { + blockPtr.Handler.ServeHTTP(w, tr) + } + } + } + + handled := false + if blockPtr := blockAtomic.Load(); blockPtr != nil && blockPtr.Handler != nil { + // handler is replaced + blockPtr.Handler.ServeHTTP(w, tr) + blockPtr.Handler = nil + handled = true + } + + // We register a handler for cases that would require us to write the blocking response before any more code + // from a specific framework (like Gin) is executed that would write another (wrong) response here. + dyngo.OnData(op, func(e EarlyBlock) { + if blockPtr := blockAtomic.Load(); blockPtr != nil && blockPtr.Handler != nil { + blockPtr.Handler.ServeHTTP(w, tr) + blockPtr.Handler = nil + } + }) + + return w, tr, afterHandle, handled +} + +// WrapHandler wraps the given HTTP handler with the abstract HTTP operation defined by HandlerOperationArgs and +// HandlerOperationRes. +// The onBlock params are used to cleanup the context when needed. +// It is a specific patch meant for Gin, for which we must abort the +// context since it uses a queue of handlers and it's the only way to make +// sure other queued handlers don't get executed. +// TODO: this patch must be removed/improved when we rework our actions/operations system +func WrapHandler(handler http.Handler, span trace.TagSetter, opts *Config) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tw, tr, afterHandle, handled := BeforeHandle(w, r, span, opts) + defer afterHandle() + if handled { + return + } + + handler.ServeHTTP(tw, tr) + }) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/roundtripper.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/roundtripper.go new file mode 100644 index 00000000..6b27d990 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/roundtripper.go @@ -0,0 +1,71 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package httpsec + +import ( + "context" + "sync" + + "github.com/DataDog/dd-trace-go/v2/appsec/events" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +var badInputContextOnce sync.Once + +type ( + RoundTripOperation struct { + dyngo.Operation + } + + // RoundTripOperationArgs is the round trip operation arguments. + RoundTripOperationArgs struct { + // URL corresponds to the address `server.io.net.url`. + URL string + } + + // RoundTripOperationRes is the round trip operation results. + RoundTripOperationRes struct{} +) + +func (RoundTripOperationArgs) IsArgOf(*RoundTripOperation) {} +func (RoundTripOperationRes) IsResultOf(*RoundTripOperation) {} + +func ProtectRoundTrip(ctx context.Context, url string) error { + opArgs := RoundTripOperationArgs{ + URL: url, + } + + parent, _ := dyngo.FromContext(ctx) + if parent == nil { // No parent operation => we can't monitor the request + badInputContextOnce.Do(func() { + log.Debug("appsec: outgoing http request monitoring ignored: could not find the handler " + + "instrumentation metadata in the request context: the request handler is not being monitored by a " + + "middleware function or the incoming request context has not been forwarded correctly to the roundtripper") + }) + return nil + } + + op := &RoundTripOperation{ + Operation: dyngo.NewOperation(parent), + } + + var err *events.BlockingSecurityEvent + // TODO: move the data listener as a setup function of httpsec.StartRoundTripperOperation(ars, ) + dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { + err = e + }) + + dyngo.StartOperation(op, opArgs) + dyngo.FinishOperation(op, RoundTripOperationRes{}) + + if err != nil { + log.Debug("appsec: outgoing http request blocked by the WAF on URL: %s", url) + return err + } + + return nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/ossec/lfi.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/ossec/lfi.go new file mode 100644 index 00000000..555fd73a --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/ossec/lfi.go @@ -0,0 +1,41 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc.
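The OpenOperation defined in the file below only declares the operation types; as a hedged sketch of how a wrapper around os.Open might drive it with the dyngo start/finish pattern used by the other emitters in this diff (the wrapper itself is hypothetical, not part of this change):

package ossecexample

import (
	"context"
	"os"

	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo"
	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/ossec"
)

// openFile sketches an instrumented open(2) wrapper. The parent operation is
// taken from the request context, mirroring the other emitters; without one,
// the call would simply go unmonitored.
func openFile(ctx context.Context, path string) (*os.File, error) {
	parent, ok := dyngo.FromContext(ctx)
	if !ok {
		return os.Open(path)
	}

	op := &ossec.OpenOperation{Operation: dyngo.NewOperation(parent)}
	dyngo.StartOperation(op, ossec.OpenOperationArgs{
		Path:  path,
		Flags: os.O_RDONLY,
	})

	file, err := os.Open(path)
	// The result carries pointers so listeners can observe (and report on)
	// both the returned file and the error.
	dyngo.FinishOperation(op, ossec.OpenOperationRes[os.File]{File: file, Err: &err})
	return file, err
}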
+ +package ossec + +import ( + "io/fs" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" +) + +type ( + // OpenOperation type embodies any kind of function call that will result in a call to an open(2) syscall + OpenOperation struct { + dyngo.Operation + blockErr error + } + + // OpenOperationArgs is the arguments for an open operation + OpenOperationArgs struct { + // Path is the path to the file to be opened + Path string + // Flags are the flags passed to the open(2) syscall + Flags int + // Perms are the permissions passed to the open(2) syscall if the creation of a file is required + Perms fs.FileMode + } + + // OpenOperationRes is the result of an open operation + OpenOperationRes[File any] struct { + // File is the file descriptor returned by the open(2) syscall + File *File + // Err is the error returned by the function + Err *error + } +) + +func (OpenOperationArgs) IsArgOf(*OpenOperation) {} +func (OpenOperationRes[File]) IsResultOf(*OpenOperation) {} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/sqlsec/sql.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/sqlsec/sql.go new file mode 100644 index 00000000..3b1db530 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/sqlsec/sql.go @@ -0,0 +1,71 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package sqlsec + +import ( + "context" + "sync" + + "github.com/DataDog/dd-trace-go/v2/appsec/events" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +var badInputContextOnce sync.Once + +type ( + SQLOperation struct { + dyngo.Operation + } + + SQLOperationArgs struct { + // Query corresponds to the address `server.db.statement` + Query string + // Driver corresponds to the address `server.db.system` + Driver string + } + SQLOperationRes struct{} +) + +func (SQLOperationArgs) IsArgOf(*SQLOperation) {} +func (SQLOperationRes) IsResultOf(*SQLOperation) {} + +func ProtectSQLOperation(ctx context.Context, query, driver string) error { + opArgs := SQLOperationArgs{ + Query: query, + Driver: driver, + } + + parent, _ := dyngo.FromContext(ctx) + if parent == nil { // No parent operation => we can't monitor the request + badInputContextOnce.Do(func() { + log.Debug("appsec: outgoing SQL operation monitoring ignored: could not find the handler " + + "instrumentation metadata in the request context: the request handler is not being monitored by a " + + "middleware function or the incoming request context has not been forwarded correctly to the SQL connection") + }) + return nil + } + + op := &SQLOperation{ + Operation: dyngo.NewOperation(parent), + } + + var err *events.BlockingSecurityEvent + // TODO: move the data listener as a setup function of SQLsec.StartSQLOperation(ars, ) + dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { + err = e + }) + + dyngo.StartOperation(op, opArgs) + dyngo.FinishOperation(op, SQLOperationRes{}) + + if err != nil { + log.Debug("appsec: outgoing SQL operation blocked by the WAF") + return err + } + + return nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/actions.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/actions.go new file
mode 100644 index 00000000..b65d68db --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/actions.go @@ -0,0 +1,63 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package actions + +import ( + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/internal/log" + telemetrylog "github.com/DataDog/dd-trace-go/v2/internal/telemetry/log" +) + +type ( + // Action is a generic interface that represents any WAF action + Action interface { + EmitData(op dyngo.Operation) + } +) + +type actionHandler func(map[string]any) []Action + +// actionHandlers is a map of action types to their respective handler functions +// It is populated by the init functions of the actions packages +var actionHandlers = map[string]actionHandler{} + +func registerActionHandler(aType string, handler actionHandler) { + if _, ok := actionHandlers[aType]; ok { + log.Warn("appsec: action type `%s` already registered", aType) + return + } + actionHandlers[aType] = handler +} + +// SendActionEvents sends the relevant actions to the operation's data listener and reports +// whether the request handler should be interrupted: it returns true when at least one of +// the actions requires interrupting the request handler. +func SendActionEvents(op dyngo.Operation, actions map[string]any) bool { + var blocked bool + for aType, params := range actions { + log.Debug("appsec: processing %q action with params %v", aType, params) //nolint:gocritic + params, ok := params.(map[string]any) + if !ok { + telemetrylog.Error("appsec: could not cast action params to map[string]any from %T", params) + continue + } + + blocked = blocked || aType == "block_request" + + actionHandler, ok := actionHandlers[aType] + if !ok { + telemetrylog.Error("appsec: unknown action type `%s`", aType) + continue + } + + for _, a := range actionHandler(params) { + a.EmitData(op) + } + } + + return blocked +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/block.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/block.go new file mode 100644 index 00000000..43ad2714 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/block.go @@ -0,0 +1,162 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc.
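To make the action registry above concrete, a hedged sketch of the shape of the action map that a WAF match produces and that SendActionEvents consumes; the parameter values are illustrative:

package actionsexample

import (
	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo"
	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions"
)

// emitBlock feeds a "block_request" action, one of the types registered by
// this package's init functions, through the registry.
func emitBlock(op dyngo.Operation) bool {
	wafActions := map[string]any{
		"block_request": map[string]any{
			"status_code": 403,
			"type":        "auto",
		},
	}
	// Reports true here because "block_request" interrupts the handler.
	return actions.SendActionEvents(op, wafActions)
}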
+ +package actions + +import ( + _ "embed" // embed is used to embed the blocked-template.json and blocked-template.html files + "net/http" + "os" + "strings" + + "github.com/go-viper/mapstructure/v2" + + "github.com/DataDog/dd-trace-go/v2/appsec/events" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// blockedTemplateJSON is the default JSON template used to write responses for blocked requests +// +//go:embed blocked-template.json +var blockedTemplateJSON []byte + +// blockedTemplateHTML is the default HTML template used to write responses for blocked requests +// +//go:embed blocked-template.html +var blockedTemplateHTML []byte + +const ( + envBlockedTemplateHTML = "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_HTML" + envBlockedTemplateJSON = "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_JSON" +) + +func init() { + for key, template := range map[string]*[]byte{envBlockedTemplateJSON: &blockedTemplateJSON, envBlockedTemplateHTML: &blockedTemplateHTML} { + if path, ok := env.Lookup(key); ok { + if t, err := os.ReadFile(path); err != nil { + log.Error("Could not read template at %q: %v", path, err.Error()) + } else { + *template = t + } + } + } + + registerActionHandler("block_request", NewBlockAction) +} + +type ( + // blockActionParams are the dynamic parameters to be provided to a "block_request" + // action type upon invocation + blockActionParams struct { + // GRPCStatusCode is the gRPC status code to be returned. Since 0 is the OK status, the value is nullable to + // be able to distinguish between unset and defaulting to Abort (10), or set to OK (0). + GRPCStatusCode *int `mapstructure:"grpc_status_code,omitempty"` + StatusCode int `mapstructure:"status_code"` + Type string `mapstructure:"type,omitempty"` + } + // GRPCWrapper is an opaque prototype abstraction for a gRPC handler (to avoid importing grpc) + // that returns a status code and an error + GRPCWrapper func() (uint32, error) + + // BlockGRPC are actions that interact with a GRPC request flow + BlockGRPC struct { + GRPCWrapper + } + + // BlockHTTP are actions that interact with an HTTP request flow + BlockHTTP struct { + http.Handler + } +) + +func (a *BlockGRPC) EmitData(op dyngo.Operation) { + dyngo.EmitData(op, a) + dyngo.EmitData(op, &events.BlockingSecurityEvent{}) +} + +func (a *BlockHTTP) EmitData(op dyngo.Operation) { + dyngo.EmitData(op, a) + dyngo.EmitData(op, &events.BlockingSecurityEvent{}) +} + +func newGRPCBlockRequestAction(status int) *BlockGRPC { + return &BlockGRPC{GRPCWrapper: newGRPCBlockHandler(status)} +} + +func newGRPCBlockHandler(status int) GRPCWrapper { + return func() (uint32, error) { + return uint32(status), &events.BlockingSecurityEvent{} + } +} + +func blockParamsFromMap(params map[string]any) (blockActionParams, error) { + grpcCode := 10 + p := blockActionParams{ + Type: "auto", + StatusCode: 403, + GRPCStatusCode: &grpcCode, + } + + if err := mapstructure.WeakDecode(params, &p); err != nil { + return p, err + } + + if p.GRPCStatusCode == nil { + p.GRPCStatusCode = &grpcCode + } + + return p, nil +} + +// NewBlockAction creates an action for the "block_request" action type +func NewBlockAction(params map[string]any) []Action { + p, err := blockParamsFromMap(params) + if err != nil { + log.Debug("appsec: couldn't decode block action parameters") + return nil + } + return []Action{ + newHTTPBlockRequestAction(p.StatusCode, p.Type), + newGRPCBlockRequestAction(*p.GRPCStatusCode), + }
+} + +func newHTTPBlockRequestAction(status int, template string) *BlockHTTP { + return &BlockHTTP{Handler: newBlockHandler(status, template)} +} + +// newBlockHandler creates, initializes and returns a new blocking http.Handler +func newBlockHandler(status int, template string) http.Handler { + htmlHandler := newBlockRequestHandler(status, "text/html", blockedTemplateHTML) + jsonHandler := newBlockRequestHandler(status, "application/json", blockedTemplateJSON) + switch template { + case "json": + return jsonHandler + case "html": + return htmlHandler + default: + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + h := jsonHandler + hdr := r.Header.Get("Accept") + htmlIdx := strings.Index(hdr, "text/html") + jsonIdx := strings.Index(hdr, "application/json") + // Switch to html handler if text/html comes before application/json in the Accept header + if htmlIdx != -1 && (jsonIdx == -1 || htmlIdx < jsonIdx) { + h = htmlHandler + } + h.ServeHTTP(w, r) + }) + } +} + +func newBlockRequestHandler(status int, ct string, payload []byte) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", ct) + w.WriteHeader(status) + w.Write(payload) + }) +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/blocked-template.html b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.html similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/blocked-template.html rename to vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.html diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/blocked-template.json b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.json similarity index 78% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/blocked-template.json rename to vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.json index 885d766c..12ae2969 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/blocked-template.json +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.json @@ -1 +1 @@ -{"errors":[{"title":"You've been blocked","detail":"Sorry, you cannot access this page. Please contact the customer service team. Security provided by Datadog."}]} +{"errors":[{"title":"You've been blocked","detail":"Sorry, you cannot access this page. Please contact the customer service team. Security provided by Datadog."}]} \ No newline at end of file diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/http_redirect.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/http_redirect.go new file mode 100644 index 00000000..562bc31a --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/http_redirect.go @@ -0,0 +1,54 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc.
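As an illustration of the redirect action defined in the next file, a small sketch of its parameter contract; the values are illustrative. Note how an out-of-range status code is normalized to 303 See Other, and an empty location falls back to a plain block response:

package actionsexample

import (
	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions"
)

// newRedirect decodes "redirect_request" parameters the same way a WAF
// payload would provide them.
func newRedirect() []actions.Action {
	return actions.NewRedirectAction(map[string]any{
		"status_code": 200, // out of the 3xx range, normalized to 303 by the constructor
		"location":    "https://example.com/blocked",
	})
}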
+ +package actions + +import ( + "net/http" + + "github.com/go-viper/mapstructure/v2" + + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// redirectActionParams are the dynamic parameters to be provided to a "redirect_request" +// action type upon invocation +type redirectActionParams struct { + Location string `mapstructure:"location,omitempty"` + StatusCode int `mapstructure:"status_code"` +} + +func init() { + registerActionHandler("redirect_request", NewRedirectAction) +} + +func redirectParamsFromMap(params map[string]any) (redirectActionParams, error) { + var p redirectActionParams + err := mapstructure.WeakDecode(params, &p) + return p, err +} + +func newRedirectRequestAction(status int, loc string) *BlockHTTP { + // Default to 303 if status is out of redirection codes bounds + if status < http.StatusMultipleChoices || status >= http.StatusBadRequest { + status = http.StatusSeeOther + } + + // If location is not set we fall back on a default block action + if loc == "" { + return &BlockHTTP{Handler: newBlockHandler(http.StatusForbidden, string(blockedTemplateJSON))} + } + return &BlockHTTP{Handler: http.RedirectHandler(loc, status)} +} + +// NewRedirectAction creates an action for the "redirect_request" action type +func NewRedirectAction(params map[string]any) []Action { + p, err := redirectParamsFromMap(params) + if err != nil { + log.Debug("appsec: couldn't decode redirect action parameters") + return nil + } + return []Action{newRedirectRequestAction(p.StatusCode, p.Location)} +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/stacktrace.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/stacktrace.go new file mode 100644 index 00000000..9ddd07cf --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/stacktrace.go @@ -0,0 +1,44 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
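Similarly, a hedged sketch of the parameter contract of the "generate_stack" handler registered in the next file: a string "stack_id" is required, and anything else yields no action (the ID value is illustrative):

package actionsexample

import (
	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions"
)

// newStack builds a stack trace action from WAF-style parameters.
func newStack() []actions.Action {
	return actions.NewStackTraceAction(map[string]any{
		"stack_id": "8a36c2f2-29b3-4b11-b3d6-8e54e2e4f0b1",
	})
}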
+ +package actions + +import ( + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/stacktrace" +) + +func init() { + registerActionHandler("generate_stack", NewStackTraceAction) +} + +// StackTraceAction is an action that generates a stacktrace +type StackTraceAction struct { + Event *stacktrace.Event +} + +func (a *StackTraceAction) EmitData(op dyngo.Operation) { dyngo.EmitData(op, a) } + +// NewStackTraceAction creates an action for the "generate_stack" action type +func NewStackTraceAction(params map[string]any) []Action { + id, ok := params["stack_id"] + if !ok { + log.Debug("appsec: could not read stack_id parameter for generate_stack action") + return nil + } + + strID, ok := id.(string) + if !ok { + log.Debug("appsec: could not cast stacktrace ID to string") + return nil + } + + return []Action{ + &StackTraceAction{ + stacktrace.NewEvent(stacktrace.ExploitEvent, stacktrace.WithID(strID)), + }, + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/addresses.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/addresses.go new file mode 100644 index 00000000..e81f084b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/addresses.go @@ -0,0 +1,44 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package addresses + +const ( + ServerRequestMethodAddr = "server.request.method" + ServerRequestRawURIAddr = "server.request.uri.raw" + ServerRequestHeadersNoCookiesAddr = "server.request.headers.no_cookies" + ServerRequestCookiesAddr = "server.request.cookies" + ServerRequestQueryAddr = "server.request.query" + ServerRequestPathParamsAddr = "server.request.path_params" + ServerRequestBodyAddr = "server.request.body" + ServerResponseBodyAddr = "server.response.body" + ServerResponseStatusAddr = "server.response.status" + ServerResponseHeadersNoCookiesAddr = "server.response.headers.no_cookies" + + ClientIPAddr = "http.client_ip" + + UserIDAddr = "usr.id" + UserLoginAddr = "usr.login" + UserOrgAddr = "usr.org" + UserSessionIDAddr = "usr.session_id" + UserLoginSuccessAddr = "server.business_logic.users.login.success" + UserLoginFailureAddr = "server.business_logic.users.login.failure" + + ServerIoNetURLAddr = "server.io.net.url" + ServerIOFSFileAddr = "server.io.fs.file" + ServerDBStatementAddr = "server.db.statement" + ServerDBTypeAddr = "server.db.system" + ServerSysExecCmd = "server.sys.exec.cmd" + + GRPCServerMethodAddr = "grpc.server.method" + GRPCServerRequestMetadataAddr = "grpc.server.request.metadata" + GRPCServerRequestMessageAddr = "grpc.server.request.message" + GRPCServerResponseMessageAddr = "grpc.server.response.message" + GRPCServerResponseMetadataHeadersAddr = "grpc.server.response.metadata.headers" + GRPCServerResponseMetadataTrailersAddr = "grpc.server.response.metadata.trailers" + GRPCServerResponseStatusCodeAddr = "grpc.server.response.status" + + GraphQLServerResolverAddr = "graphql.server.resolver" +) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/builder.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/builder.go new file mode 100644 index
00000000..0e4930e2 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/builder.go @@ -0,0 +1,286 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package addresses + +import ( + "net/netip" + "strconv" + + "github.com/DataDog/go-libddwaf/v4" +) + +const contextProcessKey = "waf.context.processor" + +type RunAddressDataBuilder struct { + libddwaf.RunAddressData +} + +func NewAddressesBuilder() *RunAddressDataBuilder { + return &RunAddressDataBuilder{ + RunAddressData: libddwaf.RunAddressData{ + Persistent: make(map[string]any, 1), + Ephemeral: make(map[string]any, 1), + TimerKey: WAFScope, // Default value for TimerKey + }, + } +} + +func (b *RunAddressDataBuilder) WithMethod(method string) *RunAddressDataBuilder { + b.Persistent[ServerRequestMethodAddr] = method + return b +} + +func (b *RunAddressDataBuilder) WithRawURI(uri string) *RunAddressDataBuilder { + b.Persistent[ServerRequestRawURIAddr] = uri + return b +} + +func (b *RunAddressDataBuilder) WithHeadersNoCookies(headers map[string][]string) *RunAddressDataBuilder { + if len(headers) == 0 { + headers = nil + } + b.Persistent[ServerRequestHeadersNoCookiesAddr] = headers + return b +} + +func (b *RunAddressDataBuilder) WithCookies(cookies map[string][]string) *RunAddressDataBuilder { + if len(cookies) == 0 { + return b + } + b.Persistent[ServerRequestCookiesAddr] = cookies + return b +} + +func (b *RunAddressDataBuilder) WithQuery(query map[string][]string) *RunAddressDataBuilder { + if len(query) == 0 { + query = nil + } + b.Persistent[ServerRequestQueryAddr] = query + return b +} + +func (b *RunAddressDataBuilder) WithPathParams(params map[string]string) *RunAddressDataBuilder { + if len(params) == 0 { + return b + } + b.Persistent[ServerRequestPathParamsAddr] = params + return b +} + +func (b *RunAddressDataBuilder) WithRequestBody(body any) *RunAddressDataBuilder { + if body == nil { + return b + } + b.Persistent[ServerRequestBodyAddr] = body + return b +} + +func (b *RunAddressDataBuilder) WithResponseBody(body any) *RunAddressDataBuilder { + if body == nil { + return b + } + b.Persistent[ServerResponseBodyAddr] = body + return b +} + +func (b *RunAddressDataBuilder) WithResponseStatus(status int) *RunAddressDataBuilder { + if status == 0 { + return b + } + b.Persistent[ServerResponseStatusAddr] = strconv.Itoa(status) + return b +} + +func (b *RunAddressDataBuilder) WithResponseHeadersNoCookies(headers map[string][]string) *RunAddressDataBuilder { + if len(headers) == 0 { + return b + } + b.Persistent[ServerResponseHeadersNoCookiesAddr] = headers + return b +} + +func (b *RunAddressDataBuilder) WithClientIP(ip netip.Addr) *RunAddressDataBuilder { + if !ip.IsValid() { + return b + } + b.Persistent[ClientIPAddr] = ip.String() + return b +} + +func (b *RunAddressDataBuilder) WithUserID(id string) *RunAddressDataBuilder { + if id == "" { + return b + } + b.Persistent[UserIDAddr] = id + return b +} + +func (b *RunAddressDataBuilder) WithUserLogin(login string) *RunAddressDataBuilder { + if login == "" { + return b + } + b.Persistent[UserLoginAddr] = login + return b +} + +func (b *RunAddressDataBuilder) WithUserOrg(org string) *RunAddressDataBuilder { + if org == "" { + return b + } + b.Persistent[UserOrgAddr] = org + return b +} + +func (b *RunAddressDataBuilder) WithUserSessionID(id 
string) *RunAddressDataBuilder { + if id == "" { + return b + } + b.Persistent[UserSessionIDAddr] = id + return b + +} + +func (b *RunAddressDataBuilder) WithUserLoginSuccess() *RunAddressDataBuilder { + b.Persistent[UserLoginSuccessAddr] = nil + return b +} + +func (b *RunAddressDataBuilder) WithUserLoginFailure() *RunAddressDataBuilder { + b.Persistent[UserLoginFailureAddr] = nil + return b +} + +func (b *RunAddressDataBuilder) WithFilePath(file string) *RunAddressDataBuilder { + if file == "" { + return b + } + b.Ephemeral[ServerIOFSFileAddr] = file + b.TimerKey = RASPScope + return b +} + +func (b *RunAddressDataBuilder) WithURL(url string) *RunAddressDataBuilder { + if url == "" { + return b + } + b.Ephemeral[ServerIoNetURLAddr] = url + b.TimerKey = RASPScope + return b +} + +func (b *RunAddressDataBuilder) WithDBStatement(statement string) *RunAddressDataBuilder { + if statement == "" { + return b + } + b.Ephemeral[ServerDBStatementAddr] = statement + b.TimerKey = RASPScope + return b +} + +func (b *RunAddressDataBuilder) WithDBType(driver string) *RunAddressDataBuilder { + if driver == "" { + return b + } + b.Ephemeral[ServerDBTypeAddr] = driver + b.TimerKey = RASPScope + return b +} + +func (b *RunAddressDataBuilder) WithSysExecCmd(cmd []string) *RunAddressDataBuilder { + if len(cmd) == 0 { + return b + } + b.Ephemeral[ServerSysExecCmd] = cmd + b.TimerKey = RASPScope + return b +} + +func (b *RunAddressDataBuilder) WithGRPCMethod(method string) *RunAddressDataBuilder { + if method == "" { + return b + } + b.Persistent[GRPCServerMethodAddr] = method + return b +} + +func (b *RunAddressDataBuilder) WithGRPCRequestMessage(message any) *RunAddressDataBuilder { + if message == nil { + return b + } + b.Ephemeral[GRPCServerRequestMessageAddr] = message + return b +} + +func (b *RunAddressDataBuilder) WithGRPCRequestMetadata(metadata map[string][]string) *RunAddressDataBuilder { + if len(metadata) == 0 { + return b + } + b.Persistent[GRPCServerRequestMetadataAddr] = metadata + return b +} + +func (b *RunAddressDataBuilder) WithGRPCResponseMessage(message any) *RunAddressDataBuilder { + if message == nil { + return b + } + b.Ephemeral[GRPCServerResponseMessageAddr] = message + return b +} + +func (b *RunAddressDataBuilder) WithGRPCResponseMetadataHeaders(headers map[string][]string) *RunAddressDataBuilder { + if len(headers) == 0 { + return b + } + b.Persistent[GRPCServerResponseMetadataHeadersAddr] = headers + return b +} + +func (b *RunAddressDataBuilder) WithGRPCResponseMetadataTrailers(trailers map[string][]string) *RunAddressDataBuilder { + if len(trailers) == 0 { + return b + } + b.Persistent[GRPCServerResponseMetadataTrailersAddr] = trailers + return b +} + +func (b *RunAddressDataBuilder) WithGRPCResponseStatusCode(status int) *RunAddressDataBuilder { + if status == 0 { + return b + } + b.Persistent[GRPCServerResponseStatusCodeAddr] = strconv.Itoa(status) + return b +} + +func (b *RunAddressDataBuilder) WithGraphQLResolver(fieldName string, args map[string]any) *RunAddressDataBuilder { + if _, ok := b.Ephemeral[GraphQLServerResolverAddr]; !ok { + b.Ephemeral[GraphQLServerResolverAddr] = make(map[string]any, 1) + } + + b.Ephemeral[GraphQLServerResolverAddr].(map[string]any)[fieldName] = args + return b +} + +func (b *RunAddressDataBuilder) ExtractSchema() *RunAddressDataBuilder { + if _, ok := b.Persistent[contextProcessKey]; !ok { + b.Persistent[contextProcessKey] = make(map[string]bool, 1) + } + + b.Persistent[contextProcessKey].(map[string]bool)["extract-schema"] = true + return b 
+} + +func (b *RunAddressDataBuilder) NoExtractSchema() *RunAddressDataBuilder { + if _, ok := b.Persistent[contextProcessKey]; !ok { + b.Persistent[contextProcessKey] = make(map[string]bool, 1) + } + + b.Persistent[contextProcessKey].(map[string]bool)["extract-schema"] = false + return b +} + +func (b *RunAddressDataBuilder) Build() libddwaf.RunAddressData { + return b.RunAddressData +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/rasp_rule_type.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/rasp_rule_type.go new file mode 100644 index 00000000..8e5b04f4 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/rasp_rule_type.go @@ -0,0 +1,64 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package addresses + +import ( + "math" + + "github.com/DataDog/go-libddwaf/v4" +) + +type RASPRuleType uint8 + +const ( + RASPRuleTypeLFI RASPRuleType = iota + RASPRuleTypeSSRF + RASPRuleTypeSQLI + RASPRuleTypeCMDI +) + +var RASPRuleTypes = [...]RASPRuleType{ + RASPRuleTypeLFI, + RASPRuleTypeSSRF, + RASPRuleTypeSQLI, + RASPRuleTypeCMDI, +} + +func (r RASPRuleType) String() string { + switch r { + case RASPRuleTypeLFI: + return "lfi" + case RASPRuleTypeSSRF: + return "ssrf" + case RASPRuleTypeSQLI: + return "sql_injection" + case RASPRuleTypeCMDI: + return "command_injection" + } + return "unknown()" +} + +// RASPRuleTypeFromAddressSet returns the RASPRuleType for the given address set if it has a RASP address. +func RASPRuleTypeFromAddressSet(addressSet libddwaf.RunAddressData) (RASPRuleType, bool) { + if addressSet.TimerKey != RASPScope { + return math.MaxUint8, false + } + + for address := range addressSet.Ephemeral { + switch address { + case ServerIOFSFileAddr: + return RASPRuleTypeLFI, true + case ServerIoNetURLAddr: + return RASPRuleTypeSSRF, true + case ServerDBStatementAddr, ServerDBTypeAddr: + return RASPRuleTypeSQLI, true + case ServerSysExecCmd: + return RASPRuleTypeCMDI, true + } + } + + return math.MaxUint8, false +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/scope.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/scope.go new file mode 100644 index 00000000..8c319816 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/scope.go @@ -0,0 +1,25 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package addresses + +import ( + "github.com/DataDog/go-libddwaf/v4/timer" +) + +// Scope is used to divide the time spent in go-libddwaf between multiple parts. These scopes are then fed into +// [libddwaf.RunAddressData.TimerKey] to decide where to store the time spent in the WAF, +// which is then added to [libddwaf.Context.Timer].
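To illustrate the scope plumbing defined below together with the builder above, a hedged sketch: RASP address setters such as WithURL flip the builder's TimerKey from the default WAF scope to the RASP scope, and RASPRuleTypeFromAddressSet then derives the rule type from the ephemeral address that was set (the URL value is illustrative):

package scopeexample

import (
	"fmt"

	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses"
)

// classify shows the scope and rule-type derivation end to end.
func classify() {
	data := addresses.NewAddressesBuilder().
		WithURL("http://169.254.169.254/latest/meta-data"). // switches TimerKey to the RASP scope
		Build()

	ruleType, ok := addresses.RASPRuleTypeFromAddressSet(data)
	fmt.Println(data.TimerKey, ruleType, ok) // rasp ssrf true
}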
+type Scope = timer.Key + +const ( + RASPScope Scope = "rasp" + WAFScope Scope = "waf" +) + +var Scopes = [...]Scope{ + RASPScope, + WAFScope, +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace/service_entry_span.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace/service_entry_span.go new file mode 100644 index 00000000..9c85549f --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace/service_entry_span.go @@ -0,0 +1,160 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package trace + +import ( + "context" + "encoding/json" + "sync" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +type ( + // ServiceEntrySpanOperation is a dyngo.Operation that holds the first span of a service, usually an HTTP or gRPC span. + ServiceEntrySpanOperation struct { + dyngo.Operation + jsonTags map[string]any + tagSetter TagSetter + mu sync.Mutex + } + + // ServiceEntrySpanArgs is the arguments for a ServiceEntrySpanOperation + ServiceEntrySpanArgs struct{} + + // ServiceEntrySpanTag is a key value pair event that is used to tag a service entry span + ServiceEntrySpanTag struct { + Key string + Value any + } + + // JSONServiceEntrySpanTag is a key value pair event that is used to tag a service entry span. + // It will be serialized as JSON when added to the span + JSONServiceEntrySpanTag struct { + Key string + Value any + } + + // ServiceEntrySpanTagsBulk is a bulk event that is used to send tags to a service entry span + ServiceEntrySpanTagsBulk struct { + Tags []JSONServiceEntrySpanTag + SerializableTags []JSONServiceEntrySpanTag + } +) + +func (ServiceEntrySpanArgs) IsArgOf(*ServiceEntrySpanOperation) {} + +// SetTag adds the key/value pair to the tags to add to the service entry span +func (op *ServiceEntrySpanOperation) SetTag(key string, value any) { + op.mu.Lock() + defer op.mu.Unlock() + op.tagSetter.SetTag(key, value) +} + +// SetSerializableTag adds the key/value pair to the tags to add to the service entry span. +// The value MAY be serialized as JSON if necessary but simple types will not be serialized. +func (op *ServiceEntrySpanOperation) SetSerializableTag(key string, value any) { + op.mu.Lock() + defer op.mu.Unlock() + op.setSerializableTag(key, value) +} + +// SetSerializableTags adds the key/value pairs to the tags to add to the service entry span. +// Values MAY be serialized as JSON if necessary but simple types will not be serialized.
+func (op *ServiceEntrySpanOperation) SetSerializableTags(tags map[string]any) { + op.mu.Lock() + defer op.mu.Unlock() + for key, value := range tags { + op.setSerializableTag(key, value) + } +} + +func (op *ServiceEntrySpanOperation) setSerializableTag(key string, value any) { + switch value.(type) { + case string, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64, bool: + op.tagSetter.SetTag(key, value) + default: + op.jsonTags[key] = value + } +} + +// SetTags fills the span tags using the key/value pairs found in `tags` +func (op *ServiceEntrySpanOperation) SetTags(tags map[string]any) { + op.mu.Lock() + defer op.mu.Unlock() + for k, v := range tags { + op.tagSetter.SetTag(k, v) + } +} + +// SetStringTags fills the span tags using the key/value pairs found in `tags` +func (op *ServiceEntrySpanOperation) SetStringTags(tags map[string]string) { + op.mu.Lock() + defer op.mu.Unlock() + for k, v := range tags { + op.tagSetter.SetTag(k, v) + } +} + +// OnServiceEntrySpanTagEvent is a callback that is called when a dyngo.OnData is triggered with a ServiceEntrySpanTag event +func (op *ServiceEntrySpanOperation) OnServiceEntrySpanTagEvent(tag ServiceEntrySpanTag) { + op.SetTag(tag.Key, tag.Value) +} + +// OnJSONServiceEntrySpanTagEvent is a callback that is called when a dyngo.OnData is triggered with a JSONServiceEntrySpanTag event +func (op *ServiceEntrySpanOperation) OnJSONServiceEntrySpanTagEvent(tag JSONServiceEntrySpanTag) { + op.SetSerializableTag(tag.Key, tag.Value) +} + +// OnServiceEntrySpanTagsBulkEvent is a callback that is called when a dyngo.OnData is triggered with a ServiceEntrySpanTagsBulk event +func (op *ServiceEntrySpanOperation) OnServiceEntrySpanTagsBulkEvent(bulk ServiceEntrySpanTagsBulk) { + for _, v := range bulk.Tags { + op.SetTag(v.Key, v.Value) + } + + for _, v := range bulk.SerializableTags { + op.SetSerializableTag(v.Key, v.Value) + } +} + +// OnSpanTagEvent is a listener for SpanTag events. +func (op *ServiceEntrySpanOperation) OnSpanTagEvent(tag SpanTag) { + op.SetTag(tag.Key, tag.Value) +} + +func StartServiceEntrySpanOperation(ctx context.Context, span TagSetter) (*ServiceEntrySpanOperation, context.Context) { + parent, _ := dyngo.FromContext(ctx) + if span == nil { + // Ensure we have a non-nil tagSetter going forward, so we don't have to check all the time. + span = NoopTagSetter{} + } + op := &ServiceEntrySpanOperation{ + Operation: dyngo.NewOperation(parent), + jsonTags: make(map[string]any, 2), + tagSetter: span, + } + return op, dyngo.StartAndRegisterOperation(ctx, op, ServiceEntrySpanArgs{}) +} + +func (op *ServiceEntrySpanOperation) Finish() { + span := op.tagSetter + if _, ok := span.(NoopTagSetter); ok { // If the span is a NoopTagSetter or is nil, we don't need to set any tags + return + } + + op.mu.Lock() + defer op.mu.Unlock() + + for k, v := range op.jsonTags { + strValue, err := json.Marshal(v) + if err != nil { + log.Debug("appsec: failed to marshal tag %q: %v", k, err.Error()) + continue + } + span.SetTag(k, string(strValue)) + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace/span.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace/span.go new file mode 100644 index 00000000..8e2719ab --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace/span.go @@ -0,0 +1,67 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package trace + +import ( + "context" + "sync" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" +) + +type ( + // SpanOperation is a dyngo.Operation that holds a tracer.Span. + // It is used as a middleware between the appsec code and the tracer code; + // hopefully some day this operation will create spans instead of simply using them. + SpanOperation struct { + dyngo.Operation + tags map[string]any + mu sync.Mutex + } + + // SpanArgs is the arguments for a SpanOperation + SpanArgs struct{} + + // SpanTag is a key value pair event that is used to tag the current span + SpanTag struct { + Key string + Value any + } +) + +func (SpanArgs) IsArgOf(*SpanOperation) {} + +// SetTag adds the key/value pair to the tags to add to the span +func (op *SpanOperation) SetTag(key string, value any) { + op.mu.Lock() + defer op.mu.Unlock() + op.tags[key] = value +} + +// OnSpanTagEvent is a listener for SpanTag events. +func (op *SpanOperation) OnSpanTagEvent(tag SpanTag) { + op.SetTag(tag.Key, tag.Value) +} + +func StartSpanOperation(ctx context.Context) (*SpanOperation, context.Context) { + op := &SpanOperation{ + tags: make(map[string]any), + } + return op, dyngo.StartAndRegisterOperation(ctx, op, SpanArgs{}) +} + +func (op *SpanOperation) Finish(span TagSetter) { + if _, ok := span.(NoopTagSetter); ok || span == nil { // If the span is a NoopTagSetter or is nil, we don't need to set any tags + return + } + + op.mu.Lock() + defer op.mu.Unlock() + + for k, v := range op.tags { + span.SetTag(k, v) + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace/tag_setter.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace/tag_setter.go new file mode 100644 index 00000000..a7f5bc19 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace/tag_setter.go @@ -0,0 +1,29 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package trace + +// TagSetter is the interface needed to set a span tag. +type TagSetter interface { + SetTag(string, any) +} + +// NoopTagSetter is a TagSetter that does nothing. Useful when no tracer +// Span is available, but a TagSetter is assumed. +type NoopTagSetter struct{} + +func (NoopTagSetter) SetTag(string, any) { + // Do nothing +} + +type TestTagSetter map[string]any + +func (t TestTagSetter) SetTag(key string, value any) { + t[key] = value +} + +func (t TestTagSetter) Tags() map[string]any { + return t +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/env/env.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/env/env.go new file mode 100644 index 00000000..92a80ee7 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/env/env.go @@ -0,0 +1,40 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package env + +import ( + "github.com/DataDog/dd-trace-go/v2/internal/env" +) + +// Get is a wrapper around os.Getenv that validates the environment variable +// against a list of supported environment variables.
+// +// When an environment variable is not supported because it is not +// listed in the list of supported environment variables, the function will log an error +// and behave as if the environment variable was not set. +// +// In testing mode, the reader will automatically add the environment variable +// to the configuration file. +// +// This function is a passthrough to the internal env package. +func Get(name string) string { + return env.Get(name) +} + +// Lookup is a wrapper around os.LookupEnv that validates the environment variable +// against a list of supported environment variables. +// +// When an environment variable is not supported because it is not +// listed in the list of supported environment variables, the function will log an error +// and behave as if the environment variable was not set. +// +// In testing mode, the reader will automatically add the environment variable +// to the configuration file. +// +// This function is a passthrough to the internal env package. +func Lookup(name string) (string, bool) { + return env.Lookup(name) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/errortrace/errortrace.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/errortrace/errortrace.go new file mode 100644 index 00000000..37bf9cc5 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/errortrace/errortrace.go @@ -0,0 +1,162 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package errortrace + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "strconv" + "strings" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" +) + +// TracerError is an error type that holds stackframes from when the error was thrown. +// It can be used interchangeably with the built-in Go error type. +type TracerError struct { + stackFrames *runtime.Frames + inner error + stack *bytes.Buffer +} + +// defaultStackLength specifies the default maximum size of a stack trace. +const defaultStackLength = 32 + +func (err *TracerError) Error() string { + return err.inner.Error() +} + +func New(text string) *TracerError { + // Skip one to exclude New(...) + return Wrap(errors.New(text)) +} + +// Wrap takes in an error and records the stack trace at the moment that it was thrown. +func Wrap(err error) *TracerError { + return WrapN(err, 0, 1) +} + +// WrapN takes in an error and records the stack trace at the moment that it was thrown. +// It will capture a maximum of `n` entries, skipping the first `skip` entries. +// If n is 0, it will capture up to 32 entries instead.
+func WrapN(err error, n uint, skip uint) *TracerError { + if err == nil { + return nil + } + var e *TracerError + if errors.As(err, &e) { + return e + } + if n <= 0 { + n = defaultStackLength + } + + telemetry.Count(telemetry.NamespaceTracers, "errorstack.source", []string{"source:TracerError"}).Submit(1) + now := time.Now() + defer func() { + dur := float64(time.Since(now)) + telemetry.Distribution(telemetry.NamespaceTracers, "errorstack.duration", []string{"source:TracerError"}).Submit(dur) + }() + + pcs := make([]uintptr, n) + var stackFrames *runtime.Frames + // +2 to exclude runtime.Callers and Wrap + numFrames := runtime.Callers(2+int(skip), pcs) + if numFrames == 0 { + stackFrames = nil + } else { + stackFrames = runtime.CallersFrames(pcs[:numFrames]) + } + + tracerErr := &TracerError{ + stackFrames: stackFrames, + inner: err, + } + return tracerErr +} + +// Format returns a string representation of the stack trace. +func (err *TracerError) Format() string { + if err == nil || err.stackFrames == nil { + return "" + } + if err.stack != nil { + return err.stack.String() + } + + out := bytes.Buffer{} + for i := 0; ; i++ { + frame, more := err.stackFrames.Next() + if i != 0 { + out.WriteByte('\n') + } + out.WriteString(frame.Function) + out.WriteByte('\n') + out.WriteByte('\t') + out.WriteString(frame.File) + out.WriteByte(':') + out.WriteString(strconv.Itoa(frame.Line)) + if !more { + break + } + } + // CallersFrames returns an iterator that is consumed as we read it. In order to + // allow calling Format() multiple times, we save the result into err.stack, which can be + // returned in future calls + err.stack = &out + return out.String() +} + +// Errorf serves the same purpose as fmt.Errorf, but returns a TracerError +// and prevents wrapping errors of type TracerError twice. +// The %w flag will only wrap errors if they are not already of type *TracerError. +func Errorf(format string, a ...any) *TracerError { + switch len(a) { + case 0: + return New(format) + case 1: + if _, ok := a[0].(*TracerError); ok { + format = strings.Replace(format, "%w", "%v", 1) + } + default: + aIndex := 0 + var newFormat strings.Builder + for i := 0; i < len(format); i++ { + c := format[i] + newFormat.WriteByte(c) + if c != '%' { + continue + } + if i+1 >= len(format) { + break + } + if format[i+1] == '%' { + continue + } + if format[i+1] == 'w' { + if _, ok := a[aIndex].(*TracerError); ok { + newFormat.WriteString("v") + i++ + } + } + aIndex++ + } + format = newFormat.String() + } + err := fmt.Errorf(format, a...) + return Wrap(err) +} + +// Unwrap takes a wrapped error and returns the inner error. +func (err *TracerError) Unwrap() error { + if err == nil { + return nil + } + return err.inner +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/before_handle.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/before_handle.go new file mode 100644 index 00000000..e3d26316 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/before_handle.go @@ -0,0 +1,93 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
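The errortrace package above captures the stack once at wrap time and refuses to double-wrap a TracerError. A rough editorial usage sketch (the error text is illustrative; it assumes the package's telemetry calls no-op when no tracer is running):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/DataDog/dd-trace-go/v2/instrumentation/errortrace"
)

func main() {
	base := errors.New("connection refused")

	// Errorf behaves like fmt.Errorf, but also records the call stack.
	err := errortrace.Errorf("dialing agent: %w", base)
	fmt.Println(err.Error())          // dialing agent: connection refused
	fmt.Println(errors.Is(err, base)) // true: the %w chain is preserved

	// Wrapping again is a no-op: the existing TracerError is returned.
	same := errortrace.Wrap(err)
	fmt.Println(same == err) // true

	// Format renders the recorded frames; the result is memoized in
	// err.stack, so it is safe to call repeatedly.
	fmt.Println(err.Format() != "") // true
}
```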
+
+package httptrace
+
+import (
+	"net/http"
+
+	"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+	"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec"
+	"github.com/DataDog/dd-trace-go/v2/instrumentation/options"
+	"github.com/DataDog/dd-trace-go/v2/internal/appsec"
+)
+
+// ServeConfig specifies the tracing configuration when using TraceAndServe.
+type ServeConfig struct {
+	// Framework is the name of the framework or library being used (optional).
+	Framework string
+	// Service specifies the service name to use. If left blank, the global service name
+	// will be inherited.
+	Service string
+	// Resource optionally specifies the resource name for this request.
+	Resource string
+	// QueryParams should be true in order to append the URL query values to the "http.url" tag.
+	QueryParams bool
+	// Route is the request's matched route, if any; if empty, a quantization algorithm will derive one from the request URL.
+	Route string
+	// RouteParams specifies framework-specific route parameters (e.g. for route /user/:id coming
+	// in as /user/123 we'll have {"id": "123"}). This field is optional and is used for monitoring
+	// by AppSec. It is only taken into account when AppSec is enabled.
+	RouteParams map[string]string
+	// FinishOpts specifies any options to be used when finishing the request span.
+	FinishOpts []tracer.FinishOption
+	// SpanOpts specifies any options to be applied to the request starting span.
+	SpanOpts []tracer.StartSpanOption
+	// IsStatusError allows customizing which status codes are treated as errors.
+	IsStatusError func(int) bool
+}
+
+// BeforeHandle contains functionality that should be executed before a http.Handler runs.
+// It returns the "traced" http.ResponseWriter and http.Request, an additional afterHandle function
+// that should be executed after the Handler runs, and a handled bool that reports whether the request
+// has already been handled; if it was, the original handler should not run.
+func BeforeHandle(cfg *ServeConfig, w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request, func(), bool) {
+	if cfg == nil {
+		cfg = new(ServeConfig)
+	}
+	opts := options.Expand(cfg.SpanOpts, 2, 3)
+	// Pre-append the span.kind and component tags to the options so that they can be overridden;
+	// http.route is appended below only when a route is set.
+	opts[0] = tracer.Tag(ext.SpanKind, ext.SpanKindServer)
+	opts[1] = tracer.Tag(ext.Component, "net/http")
+	if cfg.Service != "" {
+		opts = append(opts, tracer.ServiceName(cfg.Service))
+	}
+	if cfg.Resource != "" {
+		opts = append(opts, tracer.ResourceName(cfg.Resource))
+	}
+	if cfg.Route != "" {
+		opts = append(opts, tracer.Tag(ext.HTTPRoute, cfg.Route))
+	}
+	span, ctx, finishSpans := StartRequestSpan(r, opts...)
+	rw, ddrw := wrapResponseWriter(w)
+	rt := r.WithContext(ctx)
+	closeSpan := func() {
+		finishSpans(ddrw.status, cfg.IsStatusError, cfg.FinishOpts...)
+	}
+	afterHandle := closeSpan
+	handled := false
+	if appsec.Enabled() {
+		route := cfg.Route
+		if route == "" {
+			route = QuantizeURL(r.URL.EscapedPath())
+		}
+		appsecConfig := &httpsec.Config{
+			Framework:   cfg.Framework,
+			Route:       route,
+			RouteParams: cfg.RouteParams,
+		}
+
+		secW, secReq, secAfterHandle, secHandled := httpsec.BeforeHandle(rw, rt, span, appsecConfig)
+		afterHandle = func() {
+			secAfterHandle()
+			closeSpan()
+		}
+		rw = secW
+		rt = secReq
+		handled = secHandled
+	}
+	return rw, rt, afterHandle, handled
+}
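For integrators, BeforeHandle is the whole middleware contract in one call. A hedged editorial sketch of how a wrapper might use it (names like wrap and the route string are illustrative, not part of the patch):

```go
package main

import (
	"net/http"

	"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
	"github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace"
)

// wrap traces h: it swaps in the traced writer/request, defers span
// finishing, and short-circuits when AppSec already handled the request.
func wrap(h http.Handler, route string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		cfg := &httptrace.ServeConfig{
			Framework: "net/http",
			Route:     route,
			SpanOpts:  []tracer.StartSpanOption{tracer.Tag("custom.tag", "v")},
		}
		tw, tr, afterHandle, handled := httptrace.BeforeHandle(cfg, w, r)
		defer afterHandle() // finishes the request span (and AppSec handling)
		if handled {
			return // e.g. the request was blocked by AppSec
		}
		h.ServeHTTP(tw, tr)
	})
}

func main() {
	http.Handle("/users/", wrap(http.NotFoundHandler(), "/users/:id"))
	_ = http.ListenAndServe(":8080", nil)
}
```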
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/config.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/config.go
new file mode 100644
index 00000000..8236410c
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/config.go
@@ -0,0 +1,175 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022 Datadog, Inc.
+
+package httptrace
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/DataDog/dd-trace-go/v2/internal"
+	"github.com/DataDog/dd-trace-go/v2/internal/env"
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+)
+
+// The env vars described below are used to configure the http security tags collection.
+// See https://docs.datadoghq.com/tracing/setup_overview/configure_data_security to learn how to use them properly.
+const (
+	// envQueryStringDisabled is the name of the env var used to disable query string collection.
+	envQueryStringDisabled = "DD_TRACE_HTTP_URL_QUERY_STRING_DISABLED"
+	// EnvQueryStringRegexp is the name of the env var used to specify the regexp to use for query string obfuscation.
+	EnvQueryStringRegexp = "DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP"
+	// envTraceClientIPEnabled is the name of the env var used to specify whether or not to collect the client IP in span tags.
+	envTraceClientIPEnabled = "DD_TRACE_CLIENT_IP_ENABLED"
+	// envServerErrorStatuses is the name of the env var used to specify error status codes on http server spans.
+	envServerErrorStatuses = "DD_TRACE_HTTP_SERVER_ERROR_STATUSES"
+	// envInferredProxyServicesEnabled is the name of the env var used for enabling inferred span tracing.
+	envInferredProxyServicesEnabled = "DD_TRACE_INFERRED_PROXY_SERVICES_ENABLED"
+)
+
+// defaultQueryStringRegexp is the regexp used for query string obfuscation if [EnvQueryStringRegexp] is empty.
+var defaultQueryStringRegexp = regexp.MustCompile("(?i)(?:p(?:ass)?w(?:or)?d|pass(?:_?phrase)?|secret|(?:api_?|private_?|public_?|access_?|secret_?)key(?:_?id)?|token|consumer_?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?)(?:(?:\\s|%20)*(?:=|%3D)[^&]+|(?:\"|%22)(?:\\s|%20)*(?::|%3A)(?:\\s|%20)*(?:\"|%22)(?:%2[^2]|%[^2]|[^\"%])+(?:\"|%22))|bearer(?:\\s|%20)+[a-z0-9\\._\\-]|token(?::|%3A)[a-z0-9]{13}|gh[opsu]_[0-9a-zA-Z]{36}|ey[I-L](?:[\\w=-]|%3D)+\\.ey[I-L](?:[\\w=-]|%3D)+(?:\\.(?:[\\w.+\\/=-]|%3D|%2F|%2B)+)?|[\\-]{5}BEGIN(?:[a-z\\s]|%20)+PRIVATE(?:\\s|%20)KEY[\\-]{5}[^\\-]+[\\-]{5}END(?:[a-z\\s]|%20)+PRIVATE(?:\\s|%20)KEY|ssh-rsa(?:\\s|%20)*(?:[a-z0-9\\/\\.+]|%2F|%5C|%2B){100,}")
+
+type config struct {
+	queryStringRegexp *regexp.Regexp // specifies the regexp to use for query string obfuscation.
+	queryString       bool           // reports whether the query string should be included in the URL span tag.
+ traceClientIP bool + isStatusError func(statusCode int) bool + inferredProxyServicesEnabled bool + allowAllBaggage bool // tag all baggage items when true (DD_TRACE_BAGGAGE_TAG_KEYS="*"). + baggageTagKeys map[string]struct{} // when allowAllBaggage is false, only tag baggage items whose keys are listed here. +} + +func (c config) String() string { + return fmt.Sprintf("config{queryString: %t, traceClientIP: %t, inferredProxyServicesEnabled: %t}", c.queryString, c.traceClientIP, c.inferredProxyServicesEnabled) +} + +// ResetCfg sets local variable cfg back to its defaults (mainly useful for testing) +func ResetCfg() { + cfg = newConfig() +} + +func newConfig() config { + c := config{ + queryString: !internal.BoolEnv(envQueryStringDisabled, false), + queryStringRegexp: QueryStringRegexp(), + traceClientIP: internal.BoolEnv(envTraceClientIPEnabled, false), + isStatusError: isServerError, + inferredProxyServicesEnabled: internal.BoolEnv(envInferredProxyServicesEnabled, false), + baggageTagKeys: make(map[string]struct{}), + } + if v, ok := env.Lookup("DD_TRACE_BAGGAGE_TAG_KEYS"); ok { + if v == "*" { + c.allowAllBaggage = true + } else { + for _, part := range strings.Split(v, ",") { + key := strings.TrimSpace(part) + if key == "" { + continue + } + c.baggageTagKeys[key] = struct{}{} + } + } + } else { + c.baggageTagKeys = defaultBaggageTagKeys() + } + v := env.Get(envServerErrorStatuses) + if fn := GetErrorCodesFromInput(v); fn != nil { + c.isStatusError = fn + } + return c +} + +func isServerError(statusCode int) bool { + return statusCode >= 500 && statusCode < 600 +} + +func QueryStringRegexp() *regexp.Regexp { + if s, ok := env.Lookup(EnvQueryStringRegexp); !ok { + return defaultQueryStringRegexp + } else if s == "" { + log.Debug("%s is set but empty. Query string obfuscation will be disabled.", EnvQueryStringRegexp) + return nil + } else if r, err := regexp.Compile(s); err == nil { + return r + } + log.Error("Could not compile regexp from %s. 
Using default regexp instead.", EnvQueryStringRegexp)
+	return defaultQueryStringRegexp
+}
+
+// GetErrorCodesFromInput parses a comma-separated string s to determine which codes are to be considered errors.
+// Its purpose is to support the DD_TRACE_HTTP_SERVER_ERROR_STATUSES env var.
+// If the error condition cannot be determined from s, nil is returned.
+// E.g., an input of "100,200,300-400" returns a function that returns true on 100, 200, and all values between 300 and 400, inclusive.
+// Any input that cannot be translated to integer values returns nil.
+func GetErrorCodesFromInput(s string) func(statusCode int) bool {
+	if s == "" {
+		return nil
+	}
+	var codes []int
+	var ranges [][]int
+	vals := strings.Split(s, ",")
+	for _, val := range vals {
+		// "-" indicates a range of values
+		if strings.Contains(val, "-") {
+			bounds := strings.Split(val, "-")
+			if len(bounds) != 2 {
+				log.Debug("Trouble parsing %q due to entry %q, using default error status determination logic", s, val)
+				return nil
+			}
+			before, err := strconv.Atoi(bounds[0])
+			if err != nil {
+				log.Debug("Trouble parsing %q due to entry %q, using default error status determination logic", s, val)
+				return nil
+			}
+			after, err := strconv.Atoi(bounds[1])
+			if err != nil {
+				log.Debug("Trouble parsing %q due to entry %q, using default error status determination logic", s, val)
+				return nil
+			}
+			ranges = append(ranges, []int{before, after})
+		} else {
+			intVal, err := strconv.Atoi(val)
+			if err != nil {
+				log.Debug("Trouble parsing %q due to entry %q, using default error status determination logic", s, val)
+				return nil
+			}
+			codes = append(codes, intVal)
+		}
+	}
+	return func(statusCode int) bool {
+		for _, c := range codes {
+			if c == statusCode {
+				return true
+			}
+		}
+		for _, bounds := range ranges {
+			if statusCode >= bounds[0] && statusCode <= bounds[1] {
+				return true
+			}
+		}
+		return false
+	}
+}
+
+func defaultBaggageTagKeys() map[string]struct{} {
+	return map[string]struct{}{
+		"user.id":    {},
+		"account.id": {},
+		"session.id": {},
+	}
+}
+
+// tagBaggageKey returns true if we should tag this baggage key.
+func (c *config) tagBaggageKey(key string) bool {
+	if c.allowAllBaggage {
+		return true
+	}
+	_, ok := c.baggageTagKeys[key]
+	return ok
+}
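The status-code parser above accepts single codes and inclusive ranges, and falls back to nil (the default 5xx logic) on any malformed entry. A small editorial illustration of the resulting predicate:

```go
package main

import (
	"fmt"

	"github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace"
)

func main() {
	// Equivalent to DD_TRACE_HTTP_SERVER_ERROR_STATUSES="400-404,500".
	isErr := httptrace.GetErrorCodesFromInput("400-404,500")
	fmt.Println(isErr(401)) // true: inside the 400-404 range
	fmt.Println(isErr(418)) // false
	fmt.Println(isErr(500)) // true: listed explicitly

	// Malformed input yields nil, i.e. keep the default 5xx check.
	fmt.Println(httptrace.GetErrorCodesFromInput("abc") == nil) // true
}
```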
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/httptrace.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/httptrace.go
new file mode 100644
index 00000000..c9cfe8d5
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/httptrace.go
@@ -0,0 +1,250 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Package httptrace provides functionality to trace HTTP requests that is commonly required and used across
+// contrib/** integrations.
+package httptrace
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/DataDog/dd-trace-go/v2/ddtrace/baggage"
+	"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+	"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
+	"github.com/DataDog/dd-trace-go/v2/instrumentation"
+	"github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec"
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry"
+)
+
+var (
+	cfg = newConfig()
+)
+
+var instr *instrumentation.Instrumentation
+
+func init() {
+	instr = instrumentation.Load(instrumentation.PackageNetHTTP)
+}
+
+var reportTelemetryConfigOnce sync.Once
+
+type inferredSpanCreatedCtxKey struct{}
+
+// FinishSpanFunc finishes a request span with the given status code, optionally overriding the error check.
+type FinishSpanFunc = func(status int, errorFn func(int) bool, opts ...tracer.FinishOption)
+
+// StartRequestSpan starts an HTTP request span with the standard list of HTTP request span tags (http.method, http.url,
+// http.useragent). Any further span start option can be added with opts.
+func StartRequestSpan(r *http.Request, opts ...tracer.StartSpanOption) (*tracer.Span, context.Context, FinishSpanFunc) {
+	// Append our span options before the given ones so that the caller can "overwrite" them.
+	// TODO(): rework span start option handling (https://github.com/DataDog/dd-trace-go/issues/1352)
+
+	// We cannot track the configuration in newConfig because it's called during init() and the telemetry client
+	// is not initialized yet.
+	reportTelemetryConfigOnce.Do(func() {
+		telemetry.RegisterAppConfig("inferred_proxy_services_enabled", cfg.inferredProxyServicesEnabled, telemetry.OriginEnvVar)
+		log.Debug("internal/httptrace: telemetry.RegisterAppConfig called with cfg: %s", cfg)
+	})
+
+	var ipTags map[string]string
+	if cfg.traceClientIP {
+		ipTags, _ = httpsec.ClientIPTags(r.Header, true, r.RemoteAddr)
+	}
+
+	var inferredProxySpan *tracer.Span
+
+	if cfg.inferredProxyServicesEnabled {
+		inferredProxySpanCreated := false
+
+		if created, ok := r.Context().Value(inferredSpanCreatedCtxKey{}).(bool); ok {
+			inferredProxySpanCreated = created
+		}
+
+		if !inferredProxySpanCreated {
+			var inferredStartSpanOpts []tracer.StartSpanOption
+
+			requestProxyContext, err := extractInferredProxyContext(r.Header)
+			if err != nil {
+				log.Debug("%s\n", err.Error())
+			} else {
+				// TODO: Baggage?
+				spanParentCtx, spanParentErr := tracer.Extract(tracer.HTTPHeadersCarrier(r.Header))
+				if spanParentErr == nil {
+					if spanParentCtx != nil && spanParentCtx.SpanLinks() != nil {
+						inferredStartSpanOpts = append(inferredStartSpanOpts, tracer.WithSpanLinks(spanParentCtx.SpanLinks()))
+					}
+				}
+				inferredProxySpan = startInferredProxySpan(requestProxyContext, spanParentCtx, inferredStartSpanOpts...)
+			}
+		}
+	}
+
+	parentCtx, extractErr := tracer.Extract(tracer.HTTPHeadersCarrier(r.Header))
+	if extractErr == nil && parentCtx != nil {
+		ctx2 := r.Context()
+		parentCtx.ForeachBaggageItem(func(k, v string) bool {
+			ctx2 = baggage.Set(ctx2, k, v)
+			return true
+		})
+		r = r.WithContext(ctx2)
+	}
+
+	nopts := make([]tracer.StartSpanOption, 0, len(opts)+1+len(ipTags))
+	nopts = append(nopts,
+		func(ssCfg *tracer.StartSpanConfig) {
+			if ssCfg.Tags == nil {
+				ssCfg.Tags = make(map[string]interface{})
+			}
+			ssCfg.Tags[ext.SpanType] = ext.SpanTypeWeb
+			ssCfg.Tags[ext.HTTPMethod] = r.Method
+			ssCfg.Tags[ext.HTTPURL] = URLFromRequest(r, cfg.queryString)
+			ssCfg.Tags[ext.HTTPUserAgent] = r.UserAgent()
+			ssCfg.Tags["_dd.measured"] = 1
+			if r.Host != "" {
+				ssCfg.Tags["http.host"] = r.Host
+			}
+
+			if inferredProxySpan != nil {
+				tracer.ChildOf(inferredProxySpan.Context())(ssCfg)
+			} else if extractErr == nil && parentCtx != nil {
+				if links := parentCtx.SpanLinks(); links != nil {
+					tracer.WithSpanLinks(links)(ssCfg)
+				}
+				tracer.ChildOf(parentCtx)(ssCfg)
+			}
+
+			parentCtx.ForeachBaggageItem(func(k, v string) bool {
+				if cfg.tagBaggageKey(k) {
+					ssCfg.Tags["baggage."+k] = v
+				}
+				return true
+			})
+
+			for k, v := range ipTags {
+				ssCfg.Tags[k] = v
+			}
+		})
+	nopts = append(nopts, opts...)
+
+	requestContext := r.Context()
+	if inferredProxySpan != nil {
+		requestContext = context.WithValue(requestContext, inferredSpanCreatedCtxKey{}, true)
+	}
+
+	span, ctx := tracer.StartSpanFromContext(requestContext, instr.OperationName(instrumentation.ComponentServer, nil), nopts...)
+	return span, ctx, func(status int, errorFn func(int) bool, opts ...tracer.FinishOption) {
+		FinishRequestSpan(span, status, errorFn, opts...)
+		if inferredProxySpan != nil {
+			FinishRequestSpan(inferredProxySpan, status, errorFn, opts...)
+		}
+	}
+}
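StartRequestSpan and the FinishSpanFunc it returns pair up around a handler. A condensed editorial sketch of the lifecycle, without the wrapped response writer (handler and route names are illustrative; with no tracer started the spans are simply no-ops):

```go
package main

import (
	"net/http"

	"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
	"github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace"
)

func handler(w http.ResponseWriter, r *http.Request) {
	span, ctx, finish := httptrace.StartRequestSpan(r,
		tracer.ResourceName("GET /ping"),
	)
	status := http.StatusOK
	// A nil errorFn means the configured default decides what is an error
	// (5xx, or whatever DD_TRACE_HTTP_SERVER_ERROR_STATUSES specifies).
	defer func() { finish(status, nil) }()

	_ = span // further tags could be set here
	_ = ctx  // pass ctx downstream so child spans parent correctly
	w.WriteHeader(status)
	_, _ = w.Write([]byte("pong"))
}

func main() {
	http.HandleFunc("/ping", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```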
+
+// FinishRequestSpan finishes the given HTTP request span and sets the expected response-related tags such as the status
+// code. If not nil, errorFn overrides the configured isStatusError check for determining which status codes are errors. Any further span finish option can be added with opts.
+func FinishRequestSpan(s *tracer.Span, status int, errorFn func(int) bool, opts ...tracer.FinishOption) {
+	var statusStr string
+	var fn func(int) bool
+	if errorFn == nil {
+		fn = cfg.isStatusError
+	} else {
+		fn = errorFn
+	}
+	// If status is 0, treat it like 200 unless 0 was called out in DD_TRACE_HTTP_SERVER_ERROR_STATUSES.
+	if status == 0 {
+		if fn(status) {
+			statusStr = "0"
+			s.SetTag(ext.Error, fmt.Errorf("%s: %s", statusStr, http.StatusText(status)))
+		} else {
+			statusStr = "200"
+		}
+	} else {
+		statusStr = strconv.Itoa(status)
+		if fn(status) {
+			s.SetTag(ext.Error, fmt.Errorf("%s: %s", statusStr, http.StatusText(status)))
+		}
+	}
+	fc := &tracer.FinishConfig{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		opt(fc)
+	}
+	if fc.NoDebugStack {
+		// This is a workaround to ensure that the error stack is not set when NoDebugStack is true.
+		// This is required because the error stack is set when we call `s.SetTag(ext.Error, err)` just
+		// a few lines above.
+		// This is also caused by the fact that the error stack generation is controlled by `tracer.WithDebugStack` (globally)
+		// or `tracer.NoDebugStack` (per span, but only when we finish the span). These two options don't allow controlling
+		// the per-span error stack generation that happens in `FinishRequestSpan` before calling `s.Finish`.
+ s.SetTag("error.stack", "") + } + s.SetTag(ext.HTTPCode, statusStr) + s.Finish(tracer.WithFinishConfig(fc)) +} + +// URLFromRequest returns the full URL from the HTTP request. If queryString is true, params are collected and they are obfuscated either by the default query string obfuscator or the custom obfuscator provided by the user (through DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP) +// See https://docs.datadoghq.com/tracing/configure_data_security/?tab=net#redact-query-strings for more information. +func URLFromRequest(r *http.Request, queryString bool) string { + // Quoting net/http comments about net.Request.URL on server requests: + // "For most requests, fields other than Path and RawQuery will be + // empty. (See RFC 7230, Section 5.3)" + // This is why we can't rely entirely on url.URL.String(), url.URL.Host, url.URL.Scheme, etc... + var url string + path := r.URL.EscapedPath() + scheme := "http" + if s := r.URL.Scheme; s != "" { + scheme = s + } else if r.TLS != nil { + scheme = "https" + } + if r.Host != "" { + url = strings.Join([]string{scheme, "://", r.Host, path}, "") + } else { + url = path + } + // Collect the query string if we are allowed to report it and obfuscate it if possible/allowed + if queryString && r.URL.RawQuery != "" { + query := r.URL.RawQuery + if cfg.queryStringRegexp != nil { + query = cfg.queryStringRegexp.ReplaceAllLiteralString(query, "") + } + url = strings.Join([]string{url, query}, "?") + } + if frag := r.URL.EscapedFragment(); frag != "" { + url = strings.Join([]string{url, frag}, "#") + } + return url +} + +// HeaderTagsFromRequest matches req headers to user-defined list of header tags +// and creates span tags based on the header tag target and the req header value +func HeaderTagsFromRequest(req *http.Request, headerTags instrumentation.HeaderTags) tracer.StartSpanOption { + var tags []struct { + key string + val string + } + + headerTags.Iter(func(header, tag string) { + if vs, ok := req.Header[header]; ok { + tags = append(tags, struct { + key string + val string + }{tag, strings.TrimSpace(strings.Join(vs, ","))}) + } + }) + + return func(cfg *tracer.StartSpanConfig) { + for _, t := range tags { + cfg.Tags[t.key] = t.val + } + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/inferred_proxy.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/inferred_proxy.go new file mode 100644 index 00000000..f74b5168 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/inferred_proxy.go @@ -0,0 +1,153 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package httptrace + +import ( + "errors" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// These constants are intended to be used by tracers to extract and infer +// parent span information for distributed tracing systems. +const ( + // ProxyHeaderSystem is the header used to indicate the source of the + // proxy. In the case of AWS API Gateway, the value of this header + // will always be 'aws-apigateway'. 
+	ProxyHeaderSystem = "X-Dd-Proxy"
+
+	// ProxyHeaderStartTimeMs is the header used to indicate the start time
+	// of the request in milliseconds. This value corresponds to the
+	// 'context.requestTimeEpoch' in AWS API Gateway, providing a timestamp
+	// for when the request was initiated.
+	ProxyHeaderStartTimeMs = "X-Dd-Proxy-Request-Time-Ms"
+
+	// ProxyHeaderPath is the header used to indicate the path of the
+	// request. This value corresponds to 'context.path' in AWS API Gateway,
+	// and helps identify the resource that the request is targeting.
+	ProxyHeaderPath = "X-Dd-Proxy-Path"
+
+	// ProxyHeaderHTTPMethod is the header used to indicate the HTTP method
+	// of the request (e.g., GET, POST, PUT, DELETE). This value corresponds
+	// to 'context.httpMethod' in AWS API Gateway, and provides the method
+	// used to make the request.
+	ProxyHeaderHTTPMethod = "X-Dd-Proxy-Httpmethod"
+
+	// ProxyHeaderDomain is the header used to indicate the AWS domain name
+	// handling the request. This value corresponds to 'context.domainName'
+	// in AWS API Gateway, which represents the custom domain associated
+	// with the API Gateway.
+	ProxyHeaderDomain = "X-Dd-Proxy-Domain-Name"
+
+	// ProxyHeaderStage is the header used to indicate the AWS stage name
+	// for the API request. This value corresponds to 'context.stage' in
+	// AWS API Gateway, and provides the stage (e.g., dev, prod, etc.)
+	// in which the request is being processed.
+	ProxyHeaderStage = "X-Dd-Proxy-Stage"
+)
+
+type proxyDetails struct {
+	spanName  string
+	component string
+}
+
+type proxyContext struct {
+	startTime       time.Time
+	method          string
+	path            string
+	stage           string
+	domainName      string
+	proxySystemName string
+}
+
+var (
+	supportedProxies = map[string]proxyDetails{
+		"aws-apigateway": {
+			spanName:  "aws.apigateway",
+			component: "aws-apigateway",
+		},
+	}
+)
+
+func extractInferredProxyContext(headers http.Header) (*proxyContext, error) {
+	_, exists := headers[ProxyHeaderStartTimeMs]
+	if !exists {
+		return nil, errors.New("proxy header start time does not exist")
+	}
+
+	proxyHeaderSystem, exists := headers[ProxyHeaderSystem]
+	if !exists {
+		return nil, errors.New("proxy header system does not exist")
+	}
+
+	if _, ok := supportedProxies[proxyHeaderSystem[0]]; !ok {
+		return nil, errors.New("unsupported proxy header system")
+	}
+
+	pc := proxyContext{
+		method:          headers.Get(ProxyHeaderHTTPMethod),
+		path:            headers.Get(ProxyHeaderPath),
+		stage:           headers.Get(ProxyHeaderStage),
+		domainName:      headers.Get(ProxyHeaderDomain),
+		proxySystemName: headers.Get(ProxyHeaderSystem),
+	}
+
+	startTimeUnixMilli, err := strconv.ParseInt(headers[ProxyHeaderStartTimeMs][0], 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing time string: %s", err.Error())
+	}
+	pc.startTime = time.UnixMilli(startTimeUnixMilli)
+
+	return &pc, nil
+}
+
+func startInferredProxySpan(requestProxyContext *proxyContext, parent *tracer.SpanContext, opts ...tracer.StartSpanOption) *tracer.Span {
+	proxySpanInfo := supportedProxies[requestProxyContext.proxySystemName]
+	log.Debug("Successfully extracted inferred span info %v for proxy: %s", requestProxyContext, requestProxyContext.proxySystemName)
+
+	startTime := requestProxyContext.startTime
+
+	configService := requestProxyContext.domainName
+	if configService == "" {
+		configService = globalconfig.ServiceName()
+	}
+
+	optsLocal := make([]tracer.StartSpanOption, len(opts), len(opts)+1)
+	copy(optsLocal, opts)
+
+	optsLocal = append(optsLocal,
+		func(cfg *tracer.StartSpanConfig) {
+			if cfg.Tags == nil
{ + cfg.Tags = make(map[string]interface{}) + } + + cfg.Parent = parent + cfg.StartTime = startTime + + cfg.Tags[ext.SpanType] = ext.SpanTypeWeb + cfg.Tags[ext.ServiceName] = configService + cfg.Tags[ext.Component] = proxySpanInfo.component + cfg.Tags[ext.HTTPMethod] = requestProxyContext.method + cfg.Tags[ext.HTTPURL] = requestProxyContext.domainName + requestProxyContext.path + cfg.Tags[ext.HTTPRoute] = requestProxyContext.path + cfg.Tags[ext.ResourceName] = fmt.Sprintf("%s %s", requestProxyContext.method, requestProxyContext.path) + cfg.Tags["_dd.inferred_span"] = 1 + cfg.Tags["stage"] = requestProxyContext.stage + }, + ) + + span := tracer.StartSpan(proxySpanInfo.spanName, optsLocal...) + + return span +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/response_writer.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/response_writer.go new file mode 100644 index 00000000..f44fff76 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/response_writer.go @@ -0,0 +1,58 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package httptrace + +//go:generate sh -c "go run make_responsewriter.go | gofmt > trace_gen.go" + +import "net/http" + +// responseWriter is a small wrapper around an http response writer that will +// intercept and store the status of a request. +type responseWriter struct { + http.ResponseWriter + status int +} + +// ResetStatusCode resets the status code of the response writer. +func ResetStatusCode(w http.ResponseWriter) { + if rw, ok := w.(*responseWriter); ok { + rw.status = 0 + } +} + +func newResponseWriter(w http.ResponseWriter) *responseWriter { + return &responseWriter{w, 0} +} + +// Status returns the status code that was monitored. +func (w *responseWriter) Status() int { + return w.status +} + +// Write writes the data to the connection as part of an HTTP reply. +// We explicitly call WriteHeader with the 200 status code +// in order to get it reported into the span. +func (w *responseWriter) Write(b []byte) (int, error) { + if w.status == 0 { + w.WriteHeader(http.StatusOK) + } + return w.ResponseWriter.Write(b) +} + +// WriteHeader sends an HTTP response header with status code. +// It also sets the status code to the span. +func (w *responseWriter) WriteHeader(status int) { + if w.status != 0 { + return + } + w.ResponseWriter.WriteHeader(status) + w.status = status +} + +// Unwrap returns the underlying wrapped http.ResponseWriter. +func (w *responseWriter) Unwrap() http.ResponseWriter { + return w.ResponseWriter +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/route_quantization.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/route_quantization.go new file mode 100644 index 00000000..b90ee367 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/route_quantization.go @@ -0,0 +1,156 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package httptrace + +import ( + "strings" +) + +// QuantizeURL quantizes a URL path into a more generic form that resembles a route. 
+func QuantizeURL(path string) string {
+	var quantizer urlQuantizer
+	return quantizer.Quantize(path)
+}
+
+// urlQuantizer is responsible for quantizing URL paths into a more generic form that resembles a route
+// in case a handler pattern is not available. net/http was the last framework where we did not have access to it
+// until Go 1.22. Now this algorithm is only used in proxy implementations where handlers don't make sense.
+type urlQuantizer struct {
+	tokenizer tokenizer
+	buf       strings.Builder
+}
+
+// Quantize quantizes a path (e.g. /segment1/segment2/segment3) by doing the following:
+// * If a segment contains only letters, we keep it as it is;
+// * If a segment contains one or more digits or special characters, we replace it by '*';
+// * If a segment represents an API version (e.g. v123) we keep it as it is.
+func (q *urlQuantizer) Quantize(path string) string {
+	if len(path) == 0 {
+		return ""
+	}
+
+	if path[0] != '/' {
+		path = "/" + path
+	}
+
+	q.tokenizer.Reset(path)
+	q.buf.Reset()
+	replacements := 0
+
+	for q.tokenizer.Next() {
+		q.buf.WriteByte('/')
+		tokenType, tokenValue := q.tokenizer.Value()
+		if tokenType == tokenWildcard {
+			replacements++
+			q.buf.WriteByte('*')
+			continue
+		}
+
+		q.buf.WriteString(tokenValue)
+	}
+
+	if replacements == 0 {
+		return path
+	}
+
+	// Return the quantized path accumulated in the buffer.
+	return q.buf.String()
+}
+
+// tokenType represents a type of token handled by the `tokenizer`
+type tokenType string
+
+const (
+	// tokenUnknown represents a token of type unknown
+	tokenUnknown = "token:unknown"
+	// tokenWildcard represents a token that contains digits or special chars
+	tokenWildcard = "token:wildcard"
+	// tokenString represents a token that contains only letters
+	tokenString = "token:string"
+	// tokenAPIVersion represents an API version (e.g. v123)
+	tokenAPIVersion = "token:api-version"
+)
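Concretely, the segment rules above yield routes like the following (editorial examples; the printed results follow from the wildcard/version classification below):

```go
package main

import (
	"fmt"

	"github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace"
)

func main() {
	fmt.Println(httptrace.QuantizeURL("/users/123/posts"))  // /users/*/posts
	fmt.Println(httptrace.QuantizeURL("/api/v2/users"))     // /api/v2/users (version segment kept)
	fmt.Println(httptrace.QuantizeURL("/files/report.pdf")) // /files/* ('.' is a special char)
	fmt.Println(httptrace.QuantizeURL("/health"))           // /health (letters only, unchanged)
}
```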
After each "Next()" execution, they will + // point to the beginning and end of a segment like the following: + // + // /segment1/segment2/segment3 + // ----------^-------^-------- + // i j + // + i, j int + + path string + + countAllowedChars int // a-Z, "-", "_" + countNumbers int // 0-9 + countSpecialChars int // anything else +} + +// Reset underlying path being consumed +func (t *tokenizer) Reset(path string) { + t.i = 0 + t.j = 0 + t.path = path +} + +// Next attempts to parse the next token, and returns true if a token was read +func (t *tokenizer) Next() bool { + t.countNumbers = 0 + t.countAllowedChars = 0 + t.countSpecialChars = 0 + t.i = t.j + 1 + + for t.j = t.i; t.j < len(t.path); t.j++ { + c := t.path[t.j] + + if c == '/' { + break + } else if c >= '0' && c <= '9' { + t.countNumbers++ + } else if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '-' || c == '_' { + t.countAllowedChars++ + } else { + t.countSpecialChars++ + } + } + + return t.i < len(t.path) +} + +// Value returns the current token along with its byte value +// Note that the byte value is only valid until the next call to `Reset()` +func (t *tokenizer) Value() (tokenType, string) { + if t.i < 0 || t.j > len(t.path) || t.i >= t.j { + return tokenUnknown, "" + } + + return t.getType(), t.path[t.i:t.j] +} + +func (t *tokenizer) getType() tokenType { + // This matches segments like "v1" + if t.countAllowedChars == 1 && t.countNumbers > 0 && t.path[t.i] == 'v' { + return tokenAPIVersion + } + + // A segment that contains one or more special characters or numbers is + // considered a wildcard token + if t.countSpecialChars > 0 || t.countNumbers > 0 { + return tokenWildcard + } + + // If the segment is comprised by only allowed chars, we classify it as a + // string token which is preserved as it is by the quantizer + if t.countAllowedChars > 0 && t.countSpecialChars == 0 && t.countNumbers == 0 { + return tokenString + } + + return tokenUnknown +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/trace_gen.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/trace_gen.go new file mode 100644 index 00000000..24e26183 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/httptrace/trace_gen.go @@ -0,0 +1,134 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Code generated by make_responsewriter.go DO NOT EDIT + +package httptrace + +import "net/http" + +// wrapResponseWriter wraps an underlying http.ResponseWriter so that it can +// trace the http response codes. It also checks for various http interfaces +// (Flusher, Pusher, CloseNotifier, Hijacker) and if the underlying +// http.ResponseWriter implements them it generates an unnamed struct with the +// appropriate fields. +// +// This code is generated because we have to account for all the permutations +// of the interfaces. 
+// +// In case of any new interfaces or methods we didn't consider here, we also +// implement the rwUnwrapper interface, which is used internally by +// the standard library: https://github.com/golang/go/blob/6d89b38ed86e0bfa0ddaba08dc4071e6bb300eea/src/net/http/responsecontroller.go#L42-L44 +func wrapResponseWriter(w http.ResponseWriter) (http.ResponseWriter, *responseWriter) { + hFlusher, okFlusher := w.(http.Flusher) + hPusher, okPusher := w.(http.Pusher) + hCloseNotifier, okCloseNotifier := w.(http.CloseNotifier) + hHijacker, okHijacker := w.(http.Hijacker) + + mw := newResponseWriter(w) + type monitoredResponseWriter interface { + http.ResponseWriter + Status() int + Unwrap() http.ResponseWriter + } + switch { + case okFlusher && okPusher && okCloseNotifier && okHijacker: + w = struct { + monitoredResponseWriter + http.Flusher + http.Pusher + http.CloseNotifier + http.Hijacker + }{mw, hFlusher, hPusher, hCloseNotifier, hHijacker} + case okFlusher && okPusher && okCloseNotifier: + w = struct { + monitoredResponseWriter + http.Flusher + http.Pusher + http.CloseNotifier + }{mw, hFlusher, hPusher, hCloseNotifier} + case okFlusher && okPusher && okHijacker: + w = struct { + monitoredResponseWriter + http.Flusher + http.Pusher + http.Hijacker + }{mw, hFlusher, hPusher, hHijacker} + case okFlusher && okCloseNotifier && okHijacker: + w = struct { + monitoredResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{mw, hFlusher, hCloseNotifier, hHijacker} + case okPusher && okCloseNotifier && okHijacker: + w = struct { + monitoredResponseWriter + http.Pusher + http.CloseNotifier + http.Hijacker + }{mw, hPusher, hCloseNotifier, hHijacker} + case okFlusher && okPusher: + w = struct { + monitoredResponseWriter + http.Flusher + http.Pusher + }{mw, hFlusher, hPusher} + case okFlusher && okCloseNotifier: + w = struct { + monitoredResponseWriter + http.Flusher + http.CloseNotifier + }{mw, hFlusher, hCloseNotifier} + case okFlusher && okHijacker: + w = struct { + monitoredResponseWriter + http.Flusher + http.Hijacker + }{mw, hFlusher, hHijacker} + case okPusher && okCloseNotifier: + w = struct { + monitoredResponseWriter + http.Pusher + http.CloseNotifier + }{mw, hPusher, hCloseNotifier} + case okPusher && okHijacker: + w = struct { + monitoredResponseWriter + http.Pusher + http.Hijacker + }{mw, hPusher, hHijacker} + case okCloseNotifier && okHijacker: + w = struct { + monitoredResponseWriter + http.CloseNotifier + http.Hijacker + }{mw, hCloseNotifier, hHijacker} + case okFlusher: + w = struct { + monitoredResponseWriter + http.Flusher + }{mw, hFlusher} + case okPusher: + w = struct { + monitoredResponseWriter + http.Pusher + }{mw, hPusher} + case okCloseNotifier: + w = struct { + monitoredResponseWriter + http.CloseNotifier + }{mw, hCloseNotifier} + case okHijacker: + w = struct { + monitoredResponseWriter + http.Hijacker + }{mw, hHijacker} + default: + w = mw + } + + return w, mw +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/instrumentation.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/instrumentation.go new file mode 100644 index 00000000..fe468c29 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/instrumentation.go @@ -0,0 +1,169 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
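The permutation dance in trace_gen.go matters because callers type-assert the wrapped writer for optional interfaces. A self-contained editorial demonstration (using only the standard library, since wrapResponseWriter itself is unexported) of the problem the generated structs solve:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// naiveWrapper embeds only http.ResponseWriter, so optional interfaces on
// the underlying writer (Flusher, Hijacker, ...) are lost: a type assertion
// fails even when the wrapped writer supports them.
type naiveWrapper struct{ http.ResponseWriter }

func main() {
	rec := httptest.NewRecorder() // implements http.Flusher

	_, ok := interface{}(naiveWrapper{rec}).(http.Flusher)
	fmt.Println(ok) // false: the wrapper hid Flusher

	// The generated wrapResponseWriter instead returns an unnamed struct
	// that also embeds http.Flusher when, and only when, the original
	// writer actually has it.
	_, ok = interface{}(struct {
		http.ResponseWriter
		http.Flusher
	}{rec, rec}).(http.Flusher)
	fmt.Println(ok) // true
}
```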
+ +package instrumentation + +import ( + "context" + "math" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/tracer" + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/appsec" + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + "github.com/DataDog/dd-trace-go/v2/internal/namingschema" + "github.com/DataDog/dd-trace-go/v2/internal/normalizer" + "github.com/DataDog/dd-trace-go/v2/internal/stableconfig" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + "github.com/DataDog/dd-trace-go/v2/internal/version" +) + +// OperationContext holds metadata about an instrumentation operation. +type OperationContext map[string]string + +// Load attempts to load the requested package instrumentation. It panics if the package has not been registered. +func Load(pkg Package) *Instrumentation { + info, ok := packages[pkg] + if !ok { + panic("instrumentation package: " + pkg + " was not found. If this is an external package, you must " + + "call instrumentation.Register first") + } + + telemetry.LoadIntegration(string(pkg)) + tracer.MarkIntegrationImported(info.TracedPackage) + + return &Instrumentation{ + pkg: pkg, + logger: newLogger(pkg), + info: info, + } +} + +// ReloadConfig reloads config read from environment variables. This is useful for tests. +func ReloadConfig() { + namingschema.ReloadConfig() +} + +// Version returns the version of the dd-trace-go package. +func Version() string { + return version.Tag +} + +// Instrumentation represents instrumentation for a package. +type Instrumentation struct { + pkg Package + logger Logger + info PackageInfo +} + +// ServiceName returns the default service name to be set for the given instrumentation component. +func (i *Instrumentation) ServiceName(component Component, opCtx OperationContext) string { + cfg := namingschema.GetConfig() + + n, ok := i.info.naming[component] + if !ok { + return cfg.DDService + } + + useDDService := cfg.NamingSchemaVersion == namingschema.SchemaV1 || cfg.RemoveIntegrationServiceNames || n.useDDServiceV0 || n.buildServiceNameV0 == nil + if useDDService && cfg.DDService != "" { + return cfg.DDService + } + return n.buildServiceNameV0(opCtx) +} + +// OperationName returns the operation name to be set for the given instrumentation component. +func (i *Instrumentation) OperationName(component Component, opCtx OperationContext) string { + op, ok := i.info.naming[component] + if !ok { + return "" + } + + switch namingschema.GetVersion() { + case namingschema.SchemaV1: + return op.buildOpNameV1(opCtx) + default: + return op.buildOpNameV0(opCtx) + } +} + +func (i *Instrumentation) Logger() Logger { + return i.logger +} + +func (i *Instrumentation) AnalyticsRate(defaultGlobal bool) float64 { + if internal.BoolEnv("DD_TRACE_"+i.info.EnvVarPrefix+"_ANALYTICS_ENABLED", false) { + return 1.0 + } + if defaultGlobal { + return i.GlobalAnalyticsRate() + } + return math.NaN() +} + +func (i *Instrumentation) GlobalAnalyticsRate() float64 { + return globalconfig.AnalyticsRate() +} + +func (i *Instrumentation) AppSecEnabled() bool { + return appsec.Enabled() +} + +func (i *Instrumentation) AppSecRASPEnabled() bool { + return appsec.RASPEnabled() +} + +func (i *Instrumentation) DataStreamsEnabled() bool { + v, _, _ := stableconfig.Bool("DD_DATA_STREAMS_ENABLED", false) + return v +} + +// TracerInitialized returns whether the global tracer has been initialized or not. 
+func (i *Instrumentation) TracerInitialized() bool { + return internal.TracerInitialized() +} + +// WithExecutionTraced marks ctx as being associated with an execution trace +// task. It is assumed that ctx already contains a trace task. The caller is +// responsible for ending the task. +// +// This is intended for a specific case where the database/sql contrib package +// only creates spans *after* an operation, in case the operation was +// unavailable, and thus execution trace tasks tied to the span only capture the +// very end. This function enables creating a task *before* creating a span, and +// communicating to the APM tracer that it does not need to create a task. In +// general, APM instrumentation should prefer creating tasks around the +// operation rather than after the fact, if possible. +func (i *Instrumentation) WithExecutionTraced(ctx context.Context) context.Context { + return internal.WithExecutionTraced(ctx) +} + +type StatsdClient = internal.StatsdClient + +func (i *Instrumentation) StatsdClient(extraTags []string) (StatsdClient, error) { + addr := globalconfig.DogstatsdAddr() + tags := globalconfig.StatsTags() + for _, tag := range extraTags { + tags = append(tags, tag) + } + return internal.NewStatsdClient(addr, tags) +} + +type HeaderTags interface { + Iter(f func(header string, tag string)) +} + +func NewHeaderTags(headers []string) HeaderTags { + headerTagsMap := normalizer.HeaderTagSlice(headers) + return internal.NewLockMap(headerTagsMap) +} + +func (i *Instrumentation) HTTPHeadersAsTags() HeaderTags { + return globalconfig.HeaderTagMap() +} + +func (i *Instrumentation) ActiveSpanKey() any { + return internal.ActiveSpanKey +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/logger.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/logger.go new file mode 100644 index 00000000..93c9f190 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/logger.go @@ -0,0 +1,53 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package instrumentation + +import ( + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" +) + +type Logger interface { + Debug(msg string, args ...any) + Info(msg string, args ...any) + Warn(msg string, args ...any) + Error(msg string, args ...any) +} + +type logger struct { + logOpts []telemetry.LogOption +} + +func newLogger(pkg Package) *logger { + return &logger{ + logOpts: []telemetry.LogOption{telemetry.WithTags([]string{"integration:" + string(pkg)})}, + } +} + +func (l logger) Debug(msg string, args ...any) { + log.Debug(msg, args...) //nolint:gocritic // Logger plumbing needs to pass through variable format strings +} + +func (l logger) Info(msg string, args ...any) { + log.Info(msg, args...) //nolint:gocritic // Logger plumbing needs to pass through variable format strings +} + +func (l logger) Warn(msg string, args ...any) { + log.Warn(msg, args...) //nolint:gocritic // Logger plumbing needs to pass through variable format strings +} + +func (l logger) Error(msg string, args ...any) { + log.Error(msg, args...) 
//nolint:gocritic // Logger plumbing needs to pass through variable format strings
+}
+
+// hasErrors reports whether any of the given args is an error.
+func hasErrors(args ...any) bool {
+	for _, arg := range args {
+		if _, ok := arg.(error); ok {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/options/options.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/options/options.go
new file mode 100644
index 00000000..ab2cb8c2
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/options/options.go
@@ -0,0 +1,40 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+package options
+
+import "github.com/DataDog/dd-trace-go/v2/internal"
+
+// Copy should be used any time existing options are copied into
+// a new locally scoped set of options. This is to avoid data races and
+// accidental side effects.
+func Copy[T any](opts []T) []T {
+	return Expand(opts, 0, 0)
+}
+
+// Expand should be used any time existing options are copied into
+// a new locally scoped set of options and extra space is required.
+// This is to avoid data races, accidental side effects, and undesired reallocations
+// when appending to the new slice.
+// The initialPosition parameter specifies the position in the new slice
+// where the existing options should be copied to. It's assumed that the new
+// slice will at least have a length of initialPosition + len(opts).
+// The trailCapacity parameter specifies the number of additional options that may be
+// appended to the new slice.
+// The new slice will have a capacity of initialPosition + len(opts) + trailCapacity
+// and a length of initialPosition + len(opts).
+func Expand[T any](opts []T, initialPosition, trailCapacity int) []T {
+	capacity := initialPosition + len(opts)
+	dup := make([]T, capacity, capacity+trailCapacity)
+	copy(dup[initialPosition:], opts)
+	return dup
+}
+
+// GetBoolEnv is a workaround needed because of v2 changes that prevent contribs from accessing
+// the internal directory. This function should not be used if the internal directory
+// can be accessed directly.
+func GetBoolEnv(key string, def bool) bool {
+	return internal.BoolEnv(key, def)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/packages.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/packages.go
new file mode 100644
index 00000000..3c7d0202
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/packages.go
@@ -0,0 +1,922 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
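options.Expand is the slice-aliasing guard used earlier by BeforeHandle (options.Expand(cfg.SpanOpts, 2, 3)). A small editorial demonstration, with strings standing in for span options, of why the copy with reserved head slots and trailing capacity matters:

```go
package main

import (
	"fmt"

	"github.com/DataDog/dd-trace-go/v2/instrumentation/options"
)

func main() {
	caller := []string{"user-opt-1", "user-opt-2"}

	// Reserve two leading slots plus room for three trailing appends,
	// mirroring BeforeHandle's options.Expand(cfg.SpanOpts, 2, 3).
	dup := options.Expand(caller, 2, 3)
	dup[0], dup[1] = "span.kind", "component"

	// The append stays within capacity: no reallocation, and no writes
	// into the caller's backing array.
	dup = append(dup, "route")

	fmt.Println(len(dup), cap(dup)) // 5 7
	fmt.Println(caller)             // [user-opt-1 user-opt-2] (untouched)
	fmt.Println(dup)                // [span.kind component user-opt-1 user-opt-2 route]
}
```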
+ +package instrumentation + +import ( + "fmt" + "strings" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" +) + +type Package string + +const ( + Package99DesignsGQLGen Package = "99designs/gqlgen" + PackageAWSSDKGo Package = "aws/aws-sdk-go" + PackageAWSSDKGoV2 Package = "aws/aws-sdk-go-v2" + PackageBradfitzGoMemcache Package = "bradfitz/gomemcache" + PackageGCPPubsub Package = "cloud.google.com/go/pubsub.v1" + PackageGCPPubsubV2 Package = "cloud.google.com/go/pubsub.v2" + PackageConfluentKafkaGo Package = "confluentinc/confluent-kafka-go/kafka" + PackageConfluentKafkaGoV2 Package = "confluentinc/confluent-kafka-go/kafka.v2" + PackageDatabaseSQL Package = "database/sql" + PackageDimfeldHTTPTreeMuxV5 Package = "dimfeld/httptreemux.v5" + PackageGoElasticSearchV6 Package = "elastic/go-elasticsearch.v6" + PackageEmickleiGoRestfulV3 Package = "emicklei/go-restful.v3" + PackageGin Package = "gin-gonic/gin" + PackageGlobalsignMgo Package = "globalsign/mgo" + PackageMongoDriver Package = "go.mongodb.org/mongo-driver" + PackageMongoDriverV2 Package = "go.mongodb.org/mongo-driver.v2" + PackageChi Package = "go-chi/chi" + PackageChiV5 Package = "go-chi/chi.v5" + PackageGoPGV10 Package = "go-pg/pg.v10" + PackageGoRedis Package = "go-redis/redis" + PackageGoRedisV7 Package = "go-redis/redis.v7" + PackageGoRedisV8 Package = "go-redis/redis.v8" + PackageGoCQL Package = "gocql/gocql" + PackageGoFiberV2 Package = "gofiber/fiber.v2" + PackageRedigo Package = "gomodule/redigo" + PackageGoogleAPI Package = "google.golang.org/api" + PackageGRPC Package = "google.golang.org/grpc" + + PackageNetHTTP Package = "net/http" + PackageIBMSarama Package = "IBM/sarama" + + PackageValyalaFastHTTP Package = "valyala/fasthttp" + PackageUrfaveNegroni Package = "urfave/negroni" + PackageTwitchTVTwirp Package = "twitchtv/twirp" + PackageTidwallBuntDB Package = "tidwall/buntdb" + PackageSyndtrGoLevelDB Package = "syndtr/goleveldb" + PackageSirupsenLogrus Package = "sirupsen/logrus" + PackageShopifySarama Package = "Shopify/sarama" + PackageSegmentioKafkaGo Package = "segmentio/kafka-go" + PackageRedisGoRedisV9 Package = "redis/go-redis.v9" + PackageOlivereElasticV5 Package = "olivere/elastic.v5" + PackageMiekgDNS Package = "miekg/dns" + PackageLabstackEchoV4 Package = "labstack/echo.v4" + PackageK8SClientGo Package = "k8s.io/client-go" + PackageK8SGatewayAPI Package = "k8s.io/gateway-api" + PackageJulienschmidtHTTPRouter Package = "julienschmidt/httprouter" + PackageJmoironSQLx Package = "jmoiron/sqlx" + PackageJackcPGXV5 Package = "jackc/pgx.v5" + PackageHashicorpConsulAPI Package = "hashicorp/consul" + PackageHashicorpVaultAPI Package = "hashicorp/vault" + PackageGraphQLGoGraphQL Package = "graphql-go/graphql" + PackageGraphGophersGraphQLGo Package = "graph-gophers/graphql-go" + PackageGormIOGormV1 Package = "gorm.io/gorm.v1" + PackageGorillaMux Package = "gorilla/mux" + PackageUptraceBun Package = "uptrace/bun" + PackageLogSlog Package = "log/slog" + + PackageValkeyIoValkeyGo Package = "valkey-io/valkey-go" + PackageEnvoyProxyGoControlPlane Package = "envoyproxy/go-control-plane" + PackageOS Package = "os" + PackageRedisRueidis Package = "redis/rueidis" +) + +// These packages have been removed in v2, but they are kept here for the transitional version. 
+const ( + PackageEmickleiGoRestful Package = "emicklei/go-restful" + PackageGaryburdRedigo Package = "garyburd/redigo" + PackageGopkgJinZhuGormV1 Package = "gopkg.in/jinzhu/gorm.v1" + PackageGojiV1Web Package = "zenazn/goji.v1/web" + PackageJinzhuGorm Package = "jinzhu/gorm" + PackageLabstackEcho Package = "labstack/echo" +) + +type Component int + +const ( + ComponentDefault Component = iota + ComponentServer + ComponentClient + ComponentProducer + ComponentConsumer +) + +type componentNames struct { + useDDServiceV0 bool + buildServiceNameV0 func(opCtx OperationContext) string + buildOpNameV0 func(opCtx OperationContext) string + buildOpNameV1 func(opCtx OperationContext) string +} + +type PackageInfo struct { + external bool + + TracedPackage string + IsStdLib bool + EnvVarPrefix string + + naming map[Component]componentNames +} + +var packages = map[Package]PackageInfo{ + Package99DesignsGQLGen: { + TracedPackage: "github.com/99designs/gqlgen", + EnvVarPrefix: "GQLGEN", + naming: map[Component]componentNames{ + ComponentDefault: { + buildServiceNameV0: staticName("graphql"), + buildOpNameV0: func(opCtx OperationContext) string { + name := "graphql.request" + if graphqlOp, ok := opCtx["graphql.operation"]; ok { + name = fmt.Sprintf("%s.%s", ext.SpanTypeGraphQL, graphqlOp) + } + return name + }, + buildOpNameV1: staticName("graphql.server.request"), + }, + }, + }, + PackageAWSSDKGo: { + TracedPackage: "github.com/aws/aws-sdk-go", + EnvVarPrefix: "AWS", + naming: map[Component]componentNames{ + ComponentDefault: { + buildServiceNameV0: awsBuildDefaultServiceNameV0, + buildOpNameV0: func(opCtx OperationContext) string { + awsService, ok := opCtx[ext.AWSService] + if !ok { + return "" + } + return awsService + ".command" + }, + buildOpNameV1: awsBuildOpNameV1, + }, + }, + }, + PackageAWSSDKGoV2: { + TracedPackage: "github.com/aws/aws-sdk-go-v2", + EnvVarPrefix: "AWS", + naming: map[Component]componentNames{ + ComponentDefault: { + buildServiceNameV0: awsBuildDefaultServiceNameV0, + buildOpNameV0: func(opCtx OperationContext) string { + awsService, ok := opCtx[ext.AWSService] + if !ok { + return "" + } + return awsService + ".request" + }, + buildOpNameV1: awsBuildOpNameV1, + }, + }, + }, + PackageBradfitzGoMemcache: { + TracedPackage: "github.com/bradfitz/gomemcache", + EnvVarPrefix: "MEMCACHE", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("memcached"), + buildOpNameV0: staticName("memcached.query"), + buildOpNameV1: staticName("memcached.command"), + }, + }, + }, + PackageGCPPubsub: { + TracedPackage: "cloud.google.com/go/pubsub", + EnvVarPrefix: "GCP_PUBSUB", + naming: map[Component]componentNames{ + ComponentConsumer: { + useDDServiceV0: false, + buildServiceNameV0: staticName(""), + buildOpNameV0: staticName("pubsub.receive"), + buildOpNameV1: staticName("gcp.pubsub.process"), + }, + ComponentProducer: { + useDDServiceV0: false, + buildServiceNameV0: staticName(""), + buildOpNameV0: staticName("pubsub.publish"), + buildOpNameV1: staticName("gcp.pubsub.send"), + }, + }, + }, + PackageGCPPubsubV2: { + TracedPackage: "cloud.google.com/go/pubsub/v2", + EnvVarPrefix: "GCP_PUBSUB", + naming: map[Component]componentNames{ + ComponentConsumer: { + useDDServiceV0: false, + buildServiceNameV0: staticName(""), + buildOpNameV0: staticName("pubsub.receive"), + buildOpNameV1: staticName("gcp.pubsub.process"), + }, + ComponentProducer: { + useDDServiceV0: false, + buildServiceNameV0: staticName(""), + buildOpNameV0: 
staticName("pubsub.publish"), + buildOpNameV1: staticName("gcp.pubsub.send"), + }, + }, + }, + PackageConfluentKafkaGo: { + TracedPackage: "github.com/confluentinc/confluent-kafka-go", + EnvVarPrefix: "KAFKA", + naming: map[Component]componentNames{ + ComponentConsumer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("kafka"), + buildOpNameV0: staticName("kafka.consume"), + buildOpNameV1: staticName("kafka.process"), + }, + ComponentProducer: { + useDDServiceV0: false, + buildServiceNameV0: staticName("kafka"), + buildOpNameV0: staticName("kafka.produce"), + buildOpNameV1: staticName("kafka.send"), + }, + }, + }, + PackageConfluentKafkaGoV2: { + TracedPackage: "github.com/confluentinc/confluent-kafka-go/v2", + EnvVarPrefix: "KAFKA", + naming: map[Component]componentNames{ + ComponentConsumer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("kafka"), + buildOpNameV0: staticName("kafka.consume"), + buildOpNameV1: staticName("kafka.process"), + }, + ComponentProducer: { + useDDServiceV0: false, + buildServiceNameV0: staticName("kafka"), + buildOpNameV0: staticName("kafka.produce"), + buildOpNameV1: staticName("kafka.send"), + }, + }, + }, + PackageDatabaseSQL: { + TracedPackage: "database/sql", + IsStdLib: true, + EnvVarPrefix: "SQL", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: func(opCtx OperationContext) string { + if svc := opCtx["registerService"]; svc != "" { + return svc + } + return fmt.Sprintf("%s.db", opCtx["driverName"]) + }, + buildOpNameV0: func(opCtx OperationContext) string { + return fmt.Sprintf("%s.query", opCtx["driverName"]) + }, + buildOpNameV1: func(opCtx OperationContext) string { + return fmt.Sprintf("%s.query", opCtx[ext.DBSystem]) + }, + }, + }, + }, + PackageDimfeldHTTPTreeMuxV5: { + TracedPackage: "github.com/dimfeld/httptreemux/v5", + EnvVarPrefix: "HTTPTREEMUX", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("http.router"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageGoElasticSearchV6: { + TracedPackage: "github.com/elastic/go-elasticsearch/v6", + EnvVarPrefix: "ELASTIC", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("elastic.client"), + buildOpNameV0: staticName("elasticsearch.query"), + buildOpNameV1: staticName("elasticsearch.query"), + }, + }, + }, + PackageEmickleiGoRestfulV3: { + TracedPackage: "github.com/emicklei/go-restful/v3", + EnvVarPrefix: "RESTFUL", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: false, + buildServiceNameV0: staticName("go-restful"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageGin: { + TracedPackage: "github.com/gin-gonic/gin", + EnvVarPrefix: "GIN", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("gin.router"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageGlobalsignMgo: { + TracedPackage: "github.com/globalsign/mgo", + EnvVarPrefix: "MGO", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("mongodb"), + buildOpNameV0: staticName("mongodb.query"), + buildOpNameV1: staticName("mongodb.query"), + }, + }, + }, + 
PackageMongoDriver: { + TracedPackage: "go.mongodb.org/mongo-driver", + EnvVarPrefix: "MONGO", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("mongo"), + buildOpNameV0: staticName("mongodb.query"), + buildOpNameV1: staticName("mongodb.query"), + }, + }, + }, + PackageMongoDriverV2: { + TracedPackage: "go.mongodb.org/mongo-driver/v2", + EnvVarPrefix: "MONGO", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("mongo"), + buildOpNameV0: staticName("mongodb.query"), + buildOpNameV1: staticName("mongodb.query"), + }, + }, + }, + PackageChi: { + TracedPackage: "github.com/go-chi/chi", + EnvVarPrefix: "CHI", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("chi.router"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageChiV5: { + TracedPackage: "github.com/go-chi/chi/v5", + EnvVarPrefix: "CHI", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("chi.router"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageGoPGV10: { + TracedPackage: "github.com/go-pg/pg/v10", + EnvVarPrefix: "GOPG", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: true, + buildServiceNameV0: staticName("gopg.db"), + buildOpNameV0: staticName("go-pg"), + buildOpNameV1: staticName("postgresql.query"), + }, + }, + }, + PackageGoRedis: { + TracedPackage: "github.com/go-redis/redis", + EnvVarPrefix: "REDIS", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("redis.client"), + buildOpNameV0: staticName("redis.command"), + buildOpNameV1: staticName("redis.command"), + }, + }, + }, + PackageGoRedisV7: { + TracedPackage: "github.com/go-redis/redis/v7", + EnvVarPrefix: "REDIS", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("redis.client"), + buildOpNameV0: staticName("redis.command"), + buildOpNameV1: staticName("redis.command"), + }, + }, + }, + PackageGoRedisV8: { + TracedPackage: "github.com/go-redis/redis/v8", + EnvVarPrefix: "REDIS", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("redis.client"), + buildOpNameV0: staticName("redis.command"), + buildOpNameV1: staticName("redis.command"), + }, + }, + }, + PackageGoCQL: { + TracedPackage: "github.com/gocql/gocql", + EnvVarPrefix: "GOCQL", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("gocql.query"), + buildOpNameV0: func(opCtx OperationContext) string { + if opCtx["operationType"] == "batch" { + return "cassandra.batch" + } + return "cassandra.query" + }, + buildOpNameV1: staticName("cassandra.query"), + }, + }, + }, + PackageGoFiberV2: { + TracedPackage: "github.com/gofiber/fiber/v2", + EnvVarPrefix: "FIBER", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("fiber"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageRedigo: { + TracedPackage: "github.com/gomodule/redigo", + EnvVarPrefix: "REDIGO", + naming: 
map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("redis.conn"), + buildOpNameV0: staticName("redis.command"), + buildOpNameV1: staticName("redis.command"), + }, + }, + }, + PackageGoogleAPI: { + TracedPackage: "google.golang.org/api", + EnvVarPrefix: "GOOGLE_API", + naming: nil, // this package does not use naming schema + }, + PackageGRPC: { + TracedPackage: "google.golang.org/grpc", + EnvVarPrefix: "GRPC", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("grpc.server"), + buildOpNameV0: staticName("grpc.server"), + buildOpNameV1: staticName("grpc.server.request"), + }, + ComponentClient: { + useDDServiceV0: false, + buildServiceNameV0: staticName("grpc.client"), + buildOpNameV0: staticName("grpc.client"), + buildOpNameV1: staticName("grpc.client.request"), + }, + }, + }, + // TODO + + PackageNetHTTP: { + TracedPackage: "net/http", + IsStdLib: true, + EnvVarPrefix: "HTTP", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("http.router"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + ComponentClient: { + useDDServiceV0: false, + buildServiceNameV0: staticName(""), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.client.request"), + }, + }, + }, + PackageValyalaFastHTTP: { + TracedPackage: "github.com/valyala/fasthttp", + EnvVarPrefix: "FASTHTTP", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: false, + buildServiceNameV0: staticName("fasthttp"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageUrfaveNegroni: { + TracedPackage: "github.com/urfave/negroni", + EnvVarPrefix: "NEGRONI", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("negroni.router"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageTwitchTVTwirp: { + TracedPackage: "github.com/twitchtv/twirp", + EnvVarPrefix: "TWIRP", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("twirp-server"), + buildOpNameV0: func(opCtx OperationContext) string { + rpcService, ok := opCtx[ext.RPCService] + if rpcService == "" || !ok { + return "twirp.service" + } + return fmt.Sprintf("twirp.%s", rpcService) + }, + buildOpNameV1: staticName("twirp.server.request"), + }, + ComponentClient: { + useDDServiceV0: true, + buildServiceNameV0: staticName("twirp-client"), + buildOpNameV0: staticName("twirp.request"), + buildOpNameV1: staticName("twirp.client.request"), + }, + }, + }, + PackageTidwallBuntDB: { + TracedPackage: "github.com/tidwall/buntdb", + EnvVarPrefix: "BUNTDB", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("buntdb"), + buildOpNameV0: staticName("buntdb.query"), + buildOpNameV1: staticName("buntdb.query"), + }, + }, + }, + PackageSyndtrGoLevelDB: { + TracedPackage: "github.com/syndtr/goleveldb", + EnvVarPrefix: "LEVELDB", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("leveldb"), + buildOpNameV0: staticName("leveldb.query"), + buildOpNameV1: staticName("leveldb.query"), + }, + }, + }, + PackageSirupsenLogrus: { 
+ TracedPackage: "github.com/sirupsen/logrus", + EnvVarPrefix: "LOGRUS", + }, + PackageShopifySarama: { + TracedPackage: "github.com/Shopify/sarama", + EnvVarPrefix: "SARAMA", + naming: map[Component]componentNames{ + ComponentConsumer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("kafka"), + buildOpNameV0: staticName("kafka.consume"), + buildOpNameV1: staticName("kafka.process"), + }, + ComponentProducer: { + useDDServiceV0: false, + buildServiceNameV0: staticName("kafka"), + buildOpNameV0: staticName("kafka.produce"), + buildOpNameV1: staticName("kafka.send"), + }, + }, + }, + PackageSegmentioKafkaGo: { + TracedPackage: "github.com/segmentio/kafka-go", + EnvVarPrefix: "KAFKA", + naming: map[Component]componentNames{ + ComponentConsumer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("kafka"), + buildOpNameV0: staticName("kafka.consume"), + buildOpNameV1: staticName("kafka.process"), + }, + ComponentProducer: { + useDDServiceV0: false, + buildServiceNameV0: staticName("kafka"), + buildOpNameV0: staticName("kafka.produce"), + buildOpNameV1: staticName("kafka.send"), + }, + }, + }, + PackageRedisGoRedisV9: { + TracedPackage: "github.com/redis/go-redis/v9", + EnvVarPrefix: "REDIS", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: true, + buildServiceNameV0: staticName("redis.client"), + buildOpNameV0: staticName("redis.command"), + buildOpNameV1: staticName("redis.command"), + }, + }, + }, + PackageRedisRueidis: { + TracedPackage: "github.com/redis/rueidis", + EnvVarPrefix: "REDIS", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: true, + buildServiceNameV0: staticName("redis.client"), + buildOpNameV0: staticName("redis.command"), + buildOpNameV1: staticName("redis.command"), + }, + }, + }, + PackageOlivereElasticV5: { + TracedPackage: "gopkg.in/olivere/elastic.v5", + EnvVarPrefix: "ELASTIC", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("elastic.client"), + buildOpNameV0: staticName("elasticsearch.query"), + buildOpNameV1: staticName("elasticsearch.query"), + }, + }, + }, + PackageMiekgDNS: { + TracedPackage: "github.com/miekg/dns", + }, + PackageLabstackEchoV4: { + TracedPackage: "github.com/labstack/echo/v4", + EnvVarPrefix: "ECHO", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("echo"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageK8SClientGo: { + TracedPackage: "k8s.io/client-go", + }, + PackageK8SGatewayAPI: { + TracedPackage: "sigs.k8s.io/gateway-api", + }, + PackageJulienschmidtHTTPRouter: { + TracedPackage: "github.com/julienschmidt/httprouter", + EnvVarPrefix: "HTTPROUTER", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("http.router"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageJmoironSQLx: { + TracedPackage: "github.com/jmoiron/sqlx", + }, + PackageJackcPGXV5: { + TracedPackage: "github.com/jackc/pgx/v5", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("postgres.db"), + }, + }, + }, + PackageIBMSarama: { + TracedPackage: "github.com/IBM/sarama", + EnvVarPrefix: "SARAMA", + naming: map[Component]componentNames{ + ComponentConsumer: { + useDDServiceV0: true, + 
buildServiceNameV0: staticName("kafka"), + buildOpNameV0: staticName("kafka.consume"), + buildOpNameV1: staticName("kafka.process"), + }, + ComponentProducer: { + useDDServiceV0: false, + buildServiceNameV0: staticName("kafka"), + buildOpNameV0: staticName("kafka.produce"), + buildOpNameV1: staticName("kafka.send"), + }, + }, + }, + PackageHashicorpConsulAPI: { + TracedPackage: "github.com/hashicorp/consul/api", + EnvVarPrefix: "CONSUL", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("consul"), + buildOpNameV0: staticName("consul.command"), + buildOpNameV1: staticName("consul.query"), + }, + }, + }, + PackageHashicorpVaultAPI: { + TracedPackage: "github.com/hashicorp/vault/api", + EnvVarPrefix: "VAULT", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("vault"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("vault.query"), + }, + }, + }, + PackageGraphQLGoGraphQL: { + TracedPackage: "github.com/graphql-go/graphql", + EnvVarPrefix: "GRAPHQL", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: true, + buildServiceNameV0: staticName("graphql.server"), + }, + }, + }, + PackageGraphGophersGraphQLGo: { + TracedPackage: "github.com/graph-gophers/graphql-go", + EnvVarPrefix: "GRAPHQL", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: true, + buildServiceNameV0: staticName("graphql.server"), + buildOpNameV0: staticName("graphql.request"), + buildOpNameV1: staticName("graphql.server.request"), + }, + }, + }, + PackageGormIOGormV1: { + TracedPackage: "gorm.io/gorm", + }, + PackageGorillaMux: { + TracedPackage: "github.com/gorilla/mux", + EnvVarPrefix: "MUX", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("mux.router"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageUptraceBun: { + TracedPackage: "github.com/uptrace/bun", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: true, + buildServiceNameV0: staticName("bun.db"), + }, + }, + }, + PackageLogSlog: { + TracedPackage: "log/slog", + IsStdLib: true, + }, + PackageValkeyIoValkeyGo: { + TracedPackage: "github.com/valkey-io/valkey-go", + EnvVarPrefix: "VALKEY", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: true, + buildServiceNameV0: staticName("valkey.client"), + }, + }, + }, + PackageEnvoyProxyGoControlPlane: { + TracedPackage: "github.com/envoyproxy/go-control-plane", + }, + PackageOS: { + TracedPackage: "os", + }, + PackageEmickleiGoRestful: { + TracedPackage: "github.com/emicklei/go-restful", + EnvVarPrefix: "RESTFUL", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: false, + buildServiceNameV0: staticName("go-restful"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageGaryburdRedigo: { + TracedPackage: "github.com/garyburd/redigo", + EnvVarPrefix: "REDIGO", + naming: map[Component]componentNames{ + ComponentDefault: { + useDDServiceV0: false, + buildServiceNameV0: staticName("redis.conn"), + buildOpNameV0: staticName("redis.command"), + buildOpNameV1: staticName("redis.command"), + }, + }, + }, + PackageGopkgJinZhuGormV1: { + TracedPackage: "gopkg.in/jinzhu/gorm.v1", + }, + PackageGojiV1Web: { + TracedPackage: 
"github.com/zenazn/goji/web", + EnvVarPrefix: "GOJI", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("http.router"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, + PackageJinzhuGorm: { + TracedPackage: "github.com/jinzhu/gorm", + }, + PackageLabstackEcho: { + TracedPackage: "github.com/labstack/echo", + EnvVarPrefix: "ECHO", + naming: map[Component]componentNames{ + ComponentServer: { + useDDServiceV0: true, + buildServiceNameV0: staticName("echo"), + buildOpNameV0: staticName("http.request"), + buildOpNameV1: staticName("http.server.request"), + }, + }, + }, +} + +func staticName(name string) func(OperationContext) string { + return func(_ OperationContext) string { + return name + } +} + +func awsBuildDefaultServiceNameV0(opCtx OperationContext) string { + awsService, ok := opCtx[ext.AWSService] + if !ok { + return "" + } + return "aws." + awsService +} + +func awsBuildOpNameV1(opCtx OperationContext) string { + awsService, ok := opCtx[ext.AWSService] + if !ok { + return "" + } + awsOp, ok := opCtx[ext.AWSOperation] + if !ok { + return "" + } + op := "request" + if isAWSMessagingSendOp(awsService, awsOp) { + op = "send" + } + return fmt.Sprintf("aws.%s.%s", strings.ToLower(awsService), op) +} + +func isAWSMessagingSendOp(awsService, awsOperation string) bool { + s, op := strings.ToLower(awsService), strings.ToLower(awsOperation) + if s == "sqs" { + return strings.HasPrefix(op, "sendmessage") + } + if s == "sns" { + return op == "publish" + } + return false +} + +// GetPackages returns a map of Package to the corresponding instrumented module. +func GetPackages() map[Package]PackageInfo { + cp := make(map[Package]PackageInfo) + for pkg, info := range packages { + cp[pkg] = info + } + return cp +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/active_span_key.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/active_span_key.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/active_span_key.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/active_span_key.go diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/agent.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/agent.go similarity index 84% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/agent.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/agent.go index f4bcdce8..d80e8329 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/agent.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/agent.go @@ -10,7 +10,8 @@ import ( "net/url" "os" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" ) const ( @@ -31,10 +32,10 @@ var DefaultTraceAgentUDSPath = "/var/run/datadog/apm.socket" // - Then, DefaultTraceAgentUDSPath, if the path exists // - Finally, localhost:8126 func AgentURLFromEnv() *url.URL { - if agentURL := os.Getenv("DD_TRACE_AGENT_URL"); agentURL != "" { + if agentURL := env.Get("DD_TRACE_AGENT_URL"); agentURL != "" { u, err := url.Parse(agentURL) if err != nil { - log.Warn("Failed to parse DD_TRACE_AGENT_URL: %v", err) + log.Warn("Failed to parse DD_TRACE_AGENT_URL: %s", err.Error()) } else { switch u.Scheme { case "unix", "http", "https": @@ -45,8 +46,8 @@ func AgentURLFromEnv() *url.URL { } } - host, providedHost := os.LookupEnv("DD_AGENT_HOST") - port, providedPort := 
os.LookupEnv("DD_TRACE_AGENT_PORT") + host, providedHost := env.Lookup("DD_AGENT_HOST") + port, providedPort := env.Lookup("DD_TRACE_AGENT_PORT") if host == "" { // We treat set but empty the same as unset providedHost = false diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/README.md b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/README.md new file mode 100644 index 00000000..cc60323e --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/README.md @@ -0,0 +1,212 @@ +# Appsec Go Design + +This document describes the design of the `internal/appsec` package and everything under it. This package is responsible +for securing the application by monitoring the operations that are executed by the application and applying actions in +case a security threats is detected. + +Most of the work is to forward information to the module `github.com/DataDog/go-libddwaf` which contains the WAF +(Web Application Firewall) engine. The WAF does most of the decision making about events and actions. Our goal is to +connect the different parts of the application and the WAF engine while keeping up to date the various sources of +configuration that the WAF engine uses. + +### Instrumentation Gateway: Dyngo + +Having the customer (or orchestrion) instrument their code is the hardest part of the job. That's why we want to provide +the simplest API possible for them to use. This means loosing the flexibility or enabling and disabling multiple +products and features at runtime. Flexibility that we still want to provide to the customer, that's why behind every +API entrypoint present in `dd-trace-go/contrib` that support appsec is a call to the `internal/appsec/dyngo` package. + +```mermaid +flowchart LR + +UserCode[User Code] --> Instrumentation --> IG{Instrumentation
Gateway} --> Listener +``` + +Dyngo is a context-scoped event listener system that provide a way to listen dynamically to events that are happening in +the customer code and to react to configuration changes and hot-swap event listeners at runtime. + +```mermaid +flowchart LR + +UserCode[contrib] --> appsec/emitter --> IG{dyngo} --> appsec/listener --> WAF +appsec/remoteconfig -->|config change| IG +appsec/config -->|config change| IG +``` + +### Operation definition requirements + +* Each operation must have a `Start*` and a `Finish` method covering calls to dyngo. +* The content of the arguments and results should not require any external package, at most the standard library. + +Example operation: + +```go +package main + +import ( + "context" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +type ( + ExampleOperation struct { + dyngo.Operation + } + + ExampleOperationArgs struct { + Type string + } + + ExampleOperationResult struct { + Code int + } +) + +func (ExampleOperationArgs) IsArgOf(*ExampleOperation) {} +func (ExampleOperationResult) IsResultOf(*ExampleOperation) {} + +func StartExampleOperation(ctx context.Context, args ExampleOperationArgs) *ExampleOperation { + parent, ok := dyngo.FromContext(ctx) + if !ok { + log.Error("No parent operation found") + return nil + } + op := &ExampleOperation{ + Operation: dyngo.NewOperation(parent), + } + return dyngo.StartOperation(op, args) +} + +func (op *ExampleOperation) Finish(result ExampleOperationResult) { + dyngo.FinishOperation(op, result) +} +``` + +> [!CAUTION] +> Importing external packages in the operation definition will probably cause circular dependencies. This is because +> the operation definition can be used in the package is will instrument, and the package that will instrument it will +> probably import the operation definition. + +### Operation Stack + +Current state of the possible operation stacks + +```mermaid +flowchart TD + + subgraph Top Level Operation + SES[trace.ServiceEntrySpanOperation] + + Context[waf.ContextOperation] + + HTTPH[httpsec.HandlerOperation] + GRPCH[grpcsec.HandlerOperation] + GQL[graphqlsec.RequestOperation] + end + + subgraph HTTP + RequestBody([httpsec.MonitorRequestBody]) + Roundtripper[httpsec.RoundTripOperation] + end + + subgraph GRPC + RequestMessage([grpcsec.MonitorRequestMessage]) + ResponseMessage([grpcsec.MonitorResponseMessage]) + end + + subgraph GraphQL + Exec[graphqlsec.ExecutionOperation] + Resolve[graphqlsec.ResolveOperation] + end + + Code{User Code} + + SES --> Context + Context --> HTTPH --> Code + Context --> GRPCH --> Code + Context --> GQL + + GQL --> Exec --> Resolve --> Code + + Code --> RequestBody + + Code --> RequestMessage + Code --> ResponseMessage + + Code --> Span[trace.SpanOperation] + + Span --> Roundtripper + Span --> OS[ossec.OpenOperation] + Span --> SQL[sqlsec.SQLOperation] + Span --> User[usersec.UserOperation] +``` + +> [!IMPORTANT] +> Please note that this is how the operation SHOULD be stacked. If the user code does not have a Top Level Operation +> then nothing will be monitored. In this case an error log should be produced to explain thoroughly the issue to +> the user. + +### Features + +Features represent an abstract feature added to the tracer by AppSec. They are the bridge between the configuration and +its sources +and the actual code that needs to be ran in case of enablement or disablement of a feature. 
Features are divided in two +parts: + +- The builder that should be a pure function that takes the configuration and returns a feature object. +- The listeners that are methods of the feature object that are called when an event from the Instrumentation Gateway is + triggered. + +From there, at each configuration change from any config source, the AppSec module will rebuild the feature objects, +register the listeners to the Instrumentation Gateway, and hot-swap the root level operation with the new one, +consequently making the whole AppSec code atomic. + +Here is an example of how a system with only two features, GRPC and HTTP WAF Protection, would look like: + +```mermaid +flowchart TD + + subgraph HTTP Feature + HTTPListener + HTTPBuilder + end + + subgraph GRPC Feature + GRPCBuilder + GRPCListener + end + + subgraph Configuration + RemoteConfig + EnvConfig + ... + end + + Configuration -->|config change| AppSec + + AppSec -->|rebuild| HTTPBuilder + AppSec -->|rebuild| GRPCBuilder + HTTPBuilder -->|register HTTP Listener| IG + GRPCBuilder -->|register GRPC Listener| IG + + + + IG{Instrumentation
Gateway} -->|Start httpsec.HandlerOperation| HTTPListener + IG{Instrumentation
Gateway} -->|Start grpcsec.HandlerOperation| GRPCListener +``` + +All currently available features are the following ones: + +| Feature Name | Description | +|------------------------|--------------------------------------------------------| +| HTTP WAF Protection | Protects HTTP requests from attacks | +| GRPC WAF Protection | Protects GRPC requests from attacks | +| GraphQL WAF Protection | Protects GraphQL requests from attacks | +| SQL RASP | Runtime Application Self-Protection for SQL injections | +| OS RASP | Runtime Application Self-Protection for LFI attacks | +| HTTP RASP | Runtime Application Self-Protection for SSRF attacks | +| User Security | User blocking and login failures/success events | +| WAF Context | Setup of the request scoped context system of the WAF | +| Tracing | Bridge between the tracer and AppSec features | diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/config/const.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/config/const.go new file mode 100644 index 00000000..6483f288 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/config/const.go @@ -0,0 +1,11 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. + +package config + +const ( + // MaxItemCount is the maximum amount of items to keep in a timed set. + MaxItemCount = 4_096 +) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/timed/clock.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/timed/clock.go new file mode 100644 index 00000000..6fb4448a --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/timed/clock.go @@ -0,0 +1,31 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. + +package timed + +import "time" + +type ( + // biasedClock is a specialized clock implementation used to ensure we can get + // 32-bit wide timestamps without having to worry about wraparound. + biasedClock struct { + // bias is effectively the time at which the biasedClock was initialized. + bias int64 + } +) + +// newBiasedClock creates a new [biasedClock] with the given clock function and +// horizon. +func newBiasedClock(horizon uint32) biasedClock { + return biasedClock{ + bias: time.Now().Unix() - int64(horizon), + } +} + +// Now returns the current timestamp, relative to this [biasedClock]. +func (c *biasedClock) Now() uint32 { + // We clamp it to [0,) to be absolutely safe... + return uint32(max(0, time.Now().Unix()-c.bias)) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/timed/lru.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/timed/lru.go new file mode 100644 index 00000000..ac7150df --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/timed/lru.go @@ -0,0 +1,211 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
+// Copyright 2023-present Datadog, Inc.
+
+package timed
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"sync/atomic"
+	"time"
+
+	"github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/config"
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+)
+
+// capacity is the maximum number of items that may be temporarily present in a
+// [LRU]. An eviction triggers once [config.MaxItemCount] is reached, however the
+// implementation is based on Copy-Update-Replace semantics, so during a table
+// rebuild, the old table may continue to receive items for a short while.
+const capacity = 2 * config.MaxItemCount
+
+// LRU is a specialized, open-addressing-hash-table-based implementation of an
+// LRU cache, using Copy-Update-Replace semantics to operate in a
+// completely lock-less manner.
+type LRU struct {
+	// table is the pointer to the current backing hash table
+	table atomic.Pointer[table]
+	// clock is used to determine the current timestamp when making
+	// changes
+	clock biasedClock
+	// intervalSeconds is the amount of time in seconds that an entry is
+	// considered live for.
+	intervalSeconds uint32
+	// zeroKey is a key that is used to replace 0 in the set. This key and 0 are
+	// effectively the same item. This allows us to gracefully handle 0 in our
+	// use-case without having to halve the hash-space (to 63 bits) so we can use
+	// one bit as an empty discriminator. The value is chosen at random when the
+	// set is created, so that different instances will merge 0 with a different
+	// key.
+	zeroKey uint64
+	// rebuilding is a flag to indicate whether the table is being rebuilt as
+	// part of an eviction request.
+	rebuilding atomic.Bool
+}
+
+// NewLRU initializes a new, empty [LRU] with the given interval. A debug
+// message is logged if the interval is set below 1 second. Panics if
+// the interval is more than [math.MaxUint32] seconds, as this value cannot be
+// used internally.
+//
+// Note: timestamps are stored at second resolution, so the interval will be
+// rounded down to the nearest second.
+func NewLRU(interval time.Duration) *LRU {
+	if interval < time.Second {
+		log.Debug("NewLRU: interval is less than one second; this should not be attempted in production (value: %s)", interval)
+	}
+	if interval > time.Second*math.MaxUint32 {
+		panic(fmt.Errorf("NewLRU: interval must be <= %s, but was %s", time.Second*math.MaxUint32, interval))
+	}
+
+	intervalSeconds := uint32(interval.Seconds())
+	set := &LRU{
+		clock:           newBiasedClock(intervalSeconds),
+		intervalSeconds: intervalSeconds,
+		zeroKey:         rand.Uint64(),
+	}
+
+	// That value cannot be zero...
+	for set.zeroKey == 0 {
+		set.zeroKey = rand.Uint64()
+	}
+
+	set.table.Store(&table{})
+
+	return set
+}
+
+// Hit determines whether the given key should be kept or dropped based on the
+// last time it was sampled. If the table grows larger than [config.MaxItemCount], the
+// [LRU.rebuild] method is called in a separate goroutine to begin the
+// eviction process. Until this has completed, all updates to the [LRU] are
+// effectively dropped, as they happen on the soon-to-be-replaced table.
+//
+// Note: in order to run completely lock-less, [LRU] cannot store the 0 key in
+// the table, as a 0 key is used as a sentinel value to identify free entries.
+// To avoid this pitfall, [LRU.zeroKey] is used as a substitute for 0, meaning
+// 0 and [LRU.zeroKey] are treated as the same key. This is not an issue in
+// common use, as given a uniform distribution of keys this only happens 1 in
+// 2^64-1 times.
+func (m *LRU) Hit(key uint64) bool {
+	if key == 0 {
+		// The 0 key is used as a way to imply a slot is empty; so we cannot store
+		// it in the table. To address this, when passed a 0 key, we will use the
+		// [LRU.zeroKey] as a substitute.
+		key = m.zeroKey
+	}
+
+	now := m.clock.Now()
+	threshold := now - m.intervalSeconds
+
+	var (
+		table = m.table.Load()
+		entry *entry
+	)
+	for {
+		var exists bool
+		entry, exists = table.FindEntry(key)
+		if exists {
+			// The entry already exists, so we can proceed...
+			break
+		}
+
+		// We're adding a new entry to the table, so we need to:
+		// 1. Ensure we have capacity (possibly trigger an eviction rebuild)
+		// 2. Claim the slot (or look for another slot if it's already claimed)
+		newCount := table.count.Add(1)
+		if newCount > config.MaxItemCount && m.rebuilding.CompareAndSwap(false, true) {
+			// We're already holding the maximum number of items, so we will rebuild
+			// in order to perform an eviction pass. Updates made in the meantime will
+			// be lost.
+			go m.rebuild(table, threshold)
+		}
+		if newCount > capacity {
+			// We don't have space to add any new item, so we'll ignore this and
+			// decide to DROP it (we might otherwise cause a surge of unconditional
+			// keep decisions, which is not desirable). This only happens in the most
+			// dire of circumstances (a table rebuild did not complete fast enough
+			// to make up free space).
+			table.count.Add(-1)
+			return false
+		}
+
+		if entry.Key.CompareAndSwap(0, key) {
+			// We have successfully claimed the slot, so now we can proceed to set it
+			// up. If we fail to swap, another goroutine has sampled this slot just
+			// before this one, so we can DROP the sample.
+			return entry.Data.CompareAndSwap(0, newEntryData(now, now))
+		}
+
+		if entry.Key.Load() == key {
+			// Another goroutine has concurrently claimed this slot for this key, and
+			// since very little time has passed since then, we can DROP this
+			// sample... This is extremely unlikely to happen (and nearly impossible
+			// to reliably cover in unit tests).
+			return false
+		}
+
+		// Another goroutine has concurrently claimed this slot for another key...
+		// We will try to find another slot then...
+		table.count.Add(-1)
+	}
+
+	// We have found an existing entry, so we can proceed to update it...
+	curData := entry.Data.Load()
+	if curData.SampleTime() <= threshold {
+		// We sampled this a while back (or this is the first time), so we may keep
+		// this sample!
+
+		// Store the value ahead of the for loop so we don't have to do the bit
+		// shifts over and over again (even though they're cheap to do).
+		nowEntryData := newEntryData(now, now)
+		for !entry.Data.CompareAndSwap(curData, nowEntryData) {
+			// Another goroutine has already changed it...
+			curData = entry.Data.Load()
+			if curData.LastAccessKept() {
+				// The concurrent update was a KEEP (as is indicated by the fact its
+				// atime and stime are equal), so this one is necessarily a DROP.
+				return false
+			}
+
+			if curData.SampleTime() >= now {
+				// The concurrent update was made in our future, and it somehow was not
+				// a KEEP, so we'll make a KEEP decision here, but avoid rolling the
+				// [entryData.AccessTime] back.
+				return true
+			}
+
+			// The concurrent update was a DROP, and our clock is ahead of theirs, so
+			// we'll try again...
+		}
+
+		// We successfully swapped at this point, so we have our KEEP decision!
+		return true
+	}
+
+	newData := curData.WithAccessTime(now)
+	for curData.AccessTime() < now {
+		if entry.Data.CompareAndSwap(curData, newData) {
+			// We are done here!
+			break
+		}
+		// Another goroutine has updated the access time... We'll try again...
+		curData = entry.Data.Load()
+	}
+	return false
+}
+
+// rebuild runs in a separate goroutine, and creates a pruned copy of the
+// provided [table] with old and expired entries removed. It will keep at most
+// [config.MaxItemCount]*2/3 items in the new table. Once the rebuild is complete, it
+// replaces the [LRU.table] with the copy.
+func (m *LRU) rebuild(oldTable *table, threshold uint32) {
+	// Since Go has a GC, we can "just" replace the current [LRU.table] with a
+	// trimmed down copy, and let the GC take care of reclaiming the old one, once
+	// it is no longer in use by any reader.
+	m.table.Store(oldTable.PrunedCopy(threshold))
+	m.rebuilding.Store(false)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/timed/table.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/timed/table.go
new file mode 100644
index 00000000..3826e9a0
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/timed/table.go
@@ -0,0 +1,181 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+package timed
+
+import (
+	"slices"
+	"sync/atomic"
+
+	"github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/config"
+)
+
+type (
+	// table is a simple open-addressing hash table implementation that uses a
+	// fixed array of items.
+	table struct {
+		// entries is the set of items contained in the table. The last entry is
+		// reserved for cases where all slots are taken before a rebuild is
+		// complete (it saves us from having to write code to deal with the
+		// impossibility of finding an empty slot, as we always have a slot to
+		// return. We could return a throw-away slot but this would incur a heap
+		// allocation, which we can spare by doing this).
+		entries [capacity + 1]entry
+		// count is the number of items currently stored in the table.
+		count atomic.Int32
+	}
+
+	// entry is a single item in the open-addressing hash table.
+	entry struct {
+		// Key is the Key of the entry. A zero Key indicates that the entry is
+		// currently free.
+		Key atomic.Uint64
+		// Data is the Data associated with the entry.
+		Data atomicEntryData
+	}
+
+	// atomicEntryData is an atomic version of [entryData].
+	atomicEntryData atomic.Uint64
+	// entryData is a 64-bit value that represents the last time an entry was
+	// accessed paired together with the last time this value was sampled.
+	entryData uint64
+
+	// copiableEntry is a copy-able version of [entry], which is used for
+	// sorting entries by recency when re-building the table.
+	copiableEntry struct {
+		// Key is the Key of the entry.
+		Key uint64
+		// Data is the Data associated with the entry.
+		Data entryData
+	}
+)
+
+// FindEntry locates the correct entry for use in the table. If an entry already
+// exists for the given key, it is returned with true. If not, the first blank
+// entry is returned with false.
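+//
+// Probing is linear: the key maps to slot key % capacity and the search walks
+// forward (wrapping around) until it finds the key, a free slot, or arrives
+// back where it started. Illustrative example (not part of the original
+// source): if two keys both map to slot 7, the later arrival ends up in
+// slot 8.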
+func (t *table) FindEntry(key uint64) (*entry, bool) {
+	origIdx := key % capacity
+	idx := origIdx
+
+	for {
+		entry := &t.entries[idx]
+		if curKey := entry.Key.Load(); curKey == 0 || curKey == key {
+			// This is either the entry we're looking for, or an empty slot we can
+			// claim for this key.
+			return entry, curKey == key
+		}
+		idx = (idx + 1) % capacity
+		if idx == origIdx {
+			// We are back at the original index, meaning the map is full.
+			break
+		}
+	}
+	// We have gone full circle without finding a blank slot, so we give up and
+	// return our last resort slot that is reserved for this situation.
+	return &t.entries[capacity], true
+}
+
+// PrunedCopy creates a copy of this table with expired items removed, retaining
+// up to the [config.MaxItemCount]*2/3 most recent items from the original.
+func (t *table) PrunedCopy(threshold uint32) *table {
+	// Sort the existing entries (most recent at the top)
+	newEntries := make([]copiableEntry, 0, capacity)
+	for i := range capacity {
+		if t.entries[i].BlankOrExpired(threshold) {
+			continue
+		}
+		newEntries = append(newEntries, t.entries[i].Copyable())
+	}
+	slices.SortFunc(newEntries, copiableEntry.Compare)
+
+	// Insert up to [config.MaxItemCount]*2/3 items into the new table
+	t = new(table)
+	count := min(int32(config.MaxItemCount*2/3), int32(len(newEntries)))
+	for _, entry := range newEntries[:count] {
+		slot, _ := t.FindEntry(entry.Key)
+		slot.Key.Store(entry.Key)
+		slot.Data.Store(entry.Data)
+	}
+	t.count.Store(count)
+
+	return t
+}
+
+// BlankOrExpired returns true if the receiver is blank or has expired already.
+func (e *entry) BlankOrExpired(threshold uint32) bool {
+	return e.Key.Load() == 0 || e.Data.Load().SampleTime() < threshold
+}
+
+// Copyable returns a [copiableEntry] version of this entry.
+func (e *entry) Copyable() copiableEntry {
+	return copiableEntry{
+		Key:  e.Key.Load(),
+		Data: e.Data.Load(),
+	}
+}
+
+// Load returns the current value held by this atomic.
+func (d *atomicEntryData) Load() entryData {
+	return entryData((*atomic.Uint64)(d).Load())
+}
+
+// CompareAndSwap atomically compares the current value held by this atomic with
+// the old value, and if they match replaces it with the new value. Returns true
+// if the swap happened.
+func (d *atomicEntryData) CompareAndSwap(old entryData, new entryData) (swapped bool) {
+	return (*atomic.Uint64)(d).CompareAndSwap(uint64(old), uint64(new))
+}
+
+// Store atomically stores the given value in this atomic.
+func (d *atomicEntryData) Store(new entryData) {
+	(*atomic.Uint64)(d).Store(uint64(new))
+}
+
+// newEntryData creates a new [entryData] value from the given access and sample
+// times.
+func newEntryData(atime uint32, stime uint32) entryData {
+	return entryData(uint64(atime)<<32 | uint64(stime))
+}
+
+// AccessTime is the access time part of the [entryData].
+func (d entryData) AccessTime() uint32 {
+	return uint32(d >> 32)
+}
+
+// SampleTime is the sample time part of the [entryData].
+func (d entryData) SampleTime() uint32 {
+	return uint32(d)
+}
+
+// LastAccessKept returns true if the last access to this entry resulted in a
+// decision to keep the sample. This is true if the access time is not 0 and is
+// equal to the sample time.
+func (d entryData) LastAccessKept() bool {
+	return d.AccessTime() != 0 && d.AccessTime() == d.SampleTime()
+}
+
+// WithAccessTime returns a new [entryData] by copying the receiver and
+// replacing the access time portion with the specified value.
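+//
+// Illustrative example (not part of the original source): the two 32-bit
+// halves pack as atime<<32 | stime, so applying WithAccessTime(5) to
+// newEntryData(2, 1) == 0x00000002_00000001 yields 0x00000005_00000001,
+// leaving the sample time untouched.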
+func (d entryData) WithAccessTime(atime uint32) entryData {
+	return (d & 0x00000000_FFFFFFFF) | (entryData(atime) << 32)
+}
+
+// Compare performs a comparison between the receiver and another entry, such
+// that most recently sampled entries come first. Two entries with the same
+// sample time are considered equal.
+func (e copiableEntry) Compare(other copiableEntry) int {
+	tst := e.Data.SampleTime()
+	ost := other.Data.SampleTime()
+	if tst < ost {
+		// Receiver was sampled less recently (sorts later)
+		return 1
+	}
+	if tst > ost {
+		// Receiver was sampled more recently (sorts first)
+		return -1
+	}
+	// Both have the same sample time, so we consider them equal.
+	return 0
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/sampler.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/sampler.go
new file mode 100644
index 00000000..328e4228
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/sampler.go
@@ -0,0 +1,92 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+package apisec
+
+import (
+	"encoding/binary"
+	"hash/fnv"
+	"time"
+
+	"github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec/internal/timed"
+	"github.com/DataDog/dd-trace-go/v2/internal/appsec/limiter"
+)
+
+type (
+	Sampler interface {
+		DecisionFor(SamplingKey) bool
+	}
+
+	timedSetSampler timed.LRU
+
+	proxySampler struct {
+		limiter limiter.Limiter
+	}
+
+	nullSampler struct{}
+
+	SamplingKey struct {
+		// Method is the value of the http.method span tag
+		Method string
+		// Route is the value of the http.route span tag
+		Route string
+		// StatusCode is the value of the http.status_code span tag
+		StatusCode int
+	}
+
+	clockFunc = func() int64
+)
+
+// NewProxySampler creates a new sampler suitable for proxy environments where the sampling decision
+// is not based on the request's properties, but on a rate.
+func NewProxySampler(rate int, interval time.Duration) Sampler {
+	if rate <= 0 {
+		return &nullSampler{}
+	}
+	r := int64(rate)
+	l := limiter.NewTokenTickerWithInterval(r, r, interval)
+	l.Start()
+	return &proxySampler{
+		limiter: l,
+	}
+}
+
+// NewSampler returns a new [Sampler] with the specified interval.
+func NewSampler(interval time.Duration) Sampler {
+	return (*timedSetSampler)(timed.NewLRU(interval))
+}
+
+// DecisionFor makes a sampling decision for the provided [SamplingKey]. If it
+// returns true, the request has been "sampled in" and the caller should proceed
+// with the necessary actions. If it returns false, the request has been
+// dropped, and the caller should short-circuit without expending further
+// effort.
+func (s *timedSetSampler) DecisionFor(key SamplingKey) bool {
+	keyHash := key.hash()
+	return (*timed.LRU)(s).Hit(keyHash)
+}
+
+func (s *proxySampler) DecisionFor(_ SamplingKey) bool {
+	return s.limiter.Allow()
+}
+
+func (s *nullSampler) DecisionFor(_ SamplingKey) bool {
+	return false
+}
+
+// hash returns a deterministic hash of the key: the same Method, Route, and
+// StatusCode always produce the same output, and different keys are likely to
+// produce different outputs.
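+//
+// For example (illustrative), {GET, /api/users, 200} and {GET, /api/users, 404}
+// hash to distinct keys, so each (method, route, status code) triple gets its
+// own entry in the timed LRU and is sampled independently.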
+func (k SamplingKey) hash() uint64 {
+	fnv := fnv.New64()
+
+	_, _ = fnv.Write([]byte(k.Method))
+	_, _ = fnv.Write([]byte(k.Route))
+
+	var bytes [2]byte
+	binary.NativeEndian.PutUint16(bytes[:], uint16(k.StatusCode))
+	_, _ = fnv.Write(bytes[:])
+
+	return fnv.Sum64()
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/appsec.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/appsec.go
new file mode 100644
index 00000000..2c70a75d
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/appsec.go
@@ -0,0 +1,215 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package appsec
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/DataDog/go-libddwaf/v4"
+
+	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo"
+	globalinternal "github.com/DataDog/dd-trace-go/v2/internal"
+	"github.com/DataDog/dd-trace-go/v2/internal/appsec/config"
+	"github.com/DataDog/dd-trace-go/v2/internal/appsec/listener"
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry"
+	telemetrylog "github.com/DataDog/dd-trace-go/v2/internal/telemetry/log"
+)
+
+// Enabled returns true when AppSec is up and running, meaning that the appsec build tag is enabled, the env var
+// DD_APPSEC_ENABLED is set to true, and the tracer is started.
+func Enabled() bool {
+	mu.RLock()
+	defer mu.RUnlock()
+	return activeAppSec != nil && activeAppSec.started
+}
+
+// RASPEnabled returns true when DD_APPSEC_RASP_ENABLED=true or is unset, provided that AppSec is enabled.
+func RASPEnabled() bool {
+	mu.RLock()
+	defer mu.RUnlock()
+	return activeAppSec != nil && activeAppSec.started && activeAppSec.cfg.RASP
+}
+
+// Start AppSec when it is enabled, both by using the appsec build tag and by
+// setting the environment variable DD_APPSEC_ENABLED to true.
+func Start(opts ...config.StartOption) {
+	// TODO: Add support to configure the tracer via a public interface
+	if globalinternal.BoolEnv("_DD_APPSEC_BLOCKING_UNAVAILABLE", false) {
+		opts = append(opts, config.WithBlockingUnavailable(true))
+	}
+	if globalinternal.BoolEnv("_DD_APPSEC_PROXY_ENVIRONMENT", false) {
+		opts = append(opts, config.WithProxyEnvironment())
+	}
+
+	startConfig := config.NewStartConfig(opts...)
+
+	// AppSec can start either:
+	// 1. Manually thanks to DD_APPSEC_ENABLED (or via [config.WithEnablementMode])
+	// 2. Remotely when DD_APPSEC_ENABLED is undefined
+	// Note: DD_APPSEC_ENABLED=false takes precedence over remote configuration
+	// and enforces AppSec to be disabled.
+	mode, modeOrigin, err := startConfig.EnablementMode()
+	if err != nil {
+		logUnexpectedStartError(err)
+		return
+	}
+
+	if mode == config.ForcedOff {
+		log.Debug("appsec: disabled by the configuration: set the environment variable DD_APPSEC_ENABLED to true to enable it")
+		return
+	}
+
+	// Check whether libddwaf - required for Threats Detection - is ok or not
+	if ok, err := libddwaf.Usable(); !ok && err != nil {
+		// We need to avoid logging an error to APM tracing users who don't necessarily intend to enable appsec
+		if mode == config.ForcedOn {
+			logUnexpectedStartError(err)
+		} else {
+			// DD_APPSEC_ENABLED is not set so we cannot know what the intent is here, we must log a
+			// debug message instead to avoid showing an error to APM-tracing-only users.
+			telemetrylog.Error("appsec: remote activation of threats detection cannot be enabled for the following reasons: %s", err.Error())
+		}
+		return
+	}
+
+	// From this point we know that AppSec is either enabled or can be enabled through remote config
+	cfg, err := startConfig.NewConfig()
+	if err != nil {
+		logUnexpectedStartError(err)
+		return
+	}
+	appsec := newAppSec(cfg)
+
+	// Start the remote configuration client
+	log.Debug("appsec: starting the remote configuration client")
+	if err := appsec.startRC(); err != nil {
+		telemetrylog.Error("appsec: Remote config: disabled due to an instantiation error: %s", err.Error())
+	}
+
+	if mode == config.RCStandby {
+		// AppSec is not enforced by the env var and can be enabled through remote config
+		log.Debug("appsec: %s is not set, appsec won't start until activated through remote configuration", config.EnvEnabled)
+		if err := appsec.enableRemoteActivation(); err != nil {
+			// ASM is not enabled and can't be enabled through remote configuration. Nothing more can be done.
+			logUnexpectedStartError(err)
+			appsec.stopRC()
+			return
+		}
+		log.Debug("appsec: awaiting possible remote activation")
+		setActiveAppSec(appsec)
+		return
+	}
+
+	if err := appsec.start(); err != nil { // AppSec is specifically enabled
+		logUnexpectedStartError(err)
+		appsec.stopRC()
+		return
+	}
+
+	registerAppsecStartTelemetry(mode, modeOrigin)
+	setActiveAppSec(appsec)
+}
+
+// Implement the AppSec log message C1
+func logUnexpectedStartError(err error) {
+	log.Error("appsec: could not start because of an unexpected error: %s\nNo security activities will be collected. Please contact support at https://docs.datadoghq.com/help/ for help.", err.Error())
+	telemetry.Log(telemetry.LogError, fmt.Sprintf("appsec: could not start because of an unexpected error: %s", err.Error()), telemetry.WithTags([]string{"product:appsec"}))
+	telemetry.ProductStartError(telemetry.NamespaceAppSec, err)
+}
+
+// Stop AppSec.
+func Stop() {
+	setActiveAppSec(nil)
+}
+
+var (
+	activeAppSec *appsec
+	mu           sync.RWMutex
+)
+
+func setActiveAppSec(a *appsec) {
+	mu.Lock()
+	defer mu.Unlock()
+	if activeAppSec != nil {
+		activeAppSec.stopRC()
+		activeAppSec.stop()
+	}
+	activeAppSec = a
+}
+
+type appsec struct {
+	cfg        *config.Config
+	features   []listener.Feature
+	featuresMu sync.Mutex
+	started    bool
+}
+
+func newAppSec(cfg *config.Config) *appsec {
+	return &appsec{
+		cfg: cfg,
+	}
+}
+
+// Start AppSec by registering its security protections according to the configured security rules.
+func (a *appsec) start() error {
+	// Load the waf to catch early errors if any
+	if ok, err := libddwaf.Load(); err != nil {
+		// 1. If there is an error and the loading is not ok: log as an unexpected error case and quit appsec.
+		// Note that we assume here that the test for the unsupported target has been done before calling
+		// this method, so it is now considered an error for this method
+		if !ok {
+			return fmt.Errorf("error while loading libddwaf: %w", err)
+		}
+		// 2. If there is an error and the loading is ok: log it as an informative error; appsec can still be used
+		log.Error("appsec: non-critical error while loading libddwaf: %s", err.Error())
+	}
+
+	// Register dyngo listeners
+	if err := a.SwapRootOperation(); err != nil {
+		return err
+	}
+
+	a.enableRCBlocking()
+	a.enableRASP()
+
+	a.started = true
+	log.Info("appsec: up and running")
+
+	// TODO: log the config like the APM tracer does but we first need to define
+	// a user-friendly string representation of our config and its sources
+
+	return nil
+}
+
+// Stop AppSec by unregistering the security protections.
+func (a *appsec) stop() {
+	if !a.started {
+		return
+	}
+	a.started = false
+	registerAppsecStopTelemetry()
+	// Disable RC blocking first so that the following is guaranteed not to be concurrent anymore.
+	a.disableRCBlocking()
+
+	a.featuresMu.Lock()
+	defer a.featuresMu.Unlock()
+
+	// Disable the currently applied instrumentation
+	dyngo.SwapRootOperation(nil)
+
+	// Close the WAF manager to release all resources associated with it
+	a.cfg.WAFManager.Reset()
+
+	// TODO: block until no more requests are using dyngo operations
+
+	for _, feature := range a.features {
+		feature.Stop()
+	}
+
+	a.features = nil
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/config/config.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/config/config.go
new file mode 100644
index 00000000..c689964c
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/config/config.go
@@ -0,0 +1,221 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package config
+
+import (
+	"fmt"
+	"time"
+
+	sharedinternal "github.com/DataDog/dd-trace-go/v2/internal"
+	"github.com/DataDog/dd-trace-go/v2/internal/remoteconfig"
+	"github.com/DataDog/dd-trace-go/v2/internal/stableconfig"
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry"
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry/log"
+)
+
+func init() {
+	registerSCAAppConfigTelemetry()
+}
+
+// Register the global app telemetry configuration related to the Software Composition Analysis (SCA) product.
+// Report over telemetry whether SCA's enablement env var was set or not along with its value. Nothing is reported in
+// case of an error or if the env var is not set.
+func registerSCAAppConfigTelemetry() {
+	_, _, err := stableconfig.Bool(EnvSCAEnabled, false)
+	if err != nil {
+		log.Error("appsec: %s", err.Error())
+		return
+	}
+}
+
+// The following environment variables dictate the enablement of the different ASM products.
+const (
+	// EnvEnabled controls ASM Threats Protection's enablement.
+	EnvEnabled = "DD_APPSEC_ENABLED"
+	// EnvSCAEnabled controls ASM Software Composition Analysis (SCA)'s enablement.
+	EnvSCAEnabled = "DD_APPSEC_SCA_ENABLED"
+)
+
+// StartOption is used to customize the AppSec configuration when invoked with appsec.Start()
+type StartOption func(c *StartConfig)
+
+type StartConfig struct {
+	// RC is the remote config client configuration to be used.
+	RC *remoteconfig.ClientConfig
+	// EnablementMode is a function that determines whether AppSec is enabled or not. When unset, the
+	// default mode derivation implemented by [IsEnabledByEnvironment] is used.
+	EnablementMode func() (EnablementMode, telemetry.Origin, error)
+	// MetaStructAvailable is true if meta struct is supported by the trace agent.
+	MetaStructAvailable bool
+
+	APISecOptions []APISecOption
+
+	// BlockingUnavailable is true when the application runs in an environment where blocking is not possible
+	BlockingUnavailable bool
+
+	// ProxyEnvironment is true if the application is running in a proxy environment,
+	// such as within an Envoy External Processor.
+	ProxyEnvironment bool
+}
+
+type EnablementMode int8
+
+const (
+	// ForcedOff is the mode where AppSec is forced to be disabled, not allowing remote activation.
+	ForcedOff EnablementMode = -1
+	// RCStandby is the mode where AppSec is in stand-by, awaiting remote activation.
+	RCStandby EnablementMode = 0
+	// ForcedOn is the mode where AppSec is forced to be enabled.
+	ForcedOn EnablementMode = 1
+)
+
+func NewStartConfig(opts ...StartOption) *StartConfig {
+	c := &StartConfig{
+		EnablementMode: func() (mode EnablementMode, origin telemetry.Origin, err error) {
+			enabled, set, err := IsEnabledByEnvironment()
+			if set {
+				origin = telemetry.OriginEnvVar
+				if enabled {
+					mode = ForcedOn
+				} else {
+					mode = ForcedOff
+				}
+			} else {
+				origin = telemetry.OriginDefault
+				mode = RCStandby
+			}
+			return mode, origin, err
+		},
+	}
+	for _, opt := range opts {
+		opt(c)
+	}
+	return c
+}
+
+// WithEnablementMode forces AppSec enablement, replacing the default initialization conditions
+// implemented by [IsEnabledByEnvironment].
+func WithEnablementMode(mode EnablementMode) StartOption {
+	return func(c *StartConfig) {
+		c.EnablementMode = func() (EnablementMode, telemetry.Origin, error) {
+			return mode, telemetry.OriginCode, nil
+		}
+	}
+}
+
+// WithRCConfig sets the AppSec remote config client configuration to the specified cfg
+func WithRCConfig(cfg remoteconfig.ClientConfig) StartOption {
+	return func(c *StartConfig) {
+		c.RC = &cfg
+	}
+}
+
+func WithMetaStructAvailable(available bool) StartOption {
+	return func(c *StartConfig) {
+		c.MetaStructAvailable = available
+	}
+}
+
+func WithAPISecOptions(opts ...APISecOption) StartOption {
+	return func(c *StartConfig) {
+		c.APISecOptions = append(c.APISecOptions, opts...)
+	}
+}
+
+func WithBlockingUnavailable(unavailable bool) StartOption {
+	return func(c *StartConfig) {
+		c.BlockingUnavailable = unavailable
+	}
+}
+
+func WithProxyEnvironment() StartOption {
+	return func(c *StartConfig) {
+		c.APISecOptions = append(c.APISecOptions, WithProxy())
+	}
+}
+
+// Config is the AppSec configuration.
+type Config struct {
+	*WAFManager
+
+	// WAFTimeout is the maximum WAF execution time
+	WAFTimeout time.Duration
+	// TraceRateLimit is the AppSec trace rate limit (traces per second).
+	TraceRateLimit int64
+	// APISec configuration
+	APISec APISecConfig
+	// RC is the remote configuration client used to receive product configuration updates. Nil if RC is disabled (default)
+	RC *remoteconfig.ClientConfig
+	// RASP determines whether RASP features are enabled or not.
+	RASP bool
+	// SupportedAddresses are the addresses that the AppSec listener will bind to.
+	SupportedAddresses AddressSet
+	// MetaStructAvailable is true if meta struct is supported by the trace agent.
+	MetaStructAvailable bool
+	// BlockingUnavailable is true when the application runs in an environment where blocking is not possible
+	BlockingUnavailable bool
+	// TracingAsTransport is true if APM is disabled and manually forcing a trace to be kept is the only way for it to be sent.
+	TracingAsTransport bool
+}
+
+// AddressSet is a set of WAF addresses.
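+//
+// Illustrative usage (not part of the original source):
+//
+//	set := NewAddressSet([]string{"server.request.query", "server.request.body"})
+//	set.AnyOf("server.request.body", "server.io.net.url") // true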
+type AddressSet map[string]struct{} + +func NewAddressSet(addrs []string) AddressSet { + set := make(AddressSet, len(addrs)) + for _, addr := range addrs { + set[addr] = struct{}{} + } + return set +} + +// AnyOf returns true if any of the addresses in the set are in the given list. +func (set AddressSet) AnyOf(anyOf ...string) bool { + for _, addr := range anyOf { + if _, ok := set[addr]; ok { + return true + } + } + + return false +} + +// IsEnabledByEnvironment returns true when appsec is enabled by the environment variable +// [EnvEnabled] being set to a truthy value, as well as whether the environment variable was set at +// all or not (so it is possible to distinguish between explicitly false, and false-by-default). +// If the [EnvEnabled] variable is set to a value that is not a valid boolean (according to +// [strconv.ParseBool]), it is considered false-y, and a detailed error is also returned. +func IsEnabledByEnvironment() (enabled bool, set bool, err error) { + enabled, origin, err := stableconfig.Bool(EnvEnabled, false) + if origin != telemetry.OriginDefault { + set = true + } + return enabled, set, err +} + +// NewConfig returns a fresh appsec configuration read from the env +func (c *StartConfig) NewConfig() (*Config, error) { + data, err := RulesFromEnv() + if err != nil { + return nil, fmt.Errorf("reading WAF rules from environment: %w", err) + } + manager, err := NewWAFManagerWithStaticRules(NewObfuscatorConfig(), data) + if err != nil { + return nil, err + } + + return &Config{ + WAFManager: manager, + WAFTimeout: WAFTimeoutFromEnv(), + TraceRateLimit: RateLimitFromEnv(), + APISec: NewAPISecConfig(c.APISecOptions...), + RASP: RASPEnabled(), + RC: c.RC, + MetaStructAvailable: c.MetaStructAvailable, + BlockingUnavailable: c.BlockingUnavailable, + TracingAsTransport: !sharedinternal.BoolEnv("DD_APM_TRACING_ENABLED", true), + }, nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/config/internal_config.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/config/internal_config.go new file mode 100644 index 00000000..4d44cb2c --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/config/internal_config.go @@ -0,0 +1,256 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. + +package config + +import ( + "fmt" + "math" + "os" + "regexp" + "strconv" + "time" + "unicode" + "unicode/utf8" + + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// Configuration environment variables +const ( + // EnvAPISecEnabled is the env var used to enable API Security + EnvAPISecEnabled = "DD_API_SECURITY_ENABLED" + // EnvAPISecSampleRate is the env var used to set the sampling rate of API Security schema extraction. + // Deprecated: a new [APISecConfig.Sampler] is now used instead of this. + EnvAPISecSampleRate = "DD_API_SECURITY_REQUEST_SAMPLE_RATE" + // EnvAPISecProxySampleRate is the env var used to set the sampling rate of API Security schema extraction for proxies. + // The value represents the number of schemas extracted per minute (samples per minute). 
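+	// Illustrative configuration (assumed value): setting DD_API_SECURITY_PROXY_SAMPLE_RATE=120
+	// makes NewAPISecConfig below wire a proxy sampler that extracts at most 120 schemas
+	// per minute via apisec.NewProxySampler.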
+	EnvAPISecProxySampleRate = "DD_API_SECURITY_PROXY_SAMPLE_RATE"
+	// EnvObfuscatorKey is the env var used to provide the WAF key obfuscation regexp
+	EnvObfuscatorKey = "DD_APPSEC_OBFUSCATION_PARAMETER_KEY_REGEXP"
+	// EnvObfuscatorValue is the env var used to provide the WAF value obfuscation regexp
+	EnvObfuscatorValue = "DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP"
+	// EnvWAFTimeout is the env var used to specify the timeout value for a WAF run
+	EnvWAFTimeout = "DD_APPSEC_WAF_TIMEOUT"
+	// EnvTraceRateLimit is the env var used to set the ASM trace limiting rate
+	EnvTraceRateLimit = "DD_APPSEC_TRACE_RATE_LIMIT"
+	// EnvRules is the env var used to provide a path to a local security rule file
+	EnvRules = "DD_APPSEC_RULES"
+	// EnvRASPEnabled is the env var used to enable/disable RASP functionalities for ASM
+	EnvRASPEnabled = "DD_APPSEC_RASP_ENABLED"
+
+	// envAPISecSampleDelay is the env var used to set the delay for the API Security sampler in system tests.
+	// It is not intended to be set by users.
+	envAPISecSampleDelay = "DD_API_SECURITY_SAMPLE_DELAY"
+)
+
+// Configuration constants and default values
+const (
+	// DefaultAPISecSampleRate is the default rate at which API Security schemas are extracted from requests
+	DefaultAPISecSampleRate = .1
+	// DefaultAPISecSampleInterval is the default interval between two samples being taken.
+	DefaultAPISecSampleInterval = 30 * time.Second
+	// DefaultAPISecProxySampleRate is the default rate (schemas per minute) at which API Security schemas are extracted from requests
+	DefaultAPISecProxySampleRate = 300
+	// DefaultAPISecProxySampleInterval is the default time window for the API Security proxy sampler rate limiter.
+	DefaultAPISecProxySampleInterval = time.Minute
+	// DefaultObfuscatorKeyRegex is the default regexp used to obfuscate keys
+	DefaultObfuscatorKeyRegex = `(?i)pass|pw(?:or)?d|secret|(?:api|private|public|access)[_-]?key|token|consumer[_-]?(?:id|key|secret)|sign(?:ed|ature)|bearer|authorization|jsessionid|phpsessid|asp\.net[_-]sessionid|sid|jwt`
+	// DefaultObfuscatorValueRegex is the default regexp used to obfuscate values
+	DefaultObfuscatorValueRegex = `(?i)(?:p(?:ass)?w(?:or)?d|pass(?:[_-]?phrase)?|secret(?:[_-]?key)?|(?:(?:api|private|public|access)[_-]?)key(?:[_-]?id)?|(?:(?:auth|access|id|refresh)[_-]?)?token|consumer[_-]?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?|jsessionid|phpsessid|asp\.net(?:[_-]|-)sessionid|sid|jwt)(?:\s*=([^;&]+)|"\s*:\s*("[^"]+"|\d+))|bearer\s+([a-z0-9\._\-]+)|token\s*:\s*([a-z0-9]{13})|gh[opsu]_([0-9a-zA-Z]{36})|ey[I-L][\w=-]+\.(ey[I-L][\w=-]+(?:\.[\w.+\/=-]+)?)|[\-]{5}BEGIN[a-z\s]+PRIVATE\sKEY[\-]{5}([^\-]+)[\-]{5}END[a-z\s]+PRIVATE\sKEY|ssh-rsa\s*([a-z0-9\/\.+]{100,})`
+	// DefaultWAFTimeout is the default time limit past which a WAF run will timeout
+	DefaultWAFTimeout = time.Millisecond
+	// DefaultTraceRate is the default limit (traces/sec) past which ASM traces are sampled out
+	DefaultTraceRate = 100 // up to 100 appsec traces/s
+)
+
+// APISecConfig holds the configuration for API Security schemas reporting.
+// It is used to enable/disable the feature.
+type APISecConfig struct {
+	Sampler apisec.Sampler
+	Enabled bool
+	IsProxy bool
+	// Deprecated: use the new [APISecConfig.Sampler] instead.
+	SampleRate float64
+}
+
+// ObfuscatorConfig wraps the key and value regexp to be passed to the WAF to perform obfuscation.
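+// Usage sketch (assumed: the internal env package reads the process environment):
+// overriding the default obfuscation regexps before reading the configuration.
+// NewObfuscatorConfig falls back to the defaults when an override does not compile:
+//
+//	os.Setenv(EnvObfuscatorKey, `(?i)x[_-]?api[_-]?key`)
+//	obf := NewObfuscatorConfig()
+//	// obf.KeyRegex is the override; obf.ValueRegex stays DefaultObfuscatorValueRegex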
+type ObfuscatorConfig struct { + KeyRegex string + ValueRegex string +} + +type APISecOption func(*APISecConfig) + +// NewAPISecConfig creates and returns a new API Security configuration by reading the env +func NewAPISecConfig(opts ...APISecOption) APISecConfig { + cfg := APISecConfig{ + Enabled: internal.BoolEnv(EnvAPISecEnabled, true), + SampleRate: readAPISecuritySampleRate(), + } + for _, opt := range opts { + opt(&cfg) + } + + if cfg.Sampler != nil { + return cfg + } + + if cfg.IsProxy { + rate := internal.IntEnv(EnvAPISecProxySampleRate, DefaultAPISecProxySampleRate) + cfg.Sampler = apisec.NewProxySampler(rate, DefaultAPISecProxySampleInterval) + } else { + interval := internal.DurationEnvWithUnit(envAPISecSampleDelay, "s", DefaultAPISecSampleInterval) + cfg.Sampler = apisec.NewSampler(interval) + } + + return cfg +} + +func readAPISecuritySampleRate() float64 { + value := env.Get(EnvAPISecSampleRate) + if value == "" { + return DefaultAPISecSampleRate + } + + rate, err := strconv.ParseFloat(value, 64) + if err != nil { + logEnvVarParsingError(EnvAPISecSampleRate, value, err, DefaultAPISecSampleRate) + return DefaultAPISecSampleRate + } + // Clamp the value so that 0.0 <= rate <= 1.0 + if rate < 0. { + rate = 0. + } else if rate > 1. { + rate = 1. + } + return rate +} + +// WithAPISecSampler sets the sampler for the API Security configuration. This is useful for testing +// purposes. +func WithAPISecSampler(sampler apisec.Sampler) APISecOption { + return func(c *APISecConfig) { + c.Sampler = sampler + } +} + +// WithProxy configures API Security for a proxy environment. +func WithProxy() APISecOption { + return func(c *APISecConfig) { + c.IsProxy = true + } +} + +// RASPEnabled returns true if RASP functionalities are enabled through the env, or if DD_APPSEC_RASP_ENABLED +// is not set +func RASPEnabled() bool { + return internal.BoolEnv(EnvRASPEnabled, true) +} + +// NewObfuscatorConfig creates and returns a new WAF obfuscator configuration by reading the env +func NewObfuscatorConfig() ObfuscatorConfig { + keyRE := readObfuscatorConfigRegexp(EnvObfuscatorKey, DefaultObfuscatorKeyRegex) + valueRE := readObfuscatorConfigRegexp(EnvObfuscatorValue, DefaultObfuscatorValueRegex) + return ObfuscatorConfig{KeyRegex: keyRE, ValueRegex: valueRE} +} + +func readObfuscatorConfigRegexp(name, defaultValue string) string { + val, present := env.Lookup(name) + if !present { + log.Debug("appsec: %s not defined, starting with the default obfuscator regular expression", name) + return defaultValue + } + if _, err := regexp.Compile(val); err != nil { + logUnexpectedEnvVarValue(name, val, "could not compile the configured obfuscator regular expression", defaultValue) + return defaultValue + } + log.Debug("appsec: starting with the configured obfuscator regular expression %s", name) + return val +} + +// WAFTimeoutFromEnv reads and parses the WAF timeout value set through the env +// If not set, it defaults to `DefaultWAFTimeout` +func WAFTimeoutFromEnv() (timeout time.Duration) { + timeout = DefaultWAFTimeout + value := env.Get(EnvWAFTimeout) + if value == "" { + return + } + + // Check if the value ends with a letter, which means the user has + // specified their own time duration unit(s) such as 1s200ms. + // Otherwise, default to microseconds. 
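+	// Worked examples (illustrative): DD_APPSEC_WAF_TIMEOUT="500" parses as 500µs,
+	// "2ms" as 2ms, and "1s200ms" is kept as-is; unparsable or non-positive values
+	// fall back to DefaultWAFTimeout (one millisecond).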
+ if lastRune, _ := utf8.DecodeLastRuneInString(value); !unicode.IsLetter(lastRune) { + value += "us" // Add the default microsecond time-duration suffix + } + + parsed, err := time.ParseDuration(value) + if err != nil { + logEnvVarParsingError(EnvWAFTimeout, value, err, timeout) + return + } + if parsed <= 0 { + logUnexpectedEnvVarValue(EnvWAFTimeout, parsed, "expecting a strictly positive duration", timeout) + return + } + return parsed +} + +// RateLimitFromEnv reads and parses the trace rate limit set through the env +// If not set, it defaults to `DefaultTraceRate` +func RateLimitFromEnv() (rate int64) { + rate = DefaultTraceRate + value := env.Get(EnvTraceRateLimit) + if value == "" { + return rate + } + parsed, err := strconv.ParseUint(value, 10, 0) + if err != nil { + logEnvVarParsingError(EnvTraceRateLimit, value, err, rate) + return + } + if parsed == 0 { + logUnexpectedEnvVarValue(EnvTraceRateLimit, parsed, "expecting a value strictly greater than 0", rate) + return + } + if parsed > math.MaxInt64 { + logUnexpectedEnvVarValue(EnvTraceRateLimit, parsed, "expecting a value less than or equal to math.MaxInt64", rate) + return + } + return int64(parsed) +} + +// RulesFromEnv returns the security rules provided through the environment +// If the env var is not set, the default recommended rules are returned instead +func RulesFromEnv() ([]byte, error) { + filepath := env.Get(EnvRules) + if filepath == "" { + log.Debug("appsec: using the default built-in recommended security rules") + return nil, nil + } + buf, err := os.ReadFile(filepath) + if err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("appsec: could not find the rules file in path %s: %w", filepath, err) + } + return nil, err + } + log.Debug("appsec: using the security rules from file %s", filepath) + return buf, nil +} + +func logEnvVarParsingError(name, value string, err error, defaultValue any) { + //nolint:gocritic // we're trying to be helpful here... + log.Debug("appsec: could not parse the env var %s=%s as a duration: %v. Using default value %v.", name, value, err, defaultValue) +} + +func logUnexpectedEnvVarValue(name string, value any, reason string, defaultValue any) { + //nolint:gocritic // we're trying to be helpful here... + log.Debug("appsec: unexpected configuration value of %s=%v: %s. Using default value %v.", name, value, reason, defaultValue) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/config/wafmanager.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/config/wafmanager.go new file mode 100644 index 00000000..ea49fc30 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/config/wafmanager.go @@ -0,0 +1,203 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package config + +import ( + "bytes" + "encoding/json" + "runtime" + "sync" + + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + telemetryLog "github.com/DataDog/dd-trace-go/v2/internal/telemetry/log" + "github.com/DataDog/go-libddwaf/v4" +) + +type ( + // WAFManager holds a [libddwaf.Builder] and allows managing its configuration. 
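+	// Usage sketch (assumed): create a manager from environment-provided rules, build
+	// handles as the configuration evolves, and close it when done:
+	//
+	//	mgr, err := NewWAFManagerWithStaticRules(NewObfuscatorConfig(), nil)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	defer mgr.Close()
+	//	handle, rulesVersion := mgr.NewHandle() // handle may be nil if no valid WAF could be built
+	//	_, _ = handle, rulesVersion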
+ WAFManager struct { + builder *libddwaf.Builder + staticRules []byte // nullable + rulesVersion string + closed bool + mu sync.RWMutex + } +) + +const defaultRulesPath = "ASM_DD/default" + +// NewWAFManager creates a new [WAFManager] with the provided [config.ObfuscatorConfig] and initial +// rules (if any). +func NewWAFManager(obfuscator ObfuscatorConfig) (*WAFManager, error) { + return NewWAFManagerWithStaticRules(obfuscator, nil) +} + +func NewWAFManagerWithStaticRules(obfuscator ObfuscatorConfig, staticRules []byte) (*WAFManager, error) { + builder, err := libddwaf.NewBuilder(obfuscator.KeyRegex, obfuscator.ValueRegex) + if err != nil { + return nil, err + } + + mgr := &WAFManager{ + builder: builder, + staticRules: staticRules, + } + + if err := mgr.RestoreDefaultConfig(); err != nil { + return nil, err + } + + // Attach a finalizer to close the builder when it is garbage collected, in case + // [WAFManager.Close] is not called explicitly by the user. The call to [libddwaf.Builder.Close] + // is safe to make multiple times. + runtime.SetFinalizer(mgr, func(m *WAFManager) { m.doClose(true) }) + + return mgr, nil +} + +// Reset resets the WAF manager to its initial state. +func (m *WAFManager) Reset() error { + for _, path := range m.ConfigPaths("") { + m.RemoveConfig(path) + } + return m.RestoreDefaultConfig() +} + +// ConfigPaths returns the list of configuration paths currently loaded in the receiving +// [WAFManager]. This is typically used for testing purposes. An optional filter regular expression +// can be provided to limit what paths are returned. +func (m *WAFManager) ConfigPaths(filter string) []string { + m.mu.RLock() + defer m.mu.RUnlock() + + return m.builder.ConfigPaths(filter) +} + +// NewHandle returns a new [*libddwaf.Handle] (which may be nil if no valid WAF could be built) and the +// version of the rules that were used to build it. +func (m *WAFManager) NewHandle() (*libddwaf.Handle, string) { + m.mu.RLock() + rulesVersion := m.rulesVersion + hdl := m.builder.Build() + m.mu.RUnlock() + return hdl, rulesVersion +} + +// Close releases all resources associated with this [WAFManager]. +func (m *WAFManager) Close() { + m.doClose(false) +} + +func (m *WAFManager) doClose(leaked bool) { + m.mu.Lock() + defer m.mu.Unlock() + + if m.closed { + return + } + if leaked { + telemetryLog.Warn("WAFManager was leaked and is being closed by GC. Remember to call WAFManager.Close() explicitly!") + } + + m.builder.Close() + m.rulesVersion = "" + m.closed = true +} + +// RemoveConfig removes a configuration from the receiving [WAFManager]. +func (m *WAFManager) RemoveConfig(path string) { + m.mu.Lock() + defer m.mu.Unlock() + m.builder.RemoveConfig(path) +} + +// RemoveDefaultConfig removes the initial configuration from the receiving [WAFManager]. Returns +// true if the default config was actually removed; false otherwise (e.g, if it had previously been +// removed, or there was no default config to begin with). +func (m *WAFManager) RemoveDefaultConfig() bool { + m.mu.Lock() + defer m.mu.Unlock() + + if m.staticRules != nil { + return m.builder.RemoveConfig(defaultRulesPath) + } + + return m.builder.RemoveDefaultRecommendedRuleset() +} + +// AddOrUpdateConfig adds or updates a configuration in the receiving [WAFManager]. 
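+// Usage sketch (assumed; the path and payload are hypothetical): applying a
+// remote-configuration fragment and rebuilding a handle that reflects it:
+//
+//	var fragment map[string]any // decoded JSON configuration
+//	diags, err := m.AddOrUpdateConfig("ASM_DD/example", fragment)
+//	if err == nil && diags.Version != "" {
+//		handle, version := m.NewHandle()
+//		_, _ = handle, version
+//	}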
+func (m *WAFManager) AddOrUpdateConfig(path string, fragment any) (libddwaf.Diagnostics, error) { + m.mu.Lock() + defer m.mu.Unlock() + diags, err := m.builder.AddOrUpdateConfig(path, fragment) + if err != nil { + return diags, err + } + + // Submit the telemetry metrics for error counts obtained from the [libddwaf.Diagnostics] object. + // See: https://docs.google.com/document/d/1lcCvURsWTS_p01-MvrI6SmDB309L1e8bx9txuUR1zCk/edit?tab=t.0#heading=h.nwzm8andnx41 + if diags.Version != "" { + m.rulesVersion = diags.Version + } + diags.EachFeature(updateTelemetryMetrics(m.rulesVersion)) + return diags, err +} + +// RestoreDefaultConfig restores the initial configurations to the receiving [WAFManager]. +func (m *WAFManager) RestoreDefaultConfig() error { + var diags libddwaf.Diagnostics + var err error + if m.staticRules == nil { + diags, err = m.builder.AddDefaultRecommendedRuleset() + } else { + var rules map[string]any + dec := json.NewDecoder(bytes.NewReader(m.staticRules)) + dec.UseNumber() + if err := dec.Decode(&rules); err != nil { + return err + } + diags, err = m.AddOrUpdateConfig(defaultRulesPath, rules) + } + if err != nil { + return err + } + + if diags.Version != "" { + m.rulesVersion = diags.Version + } + + diags.EachFeature(updateTelemetryMetrics(m.rulesVersion)) + diags.EachFeature(logLocalDiagnosticMessages) + return err +} + +func logLocalDiagnosticMessages(name string, feature *libddwaf.Feature) { + if feature.Error != "" { + telemetryLog.Error("%s", feature.Error, telemetry.WithTags([]string{"appsec_config_key:" + name, "log_type:local::diagnostic"})) + } + for msg, ids := range feature.Errors { + telemetryLog.Error("%s: %q", msg, ids, telemetry.WithTags([]string{"appsec_config_key:" + name, "log_type:local::diagnostic"})) + } + for msg, ids := range feature.Warnings { + telemetryLog.Warn("%s: %q", msg, ids, telemetry.WithTags([]string{"appsec_config_key:" + name, "log_type:local::diagnostic"})) + } +} + +func updateTelemetryMetrics(eventRulesVersion string) func(name string, feat *libddwaf.Feature) { + return func(name string, feat *libddwaf.Feature) { + errCount := telemetry.Count(telemetry.NamespaceAppSec, "waf.config_errors", []string{ + "waf_version:" + libddwaf.Version(), + "event_rules_version:" + eventRulesVersion, + "config_key:" + name, + "scope:item", + "action:update", + }) + errCount.Submit(0) + for _, ids := range feat.Errors { + errCount.Submit(float64(len(ids))) + } + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/usersec/user.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/usersec/user.go new file mode 100644 index 00000000..f03301cf --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/usersec/user.go @@ -0,0 +1,73 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
+ +package usersec + +import ( + "context" + "sync" + + "github.com/DataDog/dd-trace-go/v2/appsec/events" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +const errorLog = ` +appsec: user login monitoring ignored: could not find the http handler instrumentation metadata in the request context: + the request handler is not being monitored by a middleware function or the provided context is not the expected request context +` + +var errorLogOnce sync.Once + +type ( + // UserEventType is the type of user event, such as a successful login or a failed login or any other authenticated request. + UserEventType int + + // UserLoginOperation type representing a call to appsec.SetUser(). It gets both created and destroyed in a single + // call to ExecuteUserIDOperation + UserLoginOperation struct { + dyngo.Operation + EventType UserEventType + } + // UserLoginOperationArgs is the user ID operation arguments. + UserLoginOperationArgs struct{} + + // UserLoginOperationRes is the user ID operation results. + UserLoginOperationRes struct { + UserID string + UserLogin string + UserOrg string + SessionID string + } +) + +const ( + // UserLoginSuccess is the event type for a successful user login, when a new session or JWT is created. + UserLoginSuccess UserEventType = iota + // UserLoginFailure is the event type for a failed user login, when the user ID is not found or the password is incorrect. + UserLoginFailure + // UserSet is the event type for a user ID operation that is not a login, such as any authenticated request made by the user. + UserSet +) + +func StartUserLoginOperation(ctx context.Context, eventType UserEventType, args UserLoginOperationArgs) (*UserLoginOperation, *error) { + parent, ok := dyngo.FromContext(ctx) + if !ok { // Nothing will be reported in this case, but we can still block so we don't return + errorLogOnce.Do(func() { log.Error(errorLog) }) + } + + op := &UserLoginOperation{Operation: dyngo.NewOperation(parent), EventType: eventType} + var err error + dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { err = e }) + dyngo.StartOperation(op, args) + return op, &err +} + +func (op *UserLoginOperation) Finish(args UserLoginOperationRes) { + dyngo.FinishOperation(op, args) +} + +func (UserLoginOperationArgs) IsArgOf(*UserLoginOperation) {} +func (UserLoginOperationRes) IsResultOf(*UserLoginOperation) {} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/context.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/context.go new file mode 100644 index 00000000..f85cf7d8 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/context.go @@ -0,0 +1,187 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
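+
+// Usage sketch for the usersec package above (assumed caller-side code; the request
+// variable r and the user values are hypothetical): emitting a failed-login event from
+// an HTTP handler. The returned error pointer is non-nil when a blocking security
+// event was raised:
+//
+//	op, errPtr := usersec.StartUserLoginOperation(r.Context(), usersec.UserLoginFailure, usersec.UserLoginOperationArgs{})
+//	op.Finish(usersec.UserLoginOperationRes{UserID: "user-123", UserLogin: "jdoe"})
+//	if *errPtr != nil {
+//		// the request was blocked by a security rule; abort the handler
+//	}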
+ +package waf + +import ( + "context" + "maps" + "slices" + "strings" + "sync" + "sync/atomic" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/limiter" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/stacktrace" + "github.com/DataDog/go-libddwaf/v4" +) + +type ( + ContextOperation struct { + dyngo.Operation + *trace.ServiceEntrySpanOperation + + // context is an atomic pointer to the current WAF context. + // Makes sure the calls to context.Run are safe. + context atomic.Pointer[libddwaf.Context] + // limiter comes from the WAF feature and is used to limit the number of events as a whole. + limiter limiter.Limiter + // events is where we store WAF events received from the WAF over the course of the request. + events []any + // stacks is where we store stack traces received from the WAF over the course of the request. + stacks []*stacktrace.Event + // derivatives is where we store any span tags generated by the WAF over the course of the request. + derivatives map[string]any + // supportedAddresses is the set of addresses supported by the WAF. + supportedAddresses config.AddressSet + // metrics the place that manages reporting for the current execution + metrics *ContextMetrics + // requestBlocked is used to track if the request has been requestBlocked by the WAF or not. + requestBlocked bool + // mu protects the events, stacks, and derivatives, supportedAddresses, eventRulesetVersion slices, and requestBlocked. + mu sync.Mutex + // logOnce is used to log a warning once when a request has too many WAF events via the built-in limiter or the max value. + logOnce sync.Once + } + + ContextArgs struct{} + + ContextRes struct{} + + // RunEvent is the type of event that should be emitted to child operations to run the WAF + RunEvent struct { + libddwaf.RunAddressData + dyngo.Operation + } + + // SecurityEvent is a dyngo data event sent when a security event is detected by the WAF + SecurityEvent struct{} +) + +func (ContextArgs) IsArgOf(*ContextOperation) {} +func (ContextRes) IsResultOf(*ContextOperation) {} + +func StartContextOperation(ctx context.Context, span trace.TagSetter) (*ContextOperation, context.Context) { + entrySpanOp, ctx := trace.StartServiceEntrySpanOperation(ctx, span) + op := &ContextOperation{ + Operation: dyngo.NewOperation(entrySpanOp), + ServiceEntrySpanOperation: entrySpanOp, + } + return op, dyngo.StartAndRegisterOperation(ctx, op, ContextArgs{}) +} + +func (op *ContextOperation) Finish() { + dyngo.FinishOperation(op, ContextRes{}) + op.ServiceEntrySpanOperation.Finish() +} + +func (op *ContextOperation) SwapContext(ctx *libddwaf.Context) *libddwaf.Context { + return op.context.Swap(ctx) +} + +func (op *ContextOperation) SetLimiter(limiter limiter.Limiter) { + op.limiter = limiter +} + +func (op *ContextOperation) SetMetricsInstance(metrics *ContextMetrics) { + op.metrics = metrics +} + +func (op *ContextOperation) GetMetricsInstance() *ContextMetrics { + return op.metrics +} + +func (op *ContextOperation) SetRequestBlocked() { + op.mu.Lock() + defer op.mu.Unlock() + op.requestBlocked = true +} + +// AddEvents adds WAF events to the operation and returns true if the operation has reached the maximum number of events, by the limiter or the max value. 
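+// Illustrative behavior (assumed): at most 10 events are kept per request
+// (maxWAFEventsPerRequest below), and the shared rate limiter can stop reporting earlier:
+//
+//	full := op.AddEvents(event) // false while under the per-request cap and rate limit
+//	// returns true once the limiter denies or 10 events have been stored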
+func (op *ContextOperation) AddEvents(events ...any) bool {
+	if len(events) == 0 {
+		return false
+	}
+
+	if !op.limiter.Allow() {
+		log.Error("appsec: too many WAF events, stopping further reporting")
+		return true
+	}
+
+	op.mu.Lock()
+	defer op.mu.Unlock()
+
+	const maxWAFEventsPerRequest = 10
+	if len(op.events) >= maxWAFEventsPerRequest {
+		op.logOnce.Do(func() {
+			log.Warn("appsec: ignoring new WAF event because the maximum number of security events per request was reached")
+		})
+		return true
+	}
+
+	op.events = append(op.events, events...)
+	return false
+}
+
+func (op *ContextOperation) AddStackTraces(stacks ...*stacktrace.Event) {
+	if len(stacks) == 0 {
+		return
+	}
+
+	op.mu.Lock()
+	defer op.mu.Unlock()
+	op.stacks = append(op.stacks, stacks...)
+}
+
+func (op *ContextOperation) AbsorbDerivatives(derivatives map[string]any) {
+	if len(derivatives) == 0 {
+		return
+	}
+
+	op.mu.Lock()
+	defer op.mu.Unlock()
+	if op.derivatives == nil {
+		op.derivatives = make(map[string]any, len(derivatives))
+	}
+
+	for k, v := range derivatives {
+		// If the request has been blocked, we don't want to report any derivatives representing the response schema.
+		if op.requestBlocked && strings.HasPrefix(k, "_dd.appsec.s.res.") {
+			continue
+		}
+
+		op.derivatives[k] = v
+	}
+}
+
+func (op *ContextOperation) Derivatives() map[string]any {
+	op.mu.Lock()
+	defer op.mu.Unlock()
+	return maps.Clone(op.derivatives)
+}
+
+func (op *ContextOperation) Events() []any {
+	op.mu.Lock()
+	defer op.mu.Unlock()
+	return slices.Clone(op.events)
+}
+
+func (op *ContextOperation) StackTraces() []*stacktrace.Event {
+	op.mu.Lock()
+	defer op.mu.Unlock()
+	return slices.Clone(op.stacks)
+}
+
+func (op *ContextOperation) OnEvent(event RunEvent) {
+	op.Run(event.Operation, event.RunAddressData)
+}
+
+func (op *ContextOperation) SetSupportedAddresses(addrs config.AddressSet) {
+	op.supportedAddresses = addrs
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/metrics.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/metrics.go
new file mode 100644
index 00000000..e2d2a142
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/metrics.go
@@ -0,0 +1,397 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package waf
+
+import (
+	"errors"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses"
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry"
+	telemetrylog "github.com/DataDog/dd-trace-go/v2/internal/telemetry/log"
+	"github.com/DataDog/go-libddwaf/v4"
+	"github.com/DataDog/go-libddwaf/v4/timer"
+	"github.com/DataDog/go-libddwaf/v4/waferrors"
+	"github.com/puzpuzpuz/xsync/v3"
+)
+
+// newHandleTelemetryMetric is the name of the metric used to track the initialization of the WAF handle.
+// This value is changed to waf.updates after the first call to [NewMetricsInstance].
+var newHandleTelemetryMetric = "waf.init"
+var changeToWafUpdates sync.Once
+
+// RequestMilestones is a list of things that can happen as a result of a WAF call. They are accumulated for each
+// request and used as tags on the telemetry metric `waf.requests`.
+// This struct may be modified concurrently.
+// TODO: add request_excluded and block_failure to the mix once we have the capability to track them
+type RequestMilestones struct {
+	requestBlocked bool
+	ruleTriggered  bool
+	wafTimeout     bool
+	rateLimited    bool
+	wafError       bool
+	inputTruncated bool
+}
+
+// raspMetricKey is used as a cache key for the metrics having tags depending on the RASP rule type
+type raspMetricKey[T any] struct {
+	typ           addresses.RASPRuleType
+	additionalTag T
+}
+
+// HandleMetrics holds all the telemetry metrics for the WAF that live and die alongside the WAF handle.
+// It essentially serves as a big cache so that we do not have to go through the telemetry package each time we
+// want to submit a metric and recompute all the tags that are static (from a WAF handle lifetime perspective).
+type HandleMetrics struct {
+	baseTags     []string
+	baseRASPTags [len(addresses.RASPRuleTypes)][]string
+
+	// Common metric types
+
+	// externalTimerDistributions holds the telemetry metrics for the `rasp.duration_ext`, `waf.duration_ext` metrics
+	externalTimerDistributions map[addresses.Scope]telemetry.MetricHandle
+	// internalTimerDistributions holds the telemetry metrics for the `rasp.duration`, `waf.duration` metrics
+	internalTimerDistributions map[addresses.Scope]telemetry.MetricHandle
+
+	// wafRequestsCounts holds the telemetry metrics for the `waf.requests` metric, lazily filled
+	wafRequestsCounts *xsync.MapOf[RequestMilestones, telemetry.MetricHandle]
+
+	// Uncommon metric types
+
+	// raspTimeout holds the telemetry metrics for the rasp.timeout metrics since there is no waf.timeout metric
+	raspTimeout [len(addresses.RASPRuleTypes)]telemetry.MetricHandle
+	// raspRuleEval holds the telemetry metrics for the `rasp.rule_eval` metric by rule type
+	raspRuleEval [len(addresses.RASPRuleTypes)]telemetry.MetricHandle
+
+	// Rare metric types
+
+	// truncationCounts holds the telemetry metrics for the `waf.input_truncated` metric, lazily filled
+	truncationCounts *xsync.MapOf[libddwaf.TruncationReason, telemetry.MetricHandle]
+	// truncationDistributions holds the telemetry metrics for the `waf.truncated_value_size` metric, lazily filled
+	truncationDistributions *xsync.MapOf[libddwaf.TruncationReason, telemetry.MetricHandle]
+
+	// Epic metric types
+
+	// wafErrorCount holds the telemetry metrics for the `waf.error` metric, lazily filled
+	wafErrorCount *xsync.MapOf[int, telemetry.MetricHandle]
+	// raspErrorCount holds the telemetry metrics for the `rasp.error` metric, lazily filled
+	raspErrorCount *xsync.MapOf[raspMetricKey[int], telemetry.MetricHandle]
+
+	// Legendary metric types
+
+	// raspRuleMatch holds the telemetry metrics for the `rasp.rule.match` metric, lazily filled
+	raspRuleMatch *xsync.MapOf[raspMetricKey[string], telemetry.MetricHandle]
+}
+
+var baseRASPTags = [len(addresses.RASPRuleTypes)][]string{
+	addresses.RASPRuleTypeLFI:  {"rule_type:" + addresses.RASPRuleTypeLFI.String()},
+	addresses.RASPRuleTypeSSRF: {"rule_type:" + addresses.RASPRuleTypeSSRF.String()},
+	addresses.RASPRuleTypeSQLI: {"rule_type:" + addresses.RASPRuleTypeSQLI.String()},
+	addresses.RASPRuleTypeCMDI: {"rule_type:" + addresses.RASPRuleTypeCMDI.String(), "rule_variant:exec"},
+}
+
+// NewMetricsInstance creates a new HandleMetrics struct and submits the `waf.init` or `waf.updates` metric.
To be called with the raw results of the WAF handle initialization +func NewMetricsInstance(newHandle *libddwaf.Handle, eventRulesVersion string) HandleMetrics { + telemetry.Count(telemetry.NamespaceAppSec, newHandleTelemetryMetric, []string{ + "waf_version:" + libddwaf.Version(), + "event_rules_version:" + eventRulesVersion, + "success:" + strconv.FormatBool(newHandle != nil), + }).Submit(1) + + changeToWafUpdates.Do(func() { + newHandleTelemetryMetric = "waf.updates" + }) + + baseTags := []string{ + "event_rules_version:" + eventRulesVersion, + "waf_version:" + libddwaf.Version(), + } + + metrics := HandleMetrics{ + baseTags: baseTags, + externalTimerDistributions: map[addresses.Scope]telemetry.MetricHandle{ + addresses.RASPScope: telemetry.Distribution(telemetry.NamespaceAppSec, "rasp.duration_ext", baseTags), + addresses.WAFScope: telemetry.Distribution(telemetry.NamespaceAppSec, "waf.duration_ext", baseTags), + }, + internalTimerDistributions: map[addresses.Scope]telemetry.MetricHandle{ + addresses.RASPScope: telemetry.Distribution(telemetry.NamespaceAppSec, "rasp.duration", baseTags), + addresses.WAFScope: telemetry.Distribution(telemetry.NamespaceAppSec, "waf.duration", baseTags), + }, + wafRequestsCounts: xsync.NewMapOf[RequestMilestones, telemetry.MetricHandle](xsync.WithGrowOnly(), xsync.WithPresize(2^6)), + truncationCounts: xsync.NewMapOf[libddwaf.TruncationReason, telemetry.MetricHandle](xsync.WithGrowOnly(), xsync.WithPresize(2^3)), + truncationDistributions: xsync.NewMapOf[libddwaf.TruncationReason, telemetry.MetricHandle](xsync.WithGrowOnly(), xsync.WithPresize(2^2)), + wafErrorCount: xsync.NewMapOf[int, telemetry.MetricHandle](xsync.WithGrowOnly(), xsync.WithPresize(2^3)), + raspErrorCount: xsync.NewMapOf[raspMetricKey[int], telemetry.MetricHandle](xsync.WithGrowOnly(), xsync.WithPresize(2^3)), + raspRuleMatch: xsync.NewMapOf[raspMetricKey[string], telemetry.MetricHandle](xsync.WithGrowOnly(), xsync.WithPresize(2^3)), + } + + for ruleType := range metrics.baseRASPTags { + tags := make([]string, len(baseRASPTags[ruleType])+len(baseTags)) + copy(tags, baseRASPTags[ruleType]) + copy(tags[len(baseRASPTags[ruleType]):], baseTags) + metrics.baseRASPTags[ruleType] = tags + } + + for ruleType := range metrics.raspRuleEval { + metrics.raspRuleEval[ruleType] = telemetry.Count(telemetry.NamespaceAppSec, "rasp.rule.eval", metrics.baseRASPTags[ruleType]) + } + + for ruleType := range metrics.raspTimeout { + metrics.raspTimeout[ruleType] = telemetry.Count(telemetry.NamespaceAppSec, "rasp.timeout", metrics.baseRASPTags[ruleType]) + } + + return metrics +} + +func (m *HandleMetrics) NewContextMetrics() *ContextMetrics { + return &ContextMetrics{ + HandleMetrics: m, + SumDurations: map[addresses.Scope]map[timer.Key]*atomic.Int64{ + addresses.WAFScope: { + libddwaf.EncodeTimeKey: &atomic.Int64{}, + libddwaf.DurationTimeKey: &atomic.Int64{}, + libddwaf.DecodeTimeKey: &atomic.Int64{}, + }, + addresses.RASPScope: { + libddwaf.EncodeTimeKey: &atomic.Int64{}, + libddwaf.DurationTimeKey: &atomic.Int64{}, + libddwaf.DecodeTimeKey: &atomic.Int64{}, + }, + }, + } +} + +type ContextMetrics struct { + *HandleMetrics + + // SumRASPCalls is the sum of all the RASP calls made by the WAF whatever the rasp rule type it is. + SumRASPCalls atomic.Uint32 + // SumWAFErrors is the sum of all the WAF errors that happened not in the RASP scope. + SumWAFErrors atomic.Uint32 + // SumRASPErrors is the sum of all the RASP errors that happened in the RASP scope. 
+ SumRASPErrors atomic.Uint32 + + // SumWAFTimeouts is the sum of all the WAF timeouts that happened not in the RASP scope. + SumWAFTimeouts atomic.Uint32 + + // SumRASPTimeouts is the sum of all the RASP timeouts that happened in the RASP scope by rule type. + SumRASPTimeouts [len(addresses.RASPRuleTypes)]atomic.Uint32 + + // SumDurations is the sum of all the run durations calls to ddwaf_run behind go-libddwaf + // This map is built statically when ContextMetrics is created and readonly after that. + SumDurations map[addresses.Scope]map[timer.Key]*atomic.Int64 + + // Milestones are the tags of the metric `waf.requests` that will be submitted at the end of the waf context + Milestones RequestMilestones +} + +// Submit increment the metrics for the WAF run stats at the end of each waf context lifecycle +// It registers the metrics: +// - `waf.duration_ext` and `rasp.duration_ext` using [libddwaf.Context.Timer] +// - `waf.duration` and `rasp.duration` using [libddwaf.Result.TimerStats] accumulated in the ContextMetrics +// - `rasp.timeout` for the RASP scope using [libddwaf.Stats.TimeoutRASPCount] +// - `waf.input_truncated` and `waf.truncated_value_size` for the truncations using [libddwaf.Stats.Truncations] +// - `waf.requests` for the milestones using [ContextMetrics.Milestones] +func (m *ContextMetrics) Submit(truncations map[libddwaf.TruncationReason][]int, timerStats map[timer.Key]time.Duration) { + for scope, value := range timerStats { + // Add metrics `{waf,rasp}.duration_ext` + metric, found := m.externalTimerDistributions[scope] + if !found { + telemetrylog.Error("unexpected scope name: %s", scope, telemetry.WithTags([]string{"product:appsec"})) + continue + } + + metric.Submit(float64(value) / float64(time.Microsecond.Nanoseconds())) + + // Add metrics `{waf,rasp}.duration` + for key, value := range m.SumDurations[scope] { + if key != libddwaf.DurationTimeKey { + continue + } + + if metric, found := m.internalTimerDistributions[scope]; found { + metric.Submit(float64(value.Load()) / float64(time.Microsecond.Nanoseconds())) + } + } + } + + for ruleTyp := range m.SumRASPTimeouts { + if nbTimeouts := m.SumRASPTimeouts[ruleTyp].Load(); nbTimeouts > 0 { + m.raspTimeout[ruleTyp].Submit(float64(nbTimeouts)) + } + } + + var truncationTypes libddwaf.TruncationReason + for reason, sizes := range truncations { + truncationTypes |= reason + handle, _ := m.truncationDistributions.LoadOrCompute(reason, func() telemetry.MetricHandle { + return telemetry.Distribution(telemetry.NamespaceAppSec, "waf.truncated_value_size", []string{"truncation_reason:" + strconv.Itoa(int(reason))}) + }) + for _, size := range sizes { + handle.Submit(float64(size)) + } + } + + if truncationTypes != 0 { + handle, _ := m.truncationCounts.LoadOrCompute(truncationTypes, func() telemetry.MetricHandle { + return telemetry.Count(telemetry.NamespaceAppSec, "waf.input_truncated", []string{"truncation_reason:" + strconv.Itoa(int(truncationTypes))}) + }) + handle.Submit(1) + } + + if len(truncations) > 0 { + m.Milestones.inputTruncated = true + } + + m.incWafRequestsCounts() +} + +// incWafRequestsCounts increments the `waf.requests` metric with the current milestones and creates a new metric handle if it does not exist +func (m *ContextMetrics) incWafRequestsCounts() { + handle, _ := m.wafRequestsCounts.LoadOrCompute(m.Milestones, func() telemetry.MetricHandle { + return telemetry.Count(telemetry.NamespaceAppSec, "waf.requests", append([]string{ + "request_blocked:" + strconv.FormatBool(m.Milestones.requestBlocked), + 
"rule_triggered:" + strconv.FormatBool(m.Milestones.ruleTriggered), + "waf_timeout:" + strconv.FormatBool(m.Milestones.wafTimeout), + "rate_limited:" + strconv.FormatBool(m.Milestones.rateLimited), + "waf_error:" + strconv.FormatBool(m.Milestones.wafError), + "input_truncated:" + strconv.FormatBool(m.Milestones.inputTruncated), + }, m.baseTags...)) + }) + + handle.Submit(1) +} + +// RegisterWafRun register the different outputs of the WAF for the `waf.requests` and also directly increment the `rasp.rule.match` and `rasp.rule.eval` metrics. +// It registers the metrics: +// - `rasp.rule.match` +// - `rasp.rule.eval` +// It accumulate data for: +// - `waf.requests` +// - `rasp.duration` +// - `waf.duration` +func (m *ContextMetrics) RegisterWafRun(addrs libddwaf.RunAddressData, timerStats map[timer.Key]time.Duration, tags RequestMilestones) { + for key, value := range timerStats { + m.SumDurations[addrs.TimerKey][key].Add(int64(value)) + } + + switch addrs.TimerKey { + case addresses.RASPScope: + m.SumRASPCalls.Add(1) + ruleType, ok := addresses.RASPRuleTypeFromAddressSet(addrs) + if !ok { + telemetrylog.Error("unexpected call to RASPRuleTypeFromAddressSet", telemetry.WithTags([]string{"product:appsec"})) + return + } + if metric := m.raspRuleEval[ruleType]; metric != nil { + metric.Submit(1) + } + if tags.ruleTriggered { + blockTag := "block:irrelevant" + if tags.requestBlocked { // TODO: add block:failure to the mix + blockTag = "block:success" + } + + handle, _ := m.raspRuleMatch.LoadOrCompute(raspMetricKey[string]{typ: ruleType, additionalTag: blockTag}, func() telemetry.MetricHandle { + return telemetry.Count(telemetry.NamespaceAppSec, "rasp.rule.match", append([]string{ + blockTag, + }, m.baseRASPTags[ruleType]...)) + }) + + handle.Submit(1) + } + if tags.wafTimeout { + m.SumRASPTimeouts[ruleType].Add(1) + } + case addresses.WAFScope, "": + if tags.requestBlocked { + m.Milestones.requestBlocked = true + } + if tags.ruleTriggered { + m.Milestones.ruleTriggered = true + } + if tags.wafTimeout { + m.Milestones.wafTimeout = true + m.SumWAFTimeouts.Add(1) + } + if tags.rateLimited { + m.Milestones.rateLimited = true + } + if tags.wafError { + m.Milestones.wafError = true + } + default: + telemetrylog.Error("unexpected scope name: %s", addrs.TimerKey, telemetry.WithTags([]string{"product:appsec"})) + } +} + +// IncWafError should be called if go-libddwaf.(*Context).Run() returns an error to increments metrics linked to WAF errors +// It registers the metrics: +// - `waf.error` +// - `rasp.error` +func (m *ContextMetrics) IncWafError(addrs libddwaf.RunAddressData, in error) { + if in == nil { + return + } + + if !errors.Is(in, waferrors.ErrTimeout) { + telemetrylog.Error("unexpected WAF error: %s", in, telemetry.WithTags(append([]string{ + "product:appsec", + }, m.baseTags...))) + } + + switch addrs.TimerKey { + case addresses.RASPScope: + ruleType, ok := addresses.RASPRuleTypeFromAddressSet(addrs) + if !ok { + telemetrylog.Error("unexpected call to RASPRuleTypeFromAddressSet: %s", in, telemetry.WithTags([]string{"product:appsec"})) + } + m.raspError(in, ruleType) + case addresses.WAFScope, "": + m.wafError(in) + default: + telemetrylog.Error("unexpected scope name: %s", addrs.TimerKey, telemetry.WithTags([]string{"product:appsec"})) + } +} + +// defaultWafErrorCode is the default error code if the error does not implement [libddwaf.RunError] +// meaning if the error actual come for the bindings and not from the WAF itself +const defaultWafErrorCode = -127 + +func (m *ContextMetrics) 
wafError(in error) { + m.SumWAFErrors.Add(1) + errCode := defaultWafErrorCode + if code := waferrors.ToWafErrorCode(in); code != 0 { + errCode = code + } + + handle, _ := m.wafErrorCount.LoadOrCompute(errCode, func() telemetry.MetricHandle { + return telemetry.Count(telemetry.NamespaceAppSec, "waf.error", append([]string{ + "error_code:" + strconv.Itoa(errCode), + }, m.baseTags...)) + }) + + handle.Submit(1) +} + +func (m *ContextMetrics) raspError(in error, ruleType addresses.RASPRuleType) { + m.SumRASPErrors.Add(1) + errCode := defaultWafErrorCode + if code := waferrors.ToWafErrorCode(in); code != 0 { + errCode = code + } + + handle, _ := m.raspErrorCount.LoadOrCompute(raspMetricKey[int]{typ: ruleType, additionalTag: errCode}, func() telemetry.MetricHandle { + return telemetry.Count(telemetry.NamespaceAppSec, "rasp.error", append([]string{ + "error_code:" + strconv.Itoa(errCode), + }, m.baseRASPTags[ruleType]...)) + }) + + handle.Submit(1) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/run.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/run.go new file mode 100644 index 00000000..b12c3319 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/run.go @@ -0,0 +1,91 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package waf + +import ( + "context" + "errors" + "maps" + + "github.com/DataDog/dd-trace-go/v2/appsec/events" + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/samplernames" + "github.com/DataDog/go-libddwaf/v4" + "github.com/DataDog/go-libddwaf/v4/waferrors" +) + +// Run runs the WAF with the given address data and sends the results to the event receiver +// the event receiver can be the same os the method receiver but not always +// the event receiver is the one that will receive the actions events generated by the WAF +func (op *ContextOperation) Run(eventReceiver dyngo.Operation, addrs libddwaf.RunAddressData) { + ctx := op.context.Load() + if ctx == nil { // Context was closed concurrently + return + } + + // Remove unsupported addresses in case the listener was registered but some addresses are still unsupported + // Technically the WAF does this step for us but doing this check before calling the WAF makes us skip encoding huge + // values that may be discarded by the WAF afterward. + // e.g. gRPC response body address that is not in the default ruleset but will still be sent to the WAF and may be huge + for _, addrType := range []map[string]any{addrs.Persistent, addrs.Ephemeral} { + maps.DeleteFunc(addrType, func(key string, _ any) bool { + _, ok := op.supportedAddresses[key] + return !ok + }) + } + + result, err := ctx.Run(addrs) + if errors.Is(err, waferrors.ErrTimeout) { + log.Debug("appsec: WAF timeout value reached: %s", err.Error()) + } + + op.metrics.IncWafError(addrs, err) + + wafTimeout := errors.Is(err, waferrors.ErrTimeout) + rateLimited := op.AddEvents(result.Events...) 
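+
+	// Illustrative flow (assumed semantics): SendActionEvents below emits any blocking or
+	// redirect actions to the event receiver; callers observe them as an
+	// *events.BlockingSecurityEvent via dyngo.OnData, the way RunSimple does further down:
+	//
+	//	dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { err = e })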
+ blocking := actions.SendActionEvents(eventReceiver, result.Actions) + op.AbsorbDerivatives(result.Derivatives) + + // Set the trace to ManualKeep if the WAF instructed us to keep it. + if result.Keep { + op.SetTag(ext.ManualKeep, samplernames.AppSec) + } + + if result.HasEvents() { + dyngo.EmitData(op, &SecurityEvent{}) + } + + op.metrics.RegisterWafRun(addrs, result.TimerStats, RequestMilestones{ + requestBlocked: blocking, + ruleTriggered: result.HasEvents(), + wafTimeout: wafTimeout, + rateLimited: rateLimited, + wafError: err != nil && !wafTimeout, + }) +} + +// RunSimple runs the WAF with the given address data and returns an error that should be forwarded to the caller +func RunSimple(ctx context.Context, addrs libddwaf.RunAddressData, errorLog string) error { + parent, _ := dyngo.FromContext(ctx) + if parent == nil { + log.Error("%s", errorLog) + return nil + } + + var err error + op := dyngo.NewOperation(parent) + dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { + err = e + }) + dyngo.EmitData(op, RunEvent{ + Operation: op, + RunAddressData: addrs, + }) + return err +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/features.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/features.go new file mode 100644 index 00000000..b6b7c0fb --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/features.go @@ -0,0 +1,85 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package appsec + +import ( + "errors" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/graphqlsec" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/grpcsec" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/ossec" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/sqlsec" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/trace" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/usersec" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +var features = []listener.NewFeature{ + trace.NewAppsecSpanTransport, + waf.NewWAFFeature, + httpsec.NewHTTPSecFeature, + grpcsec.NewGRPCSecFeature, + graphqlsec.NewGraphQLSecFeature, + usersec.NewUserSecFeature, + sqlsec.NewSQLSecFeature, + ossec.NewOSSecFeature, + httpsec.NewSSRFProtectionFeature, +} + +func (a *appsec) SwapRootOperation() error { + newRoot := dyngo.NewRootOperation() + newFeatures := make([]listener.Feature, 0, len(features)) + var featureErrors []error + for _, newFeature := range features { + feature, err := newFeature(a.cfg, newRoot) + if err != nil { + featureErrors = append(featureErrors, err) + continue + } + + // If error is nil and feature is nil, it means the feature did not activate itself + if feature == nil { + continue + } + + newFeatures = append(newFeatures, feature) + } + + err := errors.Join(featureErrors...) 
+ if err != nil { + for _, feature := range newFeatures { + feature.Stop() + } + return err + } + + a.featuresMu.Lock() + defer a.featuresMu.Unlock() + + oldFeatures := a.features + a.features = newFeatures + + if len(oldFeatures) > 0 { + log.Debug("appsec: stopping the following features: %q", oldFeatures) + } + if len(newFeatures) > 0 { + log.Debug("appsec: starting the following features: %q", newFeatures) + } + + dyngo.SwapRootOperation(newRoot) + + log.Debug("appsec: swapped root operation") + + for _, oldFeature := range oldFeatures { + oldFeature.Stop() + } + + return nil +} diff --git a/vendor/github.com/DataDog/appsec-internal-go/limiter/limiter.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/limiter/limiter.go similarity index 93% rename from vendor/github.com/DataDog/appsec-internal-go/limiter/limiter.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/limiter/limiter.go index f1f16d36..625c7d98 100644 --- a/vendor/github.com/DataDog/appsec-internal-go/limiter/limiter.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/limiter/limiter.go @@ -7,9 +7,8 @@ package limiter import ( - "time" - "sync/atomic" + "time" ) // Limiter is used to abstract the rate limiter implementation to only expose the needed function for rate limiting. @@ -28,14 +27,21 @@ type Limiter interface { type TokenTicker struct { tokens atomic.Int64 // The amount of tokens currently available maxTokens int64 // The maximum amount of tokens the bucket can hold + interval time.Duration // The interval at which the tokens are refilled ticker *time.Ticker // The ticker used to update the bucket (nil if not started yet) stopChan chan struct{} // The channel to stop the ticker updater (nil if not started yet) } // NewTokenTicker is a utility function that allocates a token ticker, initializes necessary fields and returns it func NewTokenTicker(tokens, maxTokens int64) *TokenTicker { + return NewTokenTickerWithInterval(tokens, maxTokens, time.Second) +} + +// NewTokenTickerWithInterval is a utility function that allocates a token ticker with a custom interval +func NewTokenTickerWithInterval(tokens, maxTokens int64, interval time.Duration) *TokenTicker { t := &TokenTicker{ maxTokens: maxTokens, + interval: interval, } t.tokens.Store(tokens) return t @@ -44,7 +50,7 @@ func NewTokenTicker(tokens, maxTokens int64) *TokenTicker { // updateBucket performs a select loop to update the token amount in the bucket. // Used in a goroutine by the rate limiter. func (t *TokenTicker) updateBucket(startTime time.Time, ticksChan <-chan time.Time, stopChan <-chan struct{}, syncChan chan<- struct{}) { - nsPerToken := time.Second.Nanoseconds() / t.maxTokens + nsPerToken := t.interval.Nanoseconds() / t.maxTokens elapsedNs := int64(0) prevStamp := startTime diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/feature.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/feature.go new file mode 100644 index 00000000..d22067cd --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/feature.go @@ -0,0 +1,24 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
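+
+// Usage sketch for the limiter change above (assumed: TokenTicker also exposes the
+// Start/Stop/Allow methods from the original file): a bucket refilled over a custom
+// interval, e.g. 300 tokens per minute for proxy-mode API Security sampling:
+//
+//	t := limiter.NewTokenTickerWithInterval(300, 300, time.Minute)
+//	t.Start()
+//	defer t.Stop()
+//	allowed := t.Allow()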
+ +package listener + +import ( + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" +) + +// Feature is an interface that represents a feature that can be started and stopped. +type Feature interface { + // String should return a user-friendly name for the feature. + String() string + // Stop stops the feature. + Stop() +} + +// NewFeature is a function that creates a new feature. +// The error returned will be fatal for the application if not nil. +// If both the feature and the error are nil, the feature will be considered inactive. +type NewFeature func(*config.Config, dyngo.Operation) (Feature, error) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/graphqlsec/graphql.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/graphqlsec/graphql.go new file mode 100644 index 00000000..4de4b953 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/graphqlsec/graphql.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package graphqlsec + +import ( + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/graphqlsec" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener" +) + +type Feature struct{} + +func (*Feature) String() string { + return "GraphQL Security" +} + +func (*Feature) Stop() {} + +func (f *Feature) OnResolveField(op *graphqlsec.ResolveOperation, args graphqlsec.ResolveOperationArgs) { + dyngo.EmitData(op, waf.RunEvent{ + Operation: op, + RunAddressData: addresses.NewAddressesBuilder(). + WithGraphQLResolver(args.FieldName, args.Arguments). + Build(), + }) +} + +func NewGraphQLSecFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !config.SupportedAddresses.AnyOf(addresses.GraphQLServerResolverAddr) { + return nil, nil + } + + feature := &Feature{} + dyngo.On(rootOp, feature.OnResolveField) + + return feature, nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/grpcsec/grpc.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/grpcsec/grpc.go new file mode 100644 index 00000000..13222a9d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/grpcsec/grpc.go @@ -0,0 +1,75 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
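+
+// Sketch of the feature-gating pattern shared by the listener packages (hypothetical
+// feature, assumed names): constructors return (nil, nil) to stay inactive when the
+// current ruleset does not use any address they care about:
+//
+//	func NewMySecFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) {
+//		if !cfg.SupportedAddresses.AnyOf(addresses.ServerRequestBodyAddr) {
+//			return nil, nil // inactive; a non-nil error would be fatal instead
+//		}
+//		f := &Feature{}
+//		dyngo.On(rootOp, f.OnStart)
+//		return f, nil
+//	}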
+ +package grpcsec + +import ( + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/grpcsec" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +type Feature struct{} + +func (*Feature) String() string { + return "gRPC Security" +} + +func (*Feature) Stop() {} + +func NewGRPCSecFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !config.SupportedAddresses.AnyOf( + addresses.ClientIPAddr, + addresses.GRPCServerMethodAddr, + addresses.GRPCServerRequestMessageAddr, + addresses.GRPCServerRequestMetadataAddr, + addresses.GRPCServerResponseMessageAddr, + addresses.GRPCServerResponseMetadataHeadersAddr, + addresses.GRPCServerResponseMetadataTrailersAddr, + addresses.GRPCServerResponseStatusCodeAddr) { + return nil, nil + } + + feature := &Feature{} + dyngo.On(rootOp, feature.OnStart) + dyngo.OnFinish(rootOp, feature.OnFinish) + return feature, nil +} + +func (f *Feature) OnStart(op *grpcsec.HandlerOperation, args grpcsec.HandlerOperationArgs) { + ipTags, clientIP := httpsec.ClientIPTags(args.Metadata, false, args.RemoteAddr) + log.Debug("appsec: http client ip detection returned `%s`", clientIP) + + op.SetStringTags(ipTags) + + SetRequestMetadataTags(op, args.Metadata) + + op.Run(op, + addresses.NewAddressesBuilder(). + WithGRPCMethod(args.Method). + WithGRPCRequestMetadata(args.Metadata). + WithClientIP(clientIP). + Build(), + ) +} + +func (f *Feature) OnFinish(op *grpcsec.HandlerOperation, res grpcsec.HandlerOperationRes) { + op.Run(op, + addresses.NewAddressesBuilder(). + WithGRPCResponseStatusCode(res.StatusCode). + Build(), + ) +} + +func SetRequestMetadataTags(span trace.TagSetter, metadata map[string][]string) { + for h, v := range httpsec.NormalizeHTTPHeaders(metadata) { + span.SetTag("grpc.metadata."+h, v) + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/clientip.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/clientip.go new file mode 100644 index 00000000..84350ac8 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/clientip.go @@ -0,0 +1,153 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package httpsec + +import ( + "net" + "net/netip" + "net/textproto" + "regexp" + "strings" + + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/theckman/httpforwarded" +) + +// ClientIP returns the first public IP address found in the given headers. If +// none is present, it returns the first valid IP address present, possibly +// being a local IP address. The remote address, when valid, is used as fallback +// when no IP address has been found at all. 
+func ClientIP(hdrs map[string][]string, hasCanonicalHeaders bool, remoteAddr string, monitoredHeaders []string) (remoteIP, clientIP netip.Addr) { + // Walk IP-related headers + var foundIP netip.Addr +headersLoop: + for _, headerName := range monitoredHeaders { + if hasCanonicalHeaders { + headerName = textproto.CanonicalMIMEHeaderKey(headerName) + } + + headerValues, exists := hdrs[headerName] + if !exists { + continue // this monitored header is not present + } + + // Assuming a list of comma-separated IP addresses, split them and build + // the list of values to try to parse as IP addresses + var ips []string + for _, headerValue := range headerValues { + if strings.ToLower(headerName) == "forwarded" { + ips = append(ips, parseForwardedHeader(headerValue)...) + } else { + ips = append(ips, strings.Split(headerValue, ",")...) + } + } + + // Look for the first valid or global IP address in the comma-separated list + for _, ipstr := range ips { + ip := parseIP(strings.TrimSpace(ipstr)) + if !ip.IsValid() { + continue + } + // Replace foundIP if still not valid in order to keep the oldest + if !foundIP.IsValid() { + foundIP = ip + } + if isGlobalIP(ip) { + foundIP = ip + break headersLoop + } + } + } + + // Decide which IP address is the client one by starting with the remote IP + if ip := parseIP(remoteAddr); ip.IsValid() { + remoteIP = ip + clientIP = ip + } + + // The IP address found in the headers supersedes a private remote IP address. + if foundIP.IsValid() && !isGlobalIP(remoteIP) || isGlobalIP(foundIP) { + clientIP = foundIP + } + + return remoteIP, clientIP +} + +var ( + forwardedPortRe = regexp.MustCompile(`^(?:\[([a-f0-9:]+)\]|(\d+\.\d+\.\d+\.\d+))(?::\d+)?$`) +) + +// parseForwardedHeader parses the value of the `Forwarded` header, returning +// the values of all `for` directives it contains, in the order they appear. +// Values may not always be IP addresses; but those values that are will have +// any quoting and port information removed. +// +// If the value is found to be syntactically incorrect, a null slice is returned. +// +// See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Forwarded +func parseForwardedHeader(value string) []string { + result, err := httpforwarded.ParseParameter("for", []string{value}) + if err != nil { + log.Debug("invalid Forwarded header value: %v", err.Error()) + return nil + } + + for idx, val := range result { + matches := forwardedPortRe.FindStringSubmatch(val) + if matches == nil { + continue + } + // Remove the port information from the value, and un-brace IPv6 addresses. + if matches[1] != "" { + result[idx] = matches[1] + } else { + result[idx] = matches[2] + } + } + + return result +} + +func parseIP(s string) netip.Addr { + if ip, err := netip.ParseAddr(s); err == nil { + return ip + } + if h, _, err := net.SplitHostPort(s); err == nil { + if ip, err := netip.ParseAddr(h); err == nil { + return ip + } + } + return netip.Addr{} +} + +var ( + ipv6SpecialNetworks = [...]netip.Prefix{ + netip.MustParsePrefix("fec0::/10"), // site local + } + + // This IP block is not routable on internet and an industry standard/trend + // is emerging to use it for traditional IT-managed networking environments + // with limited RFC1918 space allocations. This is also frequently used by + // kubernetes pods' internal networking. It is hence deemed private for the + // purpose of Client IP extraction. 
+ k8sInternalIPv4Prefix = netip.MustParsePrefix("100.65.0.0/10") +) + +func isGlobalIP(ip netip.Addr) bool { + // IsPrivate also checks for ipv6 ULA. + // We care to check for these addresses are not considered public, hence not global. + // See https://www.rfc-editor.org/rfc/rfc4193.txt for more details. + isGlobal := ip.IsValid() && !ip.IsPrivate() && !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !k8sInternalIPv4Prefix.Contains(ip) + if !isGlobal || !ip.Is6() { + return isGlobal + } + for _, n := range ipv6SpecialNetworks { + if n.Contains(ip) { + return false + } + } + return isGlobal +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/http.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/http.go new file mode 100644 index 00000000..f3652a5b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/http.go @@ -0,0 +1,150 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package httpsec + +import ( + "net/netip" + "strings" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/apisec" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener" + "github.com/DataDog/dd-trace-go/v2/internal/samplernames" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" +) + +type Feature struct { + APISec config.APISecConfig + ForceKeepWhenGeneratingSchema bool +} + +func (*Feature) String() string { + return "HTTP Security" +} + +func (*Feature) Stop() {} + +func NewHTTPSecFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !config.SupportedAddresses.AnyOf(addresses.ServerRequestMethodAddr, + addresses.ServerRequestRawURIAddr, + addresses.ServerRequestHeadersNoCookiesAddr, + addresses.ServerRequestCookiesAddr, + addresses.ServerRequestQueryAddr, + addresses.ServerRequestPathParamsAddr, + addresses.ServerRequestBodyAddr, + addresses.ServerResponseBodyAddr, + addresses.ServerResponseStatusAddr, + addresses.ServerResponseHeadersNoCookiesAddr, + addresses.ClientIPAddr, + ) { + // We extract headers even when the security features are not enabled... + feature := &HeaderExtractionFeature{} + dyngo.On(rootOp, feature.OnRequest) + dyngo.OnFinish(rootOp, feature.OnResponse) + return feature, nil + } + + feature := &Feature{ + APISec: config.APISec, + ForceKeepWhenGeneratingSchema: config.TracingAsTransport, + } + + dyngo.On(rootOp, feature.OnRequest) + dyngo.OnFinish(rootOp, feature.OnResponse) + return feature, nil +} + +func (feature *Feature) OnRequest(op *httpsec.HandlerOperation, args httpsec.HandlerOperationArgs) { + headers, ip := extractRequestHeaders(op, args) + + op.Run(op, + addresses.NewAddressesBuilder(). + WithMethod(args.Method). + WithRawURI(args.RequestURI). + WithHeadersNoCookies(headers). + WithCookies(args.Cookies). + WithQuery(args.QueryParams). + WithPathParams(args.PathParams). + WithClientIP(ip). 
+ Build(), + ) +} + +func (feature *Feature) OnResponse(op *httpsec.HandlerOperation, resp httpsec.HandlerOperationRes) { + headers := extractResponseHeaders(op, resp) + + builder := addresses.NewAddressesBuilder(). + WithResponseHeadersNoCookies(headers). + WithResponseStatus(resp.StatusCode) + + if feature.shouldExtractShema(op, resp.StatusCode) { + builder = builder.ExtractSchema() + + if feature.ForceKeepWhenGeneratingSchema { + op.SetTag(ext.ManualKeep, samplernames.AppSec) + } + } + + op.Run(op, builder.Build()) + + metric := "no_schema" + for k := range op.Derivatives() { + if strings.HasPrefix(k, "_dd.appsec.s.") { + metric = "schema" + break + } + } + telemetry.Count(telemetry.NamespaceAppSec, "api_security.request."+metric, []string{"framework:" + op.Framework()}).Submit(1) +} + +// shouldExtractShema checks that API Security is enabled and that sampling rate +// allows extracting schemas +func (feature *Feature) shouldExtractShema(op *httpsec.HandlerOperation, statusCode int) bool { + return feature.APISec.Enabled && + feature.APISec.Sampler.DecisionFor(apisec.SamplingKey{ + Method: op.Method(), + Route: op.Route(), + StatusCode: statusCode, + }) +} + +type HeaderExtractionFeature struct{} + +func (*HeaderExtractionFeature) String() string { + return "HTTP Header Extraction" +} + +func (*HeaderExtractionFeature) Stop() {} + +func (*HeaderExtractionFeature) OnRequest(op *httpsec.HandlerOperation, args httpsec.HandlerOperationArgs) { + _, _ = extractRequestHeaders(op, args) +} + +func (*HeaderExtractionFeature) OnResponse(op *httpsec.HandlerOperation, resp httpsec.HandlerOperationRes) { + _ = extractResponseHeaders(op, resp) +} + +func extractRequestHeaders(op *httpsec.HandlerOperation, args httpsec.HandlerOperationArgs) (map[string][]string, netip.Addr) { + tags, ip := ClientIPTags(args.Headers, true, args.RemoteAddr) + + op.SetStringTags(tags) + headers := headersRemoveCookies(args.Headers) + headers["host"] = []string{args.Host} + + setRequestHeadersTags(op, headers) + + return headers, ip +} + +func extractResponseHeaders(op *httpsec.HandlerOperation, resp httpsec.HandlerOperationRes) map[string][]string { + headers := headersRemoveCookies(resp.Headers) + setResponseHeadersTags(op, headers) + return headers +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/request.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/request.go new file mode 100644 index 00000000..0068da32 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/request.go @@ -0,0 +1,186 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package httpsec + +import ( + "net/http" + "net/netip" + "strings" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace" + "github.com/DataDog/dd-trace-go/v2/internal/env" +) + +const ( + // envClientIPHeader is the name of the env var used to specify the IP header to be used for client IP collection. + envClientIPHeader = "DD_TRACE_CLIENT_IP_HEADER" +) + +var ( + // defaultIPHeaders is the default list of IP-related headers leveraged to + // retrieve the public client IP address in RemoteAddr. The headers are + // checked in the order they are listed; do not re-order unless you know what + // you are doing. 
+ defaultIPHeaders = []string{ + "x-forwarded-for", + "x-real-ip", + "true-client-ip", + "x-client-ip", + "forwarded", + "forwarded-for", + "x-cluster-client-ip", + "fastly-client-ip", + "cf-connecting-ip", + "cf-connecting-ipv6", + } + + // defaultCollectedHeaders is the default list of HTTP headers collected as + // request span tags when appsec is enabled. + defaultCollectedHeaders = append([]string{ + "accept-encoding", + "accept-language", + "accept", + "akamai-user-risk", + "cf-ray", + "cloudfront-viewer-ja3-fingerprint", + "content-encoding", + "content-language", + "content-length", + "content-type", + "host", + "user-agent", + "via", + "x-amzn-trace-id", + "x-appgw-trace-id", + "x-cloud-trace-context", + "x-forwarded", + "x-sigsci-requestid", + "x-sigsci-tags", + }, defaultIPHeaders...) + + // collectedHeadersLookupMap is a helper lookup map of HTTP headers to + // collect as request span tags when appsec is enabled. It is computed at + // init-time based on defaultCollectedHeaders and leveraged by NormalizeHTTPHeaders. + collectedHeadersLookupMap map[string]struct{} + + // monitoredClientIPHeadersCfg is the list of IP-related headers leveraged to + // retrieve the public client IP address in RemoteAddr. This is defined at init + // time in function of the value of the envClientIPHeader environment variable. + monitoredClientIPHeadersCfg []string +) + +// ClientIPTags returns the resulting Datadog span tags `http.client_ip` +// containing the client IP and `network.client.ip` containing the remote IP. +// The tags are present only if a valid ip address has been returned by +// RemoteAddr(). +func ClientIPTags(headers map[string][]string, hasCanonicalHeaders bool, remoteAddr string) (tags map[string]string, clientIP netip.Addr) { + remoteIP, clientIP := ClientIP(headers, hasCanonicalHeaders, remoteAddr, monitoredClientIPHeadersCfg) + return ClientIPTagsFor(remoteIP, clientIP), clientIP +} + +func ClientIPTagsFor(remoteIP netip.Addr, clientIP netip.Addr) map[string]string { + remoteIPValid := remoteIP.IsValid() + clientIPValid := clientIP.IsValid() + + if !remoteIPValid && !clientIPValid { + return nil + } + + tags := make(map[string]string, 2) + if remoteIPValid { + tags[ext.NetworkClientIP] = remoteIP.String() + } + if clientIPValid { + tags[ext.HTTPClientIP] = clientIP.String() + } + + return tags +} + +// NormalizeHTTPHeaders returns the HTTP headers following Datadog's +// normalization format. 
+func NormalizeHTTPHeaders(headers map[string][]string) (normalized map[string]string) { + if len(headers) == 0 { + return nil + } + normalized = make(map[string]string, len(collectedHeadersLookupMap)) + for k, v := range headers { + k = normalizeHTTPHeaderName(k) + if _, found := collectedHeadersLookupMap[k]; found { + normalized[k] = normalizeHTTPHeaderValue(v) + } + } + if len(normalized) == 0 { + return nil + } + return normalized +} + +// Remove cookies from the request headers and return the map of headers +// Used from `server.request.headers.no_cookies` and server.response.headers.no_cookies` addresses for the WAF +func headersRemoveCookies(headers http.Header) map[string][]string { + headersNoCookies := make(http.Header, len(headers)) + for k, v := range headers { + k := strings.ToLower(k) + if k == "cookie" { + continue + } + headersNoCookies[k] = v + } + return headersNoCookies +} + +func normalizeHTTPHeaderName(name string) string { + return strings.ToLower(name) +} + +func normalizeHTTPHeaderValue(values []string) string { + return strings.Join(values, ",") +} + +func init() { + makeCollectedHTTPHeadersLookupMap() + readMonitoredClientIPHeadersConfig() +} + +func makeCollectedHTTPHeadersLookupMap() { + collectedHeadersLookupMap = make(map[string]struct{}, len(defaultCollectedHeaders)) + for _, h := range defaultCollectedHeaders { + collectedHeadersLookupMap[h] = struct{}{} + } +} + +func readMonitoredClientIPHeadersConfig() { + if header := env.Get(envClientIPHeader); header != "" { + // Make this header the only one to consider in RemoteAddr + monitoredClientIPHeadersCfg = []string{header} + + // Add this header to the list of collected headers + header = normalizeHTTPHeaderName(header) + collectedHeadersLookupMap[header] = struct{}{} + } else { + // No specific IP header was configured, use the default list + monitoredClientIPHeadersCfg = defaultIPHeaders + } +} + +// setRequestHeadersTags sets the AppSec-specific request headers span tags. +func setRequestHeadersTags(span trace.TagSetter, headers map[string][]string) { + setHeadersTags(span, "http.request.headers.", headers) +} + +// setResponseHeadersTags sets the AppSec-specific response headers span tags. +func setResponseHeadersTags(span trace.TagSetter, headers map[string][]string) { + setHeadersTags(span, "http.response.headers.", headers) +} + +// setHeadersTags sets the AppSec-specific headers span tags. +func setHeadersTags(span trace.TagSetter, tagPrefix string, headers map[string][]string) { + for h, v := range NormalizeHTTPHeaders(headers) { + span.SetTag(tagPrefix+h, v) + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/roundtripper.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/roundtripper.go new file mode 100644 index 00000000..ad65ae5d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/roundtripper.go @@ -0,0 +1,41 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
+ +package httpsec + +import ( + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener" + + "github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf" +) + +type SSRFProtectionFeature struct{} + +func (*SSRFProtectionFeature) String() string { + return "SSRF Protection" +} + +func (*SSRFProtectionFeature) Stop() {} + +func NewSSRFProtectionFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !config.RASP || !config.SupportedAddresses.AnyOf(addresses.ServerIoNetURLAddr) { + return nil, nil + } + + feature := &SSRFProtectionFeature{} + dyngo.On(rootOp, feature.OnStart) + return feature, nil +} + +func (*SSRFProtectionFeature) OnStart(op *httpsec.RoundTripOperation, args httpsec.RoundTripOperationArgs) { + dyngo.EmitData(op, waf.RunEvent{ + Operation: op, + RunAddressData: addresses.NewAddressesBuilder().WithURL(args.URL).Build(), + }) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/ossec/lfi.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/ossec/lfi.go new file mode 100644 index 00000000..57cc0995 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/ossec/lfi.go @@ -0,0 +1,53 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package ossec + +import ( + "os" + + "github.com/DataDog/dd-trace-go/v2/appsec/events" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/ossec" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener" +) + +type Feature struct{} + +func (*Feature) String() string { + return "LFI Protection" +} + +func (*Feature) Stop() {} + +func NewOSSecFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !cfg.RASP || !cfg.SupportedAddresses.AnyOf(addresses.ServerIOFSFileAddr) { + return nil, nil + } + + feature := &Feature{} + dyngo.On(rootOp, feature.OnStart) + return feature, nil +} + +func (*Feature) OnStart(op *ossec.OpenOperation, args ossec.OpenOperationArgs) { + dyngo.OnData(op, func(err *events.BlockingSecurityEvent) { + dyngo.OnFinish(op, func(_ *ossec.OpenOperation, res ossec.OpenOperationRes[*os.File]) { + if res.Err != nil { + *res.Err = err + } + }) + }) + + dyngo.EmitData(op, waf.RunEvent{ + Operation: op, + RunAddressData: addresses.NewAddressesBuilder(). + WithFilePath(args.Path). 
+ Build(), + }) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/sqlsec/sql.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/sqlsec/sql.go new file mode 100644 index 00000000..7f3e35d7 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/sqlsec/sql.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package sqlsec + +import ( + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/sqlsec" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener" +) + +type Feature struct{} + +func (*Feature) String() string { + return "SQLi Protection" +} + +func (*Feature) Stop() {} + +func NewSQLSecFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !cfg.RASP || !cfg.SupportedAddresses.AnyOf(addresses.ServerDBTypeAddr, addresses.ServerDBStatementAddr) { + return nil, nil + } + + feature := &Feature{} + dyngo.On(rootOp, feature.OnStart) + return feature, nil +} + +func (*Feature) OnStart(op *sqlsec.SQLOperation, args sqlsec.SQLOperationArgs) { + dyngo.EmitData(op, waf.RunEvent{ + Operation: op, + RunAddressData: addresses.NewAddressesBuilder(). + WithDBStatement(args.Query). + WithDBType(args.Driver). + Build(), + }) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/trace/trace.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/trace/trace.go new file mode 100644 index 00000000..eb721c35 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/trace/trace.go @@ -0,0 +1,53 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package trace + +import ( + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener" +) + +// AppSec-specific span tags that are expected to +// be in the web service entry span (span of type `web`) when AppSec is enabled. +var staticAppsecTags = map[string]any{ + "_dd.appsec.enabled": 1, + "_dd.runtime_family": "go", +} + +type AppsecSpanTransport struct{} + +func (*AppsecSpanTransport) String() string { + return "Appsec Span Transport" +} + +func (*AppsecSpanTransport) Stop() {} + +func NewAppsecSpanTransport(_ *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + ast := &AppsecSpanTransport{} + + dyngo.On(rootOp, ast.OnServiceEntryStart) + dyngo.On(rootOp, ast.OnSpanStart) + + return ast, nil +} + +// OnServiceEntryStart is the start listener of the trace.ServiceEntrySpanOperation start event. +// It listens for tags and serializable tags and sets them on the span when finishing the operation. 
+func (*AppsecSpanTransport) OnServiceEntryStart(op *trace.ServiceEntrySpanOperation, _ trace.ServiceEntrySpanArgs) { + op.SetTags(staticAppsecTags) + dyngo.OnData(op, op.OnSpanTagEvent) + dyngo.OnData(op, op.OnServiceEntrySpanTagEvent) + dyngo.OnData(op, op.OnJSONServiceEntrySpanTagEvent) + dyngo.OnData(op, op.OnServiceEntrySpanTagsBulkEvent) +} + +// OnSpanStart is the start listener of the trace.SpanOperation start event. +// It listens for tags and sets them on the current span when finishing the operation. +func (*AppsecSpanTransport) OnSpanStart(op *trace.SpanOperation, _ trace.SpanArgs) { + dyngo.OnData(op, op.OnSpanTagEvent) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/usersec/usec.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/usersec/usec.go new file mode 100644 index 00000000..91e490e8 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/usersec/usec.go @@ -0,0 +1,71 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package usersec + +import ( + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/usersec" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener" +) + +type Feature struct{} + +func (*Feature) String() string { + return "User Security" +} + +func (*Feature) Stop() {} + +func NewUserSecFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !cfg.SupportedAddresses.AnyOf( + addresses.UserIDAddr, + addresses.UserLoginAddr, + addresses.UserOrgAddr, + addresses.UserSessionIDAddr, + addresses.UserLoginSuccessAddr, + addresses.UserLoginFailureAddr) { + return nil, nil + } + + feature := &Feature{} + dyngo.OnFinish(rootOp, feature.OnFinish) + return feature, nil +} + +func (*Feature) OnFinish(op *usersec.UserLoginOperation, res usersec.UserLoginOperationRes) { + builder := addresses.NewAddressesBuilder(). + WithUserID(res.UserID). + WithUserLogin(res.UserLogin). + WithUserOrg(res.UserOrg). + WithUserSessionID(res.SessionID) + + switch op.EventType { + case usersec.UserLoginSuccess: + builder = builder.WithUserLoginSuccess(). + WithUserID(res.UserID). + WithUserLogin(res.UserLogin). + WithUserOrg(res.UserOrg). + WithUserSessionID(res.SessionID) + case usersec.UserLoginFailure: + builder = builder.WithUserLoginFailure(). + WithUserID(res.UserID). + WithUserLogin(res.UserLogin). + WithUserOrg(res.UserOrg) + case usersec.UserSet: + builder = builder.WithUserID(res.UserID). + WithUserLogin(res.UserLogin). + WithUserOrg(res.UserOrg). 
+ WithUserSessionID(res.SessionID) + } + + dyngo.EmitData(op, waf.RunEvent{ + Operation: op, + RunAddressData: builder.Build(), + }) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/tags.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/tags.go new file mode 100644 index 00000000..6ce403e4 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/tags.go @@ -0,0 +1,96 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package waf + +import ( + "slices" + "time" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace" + "github.com/DataDog/dd-trace-go/v2/internal" + emitter "github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf" + "github.com/DataDog/dd-trace-go/v2/internal/samplernames" + "github.com/DataDog/go-libddwaf/v4" + "github.com/DataDog/go-libddwaf/v4/timer" +) + +const ( + wafSpanTagPrefix = "_dd.appsec." + eventRulesVersionTag = wafSpanTagPrefix + "event_rules.version" + wafVersionTag = wafSpanTagPrefix + "waf.version" + wafErrorTag = wafSpanTagPrefix + "waf.error" + wafTimeoutTag = wafSpanTagPrefix + "waf.timeouts" + raspRuleEvalTag = wafSpanTagPrefix + "rasp.rule.eval" + raspErrorTag = wafSpanTagPrefix + "rasp.error" + raspTimeoutTag = wafSpanTagPrefix + "rasp.timeout" + truncationTagPrefix = wafSpanTagPrefix + "truncated." + + durationExtSuffix = ".duration_ext" + + blockedRequestTag = "appsec.blocked" +) + +// AddRulesMonitoringTags adds the tags related to security rules monitoring +func AddRulesMonitoringTags(th trace.TagSetter) { + th.SetTag(wafVersionTag, libddwaf.Version()) + th.SetTag(ext.ManualKeep, samplernames.AppSec) +} + +// AddWAFMonitoringTags adds the tags related to the monitoring of the WAF +func AddWAFMonitoringTags(th trace.TagSetter, metrics *emitter.ContextMetrics, rulesVersion string, truncations map[libddwaf.TruncationReason][]int, timerStats map[timer.Key]time.Duration) { + // Rules version is set for every request to help the backend associate Feature duration metrics with rule version + th.SetTag(eventRulesVersionTag, rulesVersion) + + if raspCallsCount := metrics.SumRASPCalls.Load(); raspCallsCount > 0 { + th.SetTag(raspRuleEvalTag, raspCallsCount) + } + + if raspErrorsCount := metrics.SumRASPErrors.Load(); raspErrorsCount > 0 { + th.SetTag(raspErrorTag, raspErrorsCount) + } + + if wafErrorsCount := metrics.SumWAFErrors.Load(); wafErrorsCount > 0 { + th.SetTag(wafErrorTag, wafErrorsCount) + } + + // Add metrics like `waf.duration` and `rasp.duration_ext` + for scope, value := range timerStats { + th.SetTag(wafSpanTagPrefix+string(scope)+durationExtSuffix, float64(value.Nanoseconds())/float64(time.Microsecond.Nanoseconds())) + for component, atomicValue := range metrics.SumDurations[scope] { + if value := atomicValue.Load(); value > 0 { + th.SetTag(wafSpanTagPrefix+string(scope)+"."+string(component), float64(value)/float64(time.Microsecond.Nanoseconds())) + } + } + } + + if value := metrics.SumWAFTimeouts.Load(); value > 0 { + th.SetTag(wafTimeoutTag, value) + } + + var sumRASPTimeouts uint32 + for ruleType := range metrics.SumRASPTimeouts { + sumRASPTimeouts += metrics.SumRASPTimeouts[ruleType].Load() + } + + if sumRASPTimeouts > 0 { + th.SetTag(raspTimeoutTag, sumRASPTimeouts) + } + + for 
reason, count := range truncations { + if len(count) > 0 { + th.SetTag(truncationTagPrefix+reason.String(), slices.Max(count)) + } + } +} + +// SetEventSpanTags sets the security event span tags related to an appsec event +func SetEventSpanTags(span trace.TagSetter) { + span.SetTag("_dd.origin", "appsec") + // Set the appsec.event tag needed by the appsec backend + span.SetTag("appsec.event", true) + span.SetTag("_dd.p.ts", internal.TraceSourceTagValue{Value: internal.ASMTraceSource}) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/waf.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/waf.go new file mode 100644 index 00000000..0ce10788 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/waf.go @@ -0,0 +1,167 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package waf + +import ( + "fmt" + "sync" + "time" + + "github.com/DataDog/dd-trace-go/v2/appsec/events" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions" + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses" + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/limiter" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/listener" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/stacktrace" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + telemetrylog "github.com/DataDog/dd-trace-go/v2/internal/telemetry/log" + "github.com/DataDog/go-libddwaf/v4" + "github.com/DataDog/go-libddwaf/v4/timer" +) + +type Feature struct { + timeout time.Duration + limiter *limiter.TokenTicker + handle *libddwaf.Handle + supportedAddrs config.AddressSet + rulesVersion string + reportRulesTags sync.Once + + telemetryMetrics waf.HandleMetrics + + // Determine if we can use [internal.MetaStructValue] to delegate the WAF events serialization to the trace writer + // or if we have to use the [SerializableTag] method to serialize the events + metaStructAvailable bool +} + +func NewWAFFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if ok, err := libddwaf.Load(); err != nil { + // 1. If there is an error and the loading is not ok: log as an unexpected error case and quit appsec + // Note that we assume here that the test for the unsupported target has been done before calling + // this method, so it is now considered an error for this method + if !ok { + return nil, fmt.Errorf("error while loading libddwaf: %w", err) + } + // 2. 
If there is an error and the loading is ok: log as an informative error where appsec can be used + telemetrylog.Warn("appsec: non-critical error while loading libddwaf: %s", err.Error(), telemetry.WithTags([]string{"product:appsec"})) + } + + newHandle, rulesVersion := cfg.NewHandle() + telemetryMetrics := waf.NewMetricsInstance(newHandle, rulesVersion) + if newHandle == nil { + // As specified @ https://docs.google.com/document/d/1t6U7WXko_QChhoNIApn0-CRNe6SAKuiiAQIyCRPUXP4/edit?tab=t.0#bookmark=id.vddhd140geg7 + telemetrylog.Error("Failed to build WAF instance: no valid rules or processors available", telemetry.WithTags([]string{ + "log_type:rc::asm_dd::diagnostic", + "appsec_config_key:*", + "rc_config_id:*", + })) + return nil, fmt.Errorf("failed to obtain WAF instance from the waf.Builder (loaded paths: %q)", cfg.WAFManager.ConfigPaths("")) + } + + cfg.SupportedAddresses = config.NewAddressSet(newHandle.Addresses()) + + tokenTicker := limiter.NewTokenTicker(cfg.TraceRateLimit, cfg.TraceRateLimit) + tokenTicker.Start() + + feature := &Feature{ + handle: newHandle, + timeout: cfg.WAFTimeout, + limiter: tokenTicker, + supportedAddrs: cfg.SupportedAddresses, + telemetryMetrics: telemetryMetrics, + metaStructAvailable: cfg.MetaStructAvailable, + rulesVersion: rulesVersion, + } + + dyngo.On(rootOp, feature.onStart) + dyngo.OnFinish(rootOp, feature.onFinish) + + return feature, nil +} + +func (waf *Feature) onStart(op *waf.ContextOperation, _ waf.ContextArgs) { + waf.reportRulesTags.Do(func() { + AddRulesMonitoringTags(op) + }) + + ctx, err := waf.handle.NewContext(timer.WithBudget(waf.timeout), timer.WithComponents(addresses.Scopes[:]...)) + if err != nil { + log.Debug("appsec: failed to create WAF context: %s", err.Error()) + } + + op.SwapContext(ctx) + op.SetLimiter(waf.limiter) + op.SetSupportedAddresses(waf.supportedAddrs) + op.SetMetricsInstance(waf.telemetryMetrics.NewContextMetrics()) + + // Run the WAF with the given address data + dyngo.OnData(op, op.OnEvent) + + waf.SetupActionHandlers(op) +} + +func (*Feature) SetupActionHandlers(op *waf.ContextOperation) { + // Set the blocking tag on the operation when a blocking event is received + dyngo.OnData(op, func(*events.BlockingSecurityEvent) { + log.Debug("appsec: blocking event detected") + op.SetTag(blockedRequestTag, true) + op.SetRequestBlocked() + }) + + // Register the stacktrace if one is requested by a WAF action + dyngo.OnData(op, func(action *actions.StackTraceAction) { + log.Debug("appsec: registering stack trace for security purposes") + op.AddStackTraces(action.Event) + }) + + dyngo.OnData(op, func(*waf.SecurityEvent) { + log.Debug("appsec: WAF detected a suspicious event") + SetEventSpanTags(op) + }) +} + +func (waf *Feature) onFinish(op *waf.ContextOperation, _ waf.ContextRes) { + ctx := op.SwapContext(nil) + if ctx == nil { + return + } + + ctx.Close() + + truncations := ctx.Truncations() + timerStats := ctx.Timer.Stats() + metrics := op.GetMetricsInstance() + AddWAFMonitoringTags(op, metrics, waf.rulesVersion, truncations, timerStats) + metrics.Submit(truncations, timerStats) + + if wafEvents := op.Events(); len(wafEvents) > 0 { + tagValue := map[string][]any{"triggers": wafEvents} + if waf.metaStructAvailable { + op.SetTag("appsec", internal.MetaStructValue{Value: tagValue}) + } else { + op.SetSerializableTag("_dd.appsec.json", tagValue) + } + } + + op.SetSerializableTags(op.Derivatives()) + if stacks := op.StackTraces(); len(stacks) > 0 { + op.SetTag(stacktrace.SpanKey, stacktrace.GetSpanValue(stacks...)) + } +} + 
+func (*Feature) String() string { + return "Web Application Firewall" +} + +func (waf *Feature) Stop() { + waf.limiter.Stop() + waf.handle.Close() +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/remoteconfig.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/remoteconfig.go new file mode 100644 index 00000000..838a517b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/remoteconfig.go @@ -0,0 +1,403 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022 Datadog, Inc. + +package appsec + +import ( + "encoding/json" + "errors" + "fmt" + "maps" + "slices" + "strings" + + "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/orchestrion" + "github.com/DataDog/dd-trace-go/v2/internal/remoteconfig" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + telemetrylog "github.com/DataDog/dd-trace-go/v2/internal/telemetry/log" + "github.com/DataDog/go-libddwaf/v4" +) + +// onRCRulesUpdate is the RC callback called when security rules related RC updates are available +func (a *appsec) onRCRulesUpdate(updates map[string]remoteconfig.ProductUpdate) map[string]state.ApplyStatus { + statuses := make(map[string]state.ApplyStatus) + + // If appsec was deactivated through RC, stop here + if !a.started { + for _, pu := range updates { + for path := range pu { + // We are not acknowledging anything... since we are ignoring all these updates... + statuses[path] = state.ApplyStatus{State: state.ApplyStateUnacknowledged} + } + } + return statuses + } + + // Updates the local [config.WAFManager] with the new data... We track deletions and add/updates + // separately as it is important to process all deletions first. + // See: https://docs.google.com/document/d/1t6U7WXko_QChhoNIApn0-CRNe6SAKuiiAQIyCRPUXP4/edit?tab=t.0#heading=h.pqke0ujtvm2j + type UpdatedConfig struct { + Product string + Content map[string]any + } + addOrUpdates := make(map[string]UpdatedConfig) + + for product, prodUpdates := range updates { + for path, data := range prodUpdates { + switch product { + case state.ProductASMDD, state.ProductASMData, state.ProductASM: + if data == nil { + // Perofrm the deletion right away; we need to do these before any other updates... + log.Debug("appsec: remote config: removing configuration %q", path) + a.cfg.WAFManager.RemoveConfig(path) + statuses[path] = state.ApplyStatus{State: state.ApplyStateAcknowledged} + continue + } + cfg := UpdatedConfig{Product: product} + if err := json.Unmarshal(data, &cfg.Content); err != nil { + log.Error("appsec: unmarshaling remote config update for %s (%q): %s", product, path, err.Error()) + statuses[product] = state.ApplyStatus{State: state.ApplyStateError, Error: err.Error()} + continue + } + addOrUpdates[path] = cfg + if product == state.ProductASMDD { + // Remove the default config if present when an ASM_DD config is received. 
+ log.Debug("appsec: remote config: processed ASM_DD addition/update; removing default config if present") + if deletedDefault := a.cfg.WAFManager.RemoveDefaultConfig(); deletedDefault { + log.Debug("appsec: remote config: successfully removed default config") + } + } + case state.ProductASMFeatures: + // This is a global hook, so it'll receive [remoteconfig.ProductASMFeatures] updates as well, which are not + // relevant for this particular handler. These are handled by a product-specific handler, + // [appsec.handleASMFeatures]. + default: + log.Debug("appsec: remote config: ignoring RC update for non-ASM product %q", path) + } + } + } + + // Sort the paths to apply updates in a deterministic order... + addOrUpdatePaths := slices.Collect(maps.Keys(addOrUpdates)) + slices.Sort(addOrUpdatePaths) + + // Apply all the additions and updates + for _, path := range addOrUpdatePaths { + update := addOrUpdates[path] + log.Debug("appsec: remote config: adding/updating configuration %q", path) + diag, err := a.cfg.WAFManager.AddOrUpdateConfig(path, update.Content) + if err != nil { + log.Debug("appsec: remote config: error while adding/updating configuration %q: %s", path, err.Error()) + // Configuration object has been fully rejected; or there was an error processing it or parsing the diagnostics + // value. If we have a diagnostics object, encode all errors from the diagnostics object as a JSON value, as + // described by: + // https://docs.google.com/document/d/1t6U7WXko_QChhoNIApn0-CRNe6SAKuiiAQIyCRPUXP4/edit?tab=t.0#heading=h.6ud96uy74pzs + type errInfo struct { + Error string `json:"error,omitempty"` + Errors map[string][]string `json:"errors,omitempty"` + } + var errs map[string]errInfo + diag.EachFeature(func(name string, feat *libddwaf.Feature) { + var ( + info errInfo + some bool + ) + if feat.Error != "" { + log.Debug("appsec: remote config: error in %q feature %s: %s", path, name, feat.Error) + info.Error = feat.Error + some = true + } + for msg, ids := range feat.Errors { + log.Debug("appsec: remote config: error in %q feature %s: %s for %q", path, name, msg, ids) + if info.Errors == nil { + info.Errors = make(map[string][]string) + } + info.Errors[msg] = ids + some = true + } + if !some { + return + } + if errs == nil { + errs = make(map[string]errInfo) + } + errs[name] = info + }) + + errMsg := err.Error() + if len(errs) > 0 { + if data, err := json.Marshal(errs); err == nil { + errMsg = string(data) + } else { + telemetrylog.Error("appsec: remote config: failed to marshal error details: %s", err.Error()) + } + } + + statuses[path] = state.ApplyStatus{State: state.ApplyStateError, Error: errMsg} + continue + } + + statuses[path] = state.ApplyStatus{State: state.ApplyStateAcknowledged} + diag.EachFeature(logDiagnosticMessages(update.Product, path)) + } + if len(a.cfg.WAFManager.ConfigPaths(`^(?:datadog/\d+|employee)/ASM_DD/.+`)) == 0 { + log.Debug("appsec: remote config: no ASM_DD config loaded; restoring default config if available") + if err := a.cfg.WAFManager.RestoreDefaultConfig(); err != nil { + telemetrylog.Error("appsec: RC could not restore default config: %s", err.Error()) + } + } + + if log.DebugEnabled() { + // Avoiding the call to ConfigPaths if the Debug level is not enabled... 
+ log.Debug("appsec: remote config: rules loaded after update: %q", a.cfg.WAFManager.ConfigPaths("")) + } + + // If an error occurs while updating the WAF handle, don't swap the RulesManager and propagate the error + // to all config statuses since we can't know which config is the faulty one + if err := a.SwapRootOperation(); err != nil { + log.Error("appsec: remote config: could not apply the new security rules: %s", err.Error()) + for k := range statuses { + if statuses[k].State == state.ApplyStateError || statuses[k].State == state.ApplyStateUnacknowledged { + // Leave failed & un-acknowledged configs as-is... This failure is not related to these... + continue + } + statuses[k] = state.ApplyStatus{State: state.ApplyStateError, Error: err.Error()} + } + } + + return statuses +} + +func logDiagnosticMessages(product string, path string) func(string, *libddwaf.Feature) { + return func(name string, feat *libddwaf.Feature) { + if feat.Error == "" && len(feat.Errors) == 0 && len(feat.Warnings) == 0 { + // No errors or warnings; nothing to report... + return + } + + path, _ := remoteconfig.ParsePath(path) + // As defined @ https://docs.google.com/document/d/1t6U7WXko_QChhoNIApn0-CRNe6SAKuiiAQIyCRPUXP4/edit?tab=t.0#bookmark=id.cthxzqjuodhh + tags := []string{ + "log_type:rc::" + strings.ToLower(product) + "::diagnostic", + "appsec_config_key:" + name, + "rc_config_id:" + path.ConfigID, + } + if err := feat.Error; err != "" { + telemetrylog.Error("%s", err, telemetry.WithTags(tags)) + } + for err, ids := range feat.Errors { + telemetrylog.Error("%q: %q", err, ids, telemetry.WithTags(tags)) + } + for err, ids := range feat.Warnings { + telemetrylog.Warn("%q: %q", err, ids, telemetry.WithTags(tags)) + } + } +} + +// handleASMFeatures deserializes an ASM_FEATURES configuration received through remote config +// and starts/stops appsec accordingly. +func (a *appsec) handleASMFeatures(u remoteconfig.ProductUpdate) map[string]state.ApplyStatus { + if len(u) == 0 { + // That should not actually happen; but would not be "invalid" per se. + return nil + } + + if len(u) > 1 { + log.Warn("appsec: Remote Config: received multiple ASM_FEATURES update; not processing any.") + statuses := make(map[string]state.ApplyStatus, len(u)) + for path := range u { + statuses[path] = state.ApplyStatus{State: state.ApplyStateUnacknowledged} + } + return statuses + } + + // NOTE: There is exactly 1 item in the map at this point; but it's a map, so we for-range over it. + var ( + path string + raw []byte + ) + for p, r := range u { + path, raw = p, r + } + + log.Debug("appsec: remote config: processing %s", path) + + // A nil config means ASM was disabled, and we stopped receiving the config file + // Don't ack the config in this case and return early + if raw == nil { + log.Debug("appsec: remote config: Stopping AppSec") + a.stop() + return map[string]state.ApplyStatus{path: {State: state.ApplyStateAcknowledged}} + } + + // Parse the config object we just received... + var parsed state.ASMFeaturesData + if err := json.Unmarshal(raw, &parsed); err != nil { + log.Error("appsec: remote config: error while unmarshalling %q: %s. Configuration won't be applied.", path, err.Error()) + return map[string]state.ApplyStatus{path: {State: state.ApplyStateError, Error: err.Error()}} + } + + // RC triggers activation of ASM; ASM is not started yet... Starting it! 
+ if parsed.ASM.Enabled && !a.started { + log.Debug("appsec: remote config: Starting AppSec") + if err := a.start(); err != nil { + log.Error("appsec: remote config: error while processing %q. Configuration won't be applied: %s", path, err.Error()) + return map[string]state.ApplyStatus{path: {State: state.ApplyStateError, Error: err.Error()}} + } + } + + // RC triggers desactivation of ASM; ASM is started... Stopping it! + if !parsed.ASM.Enabled && a.started { + log.Debug("appsec: remote config: Stopping AppSec") + a.stop() + return map[string]state.ApplyStatus{path: {State: state.ApplyStateAcknowledged}} + } + + // If we got here, we have an idempotent success! + return map[string]state.ApplyStatus{path: {State: state.ApplyStateAcknowledged}} +} + +func (a *appsec) startRC() error { + if a.cfg.RC != nil { + return remoteconfig.Start(*a.cfg.RC) + } + return nil +} + +func (a *appsec) stopRC() { + if a.cfg.RC != nil { + remoteconfig.Stop() + } +} + +func (a *appsec) registerRCProduct(p string) error { + if a.cfg.RC == nil { + return fmt.Errorf("no valid remote configuration client") + } + return remoteconfig.RegisterProduct(p) +} + +func (a *appsec) registerRCCapability(c remoteconfig.Capability) error { + if a.cfg.RC == nil { + return fmt.Errorf("no valid remote configuration client") + } + return remoteconfig.RegisterCapability(c) +} + +func (a *appsec) unregisterRCCapability(c remoteconfig.Capability) error { + if a.cfg.RC == nil { + log.Debug("appsec: remote config: no valid remote configuration client") + return nil + } + return remoteconfig.UnregisterCapability(c) +} + +func (a *appsec) enableRemoteActivation() error { + if a.cfg.RC == nil { + return errors.New("no valid remote configuration client") + } + log.Debug("appsec: Remote Config: subscribing to ASM_FEATURES updates...") + return remoteconfig.Subscribe(state.ProductASMFeatures, a.handleASMFeatures, remoteconfig.ASMActivation) +} + +var baseCapabilities = [...]remoteconfig.Capability{ + remoteconfig.ASMDDMultiConfig, + remoteconfig.ASMDDRules, + remoteconfig.ASMExclusions, + remoteconfig.ASMCustomRules, + remoteconfig.ASMTrustedIPs, + remoteconfig.ASMExclusionData, + remoteconfig.ASMEndpointFingerprinting, + remoteconfig.ASMSessionFingerprinting, + remoteconfig.ASMNetworkFingerprinting, + remoteconfig.ASMHeaderFingerprinting, + remoteconfig.ASMTraceTaggingRules, +} + +var blockingCapabilities = [...]remoteconfig.Capability{ + remoteconfig.ASMUserBlocking, + remoteconfig.ASMRequestBlocking, + remoteconfig.ASMIPBlocking, + remoteconfig.ASMCustomBlockingResponse, +} + +func (a *appsec) enableRCBlocking() { + if a.cfg.RC == nil { + log.Debug("appsec: remote config: no valid remote configuration client") + return + } + + products := []string{state.ProductASM, state.ProductASMDD, state.ProductASMData} + for _, p := range products { + if err := a.registerRCProduct(p); err != nil { + log.Debug("appsec: remote config: couldn't register product %q: %s", p, err.Error()) + } + } + + log.Debug("appsec: remote config: registering onRCRulesUpdate callback") + if err := remoteconfig.RegisterCallback(a.onRCRulesUpdate); err != nil { + log.Debug("appsec: remote config: couldn't register callback: %s", err.Error()) + } + + for _, c := range baseCapabilities { + if err := a.registerRCCapability(c); err != nil { + log.Debug("appsec: remote config: couldn't register capability %d: %s", c, err.Error()) + } + } + + if localRulesPath, hasLocalRules := env.Lookup(config.EnvRules); hasLocalRules { + log.Debug("appsec: remote config: using rules from %s; 
will not register blocking capabilities", localRulesPath) + return + } + if !a.cfg.BlockingUnavailable { + for _, c := range blockingCapabilities { + if err := a.registerRCCapability(c); err != nil { + log.Debug("appsec: remote config: couldn't register capability %d: %s", c, err.Error()) + } + } + } +} + +func (a *appsec) enableRASP() { + if !a.cfg.RASP { + return + } + if err := remoteconfig.RegisterCapability(remoteconfig.ASMRASPSSRF); err != nil { + log.Debug("appsec: remote config: couldn't register RASP SSRF: %s", err.Error()) + } + if err := remoteconfig.RegisterCapability(remoteconfig.ASMRASPSQLI); err != nil { + log.Debug("appsec: remote config: couldn't register RASP SQLI: %s", err.Error()) + } + if orchestrion.Enabled() { + if err := remoteconfig.RegisterCapability(remoteconfig.ASMRASPLFI); err != nil { + log.Debug("appsec: remote config: couldn't register RASP LFI: %s", err.Error()) + } + } +} + +func (a *appsec) disableRCBlocking() { + if a.cfg.RC == nil { + return + } + for _, c := range baseCapabilities { + if err := a.unregisterRCCapability(c); err != nil { + log.Debug("appsec: remote config: couldn't unregister capability %d: %s", c, err.Error()) + } + } + if !a.cfg.BlockingUnavailable { + for _, c := range blockingCapabilities { + if err := a.unregisterRCCapability(c); err != nil { + log.Debug("appsec: remote config: couldn't unregister capability %d: %s", c, err.Error()) + } + } + } + if err := remoteconfig.UnregisterCallback(a.onRCRulesUpdate); err != nil { + log.Debug("appsec: remote config: couldn't unregister callback: %s", err.Error()) + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/telemetry.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/telemetry.go new file mode 100644 index 00000000..94b07516 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/telemetry.go @@ -0,0 +1,236 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package appsec + +import ( + "bytes" + "errors" + "io" + "os" + "os/exec" + "runtime" + "sync" + + "github.com/DataDog/dd-trace-go/v2/internal/appsec/config" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + telemetrylog "github.com/DataDog/dd-trace-go/v2/internal/telemetry/log" + "github.com/DataDog/go-libddwaf/v4" + "github.com/DataDog/go-libddwaf/v4/waferrors" +) + +var ( + detectLibDLOnce sync.Once + wafUsable, wafError = libddwaf.Usable() + wafSupported = !errors.As(wafError, &waferrors.UnsupportedOSArchError{}) && !errors.As(wafError, &waferrors.UnsupportedGoVersionError{}) + staticConfigs = []telemetry.Configuration{ + {Name: "goos", Value: runtime.GOOS, Origin: telemetry.OriginCode}, + {Name: "goarch", Value: runtime.GOARCH, Origin: telemetry.OriginCode}, + {Name: "cgo_enabled", Value: cgoEnabled, Origin: telemetry.OriginCode}, + {Name: "waf_supports_target", Value: wafSupported, Origin: telemetry.OriginCode}, + {Name: "waf_healthy", Value: wafUsable, Origin: telemetry.OriginCode}, + } +) + +// init sends the static telemetry for AppSec. +func init() { + telemetry.RegisterAppConfigs(staticConfigs...) 
+} + +func registerAppsecStartTelemetry(mode config.EnablementMode, origin telemetry.Origin) { + if mode == config.RCStandby { + return + } + + if origin == telemetry.OriginCode { + telemetry.RegisterAppConfig("WithEnablementMode", mode, telemetry.OriginCode) + } + + telemetry.ProductStarted(telemetry.NamespaceAppSec) + // TODO: add appsec.enabled metric once this metric is enabled backend-side + + detectLibDLOnce.Do(detectLibDL) +} + +func detectLibDL() { + if runtime.GOOS != "linux" { + return + } + + for _, method := range detectLibDLMethods { + if ok, err := method.method(); ok { + telemetrylog.Debug("libdl detected using method: %s", method.name, telemetry.WithTags([]string{"method:" + method.name})) + log.Debug("libdl detected using method: %s", method.name) + telemetry.RegisterAppConfig("libdl_present", true, telemetry.OriginCode) + return + } else if err != nil { + log.Debug("failed to detect libdl with method %s: %v", method.name, err.Error()) + } + } + + telemetry.RegisterAppConfig("libdl_present", false, telemetry.OriginCode) +} + +func registerAppsecStopTelemetry() { + telemetry.ProductStopped(telemetry.NamespaceAppSec) +} + +var detectLibDLMethods = []struct { + name string + method func() (bool, error) +}{ + {"cgo", func() (bool, error) { + return cgoEnabled, nil + }}, + {"ldconfig", ldconfig}, + {"ldsocache", ldCache}, + {"ldd", ldd}, + {"proc_maps", procMaps}, + {"manual_search", manualSearch}, +} + +// ldCache is messily looking into /etc/ld.so.cache to check if libdl.so.2 is present. +// Normally ld.so.cache should be parsed but standards differ so simply looking for the string should make sense. +// It is sadly disabled by default in alpine images. +func ldCache() (bool, error) { + fp, err := os.Open("/etc/ld.so.cache") + if err != nil { + if os.IsNotExist(err) { + return false, nil // ld.so.cache does not exist, so we assume libdl is not present + } + return false, err + } + defer fp.Close() + + output, err := io.ReadAll(io.LimitReader(fp, libDLReadLimit)) + if err != nil { + return false, err + } + + return searchLibdl(output), nil +} + +// ldd on ourself will check if libdl.so if we are currently running with libdl. It needs the ldd binary. +// We first try to check the whole system, then we check the current process. +func ldd() (bool, error) { + var output limitedBuffer + cmd := exec.Command("ldd", "/proc/1/exe") + cmd.Stdout = &output + cmd.Stderr = io.Discard + + oneErr := cmd.Run() + + if searchLibdl(output.Bytes()) { + return true, nil + } + + var selfOutput limitedBuffer + cmd = exec.Command("ldd", "/proc/self/exe") + cmd.Stdout = &output + cmd.Stderr = io.Discard + + selfErr := cmd.Run() + + return searchLibdl(selfOutput.Bytes()), errors.Join(oneErr, selfErr) +} + +// ldconfig -p is the most reliable way to check for libdl.so presence but it does not work on musl. It also +// needs the ldconfig binary, which is not always available in containers or minimal environments. +func ldconfig() (bool, error) { + var output limitedBuffer + cmd := exec.Command("ldconfig", "-p") + cmd.Stdout = &output + cmd.Stderr = io.Discard + + if err := cmd.Run(); err != nil { + return false, err + } + + return searchLibdl(output.Bytes()), nil +} + +// procMaps is another way to check for libdl.so presence, that works on musl if we are running with libdl already. +// but does not always work because libdl can be symlink. +// We first try to check the whole system, then we check the current process. 
+func procMaps() (bool, error) { + fp, err := os.Open("/proc/1/maps") + if err != nil { + if os.IsNotExist(err) || os.IsPermission(err) { + return false, nil + } + return false, err + } + + defer fp.Close() + + output, oneErr := io.ReadAll(io.LimitReader(fp, libDLReadLimit)) + + if searchLibdl(output) { + return true, nil + } + + fp, err = os.Open("/proc/self/maps") + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + defer fp.Close() + + output, selfErr := io.ReadAll(io.LimitReader(fp, libDLReadLimit)) + + return searchLibdl(output), errors.Join(oneErr, selfErr) +} + +// manualSearch is a fallback method to search for libdl.so.2 in common library directories. +// See ld.so(8) for more details on the directories searched by the dynamic linker. +func manualSearch() (bool, error) { + for _, dir := range []string{"/lib", "/usr/lib", "/lib64", "/usr/lib64"} { + entries, err := os.ReadDir(dir) + if err != nil { + if os.IsNotExist(err) { + continue + } + return false, err + } + + for _, entry := range entries { + if !entry.IsDir() && entry.Name() == libDLName { + return true, nil + } + } + } + + return false, nil +} + +func searchLibdl(input []byte) bool { + data := bytes.TrimSpace(input) + if len(data) == 0 { + return false + } + + return bytes.Contains(data, []byte(libDLName)) +} + +// limitedBuffer is a custom buffer that limits its size to 256 KiB. +type limitedBuffer struct { + bytes.Buffer +} + +const ( + libDLReadLimit = 256 * 1024 + libDLName = "libdl.so.2" +) + +func (b *limitedBuffer) Write(p []byte) (n int, err error) { + if b.Len()+len(p) > libDLReadLimit { // 256 KiB limit + return 0, io.ErrShortWrite + } + return b.Buffer.Write(p) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/telemetry_cgo.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/telemetry_cgo.go new file mode 100644 index 00000000..da3ce74f --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/telemetry_cgo.go @@ -0,0 +1,10 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +//go:build cgo + +package appsec + +const cgoEnabled = true diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/telemetry_nocgo.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/telemetry_nocgo.go new file mode 100644 index 00000000..f7896932 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/telemetry_nocgo.go @@ -0,0 +1,10 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +//go:build !cgo + +package appsec + +const cgoEnabled = false diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/civisibility.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/civisibility.go new file mode 100644 index 00000000..d1d55329 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/civisibility.go @@ -0,0 +1,41 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
+ +package civisibility + +import "sync/atomic" + +type State int + +const ( + StateUninitialized State = iota + StateInitializing + StateInitialized + StateExiting + StateExited +) + +var ( + status atomic.Int32 + isTestMode atomic.Bool +) + +func GetState() State { + // Get the state atomically + return State(status.Load()) +} + +func SetState(state State) { + // Set the state atomically + status.Store(int32(state)) +} + +func SetTestMode() { + isTestMode.Store(true) +} + +func IsTestMode() bool { + return isTestMode.Load() +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/ci.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/ci.go similarity index 95% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/ci.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/ci.go index d1ca2e80..4d98c37e 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/ci.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/ci.go @@ -6,6 +6,9 @@ package constants const ( + // CIJobID indicates the id of the CI job. + CIJobID = "ci.job.id" + // CIJobName indicates the name of the CI job. CIJobName = "ci.job.name" diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/env.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/env.go new file mode 100644 index 00000000..ad6e485a --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/env.go @@ -0,0 +1,58 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package constants + +const ( + // CIVisibilityEnabledEnvironmentVariable indicates if CI Visibility mode is enabled. + // This environment variable should be set to "1" or "true" to enable CI Visibility mode, which activates tracing and other + // features related to CI Visibility in the Datadog platform. + CIVisibilityEnabledEnvironmentVariable = "DD_CIVISIBILITY_ENABLED" + + // CIVisibilityAgentlessEnabledEnvironmentVariable indicates if CI Visibility agentless mode is enabled. + // This environment variable should be set to "1" or "true" to enable agentless mode for CI Visibility, where traces + // are sent directly to Datadog without using a local agent. + CIVisibilityAgentlessEnabledEnvironmentVariable = "DD_CIVISIBILITY_AGENTLESS_ENABLED" + + // CIVisibilityAgentlessURLEnvironmentVariable forces the agentless URL to a custom one. + // This environment variable allows you to specify a custom URL for the agentless intake in CI Visibility mode. + CIVisibilityAgentlessURLEnvironmentVariable = "DD_CIVISIBILITY_AGENTLESS_URL" + + // APIKeyEnvironmentVariable indicates the API key to be used for agentless intake. + // This environment variable should be set to your Datadog API key, allowing the agentless mode to authenticate and + // send data directly to the Datadog platform. 
+	APIKeyEnvironmentVariable = "DD_API_KEY"
+
+	// CIVisibilityTestSessionNameEnvironmentVariable indicates the test session name to be used on CI Visibility payloads.
+	CIVisibilityTestSessionNameEnvironmentVariable = "DD_TEST_SESSION_NAME"
+
+	// CIVisibilityFlakyRetryEnabledEnvironmentVariable is a kill-switch that lets users explicitly disable retries even if the remote setting is enabled.
+	// This environment variable should be set to "0" or "false" to disable the flaky retry feature.
+	CIVisibilityFlakyRetryEnabledEnvironmentVariable = "DD_CIVISIBILITY_FLAKY_RETRY_ENABLED"
+
+	// CIVisibilityFlakyRetryCountEnvironmentVariable indicates the maximum number of retry attempts for a single test case.
+	CIVisibilityFlakyRetryCountEnvironmentVariable = "DD_CIVISIBILITY_FLAKY_RETRY_COUNT"
+
+	// CIVisibilityTotalFlakyRetryCountEnvironmentVariable indicates the maximum number of retry attempts for the entire session.
+	CIVisibilityTotalFlakyRetryCountEnvironmentVariable = "DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT"
+
+	// CIVisibilityTestManagementEnabledEnvironmentVariable indicates if the test management feature is enabled.
+	CIVisibilityTestManagementEnabledEnvironmentVariable = "DD_TEST_MANAGEMENT_ENABLED"
+
+	// CIVisibilityTestManagementAttemptToFixRetriesEnvironmentVariable indicates the maximum number of retries for the attempt to fix a test.
+	CIVisibilityTestManagementAttemptToFixRetriesEnvironmentVariable = "DD_TEST_MANAGEMENT_ATTEMPT_TO_FIX_RETRIES"
+
+	// CIVisibilityAutoInstrumentationProviderEnvironmentVariable indicates that the auto-instrumentation script was used.
+	CIVisibilityAutoInstrumentationProviderEnvironmentVariable = "DD_CIVISIBILITY_AUTO_INSTRUMENTATION_PROVIDER"
+
+	// CIVisibilityEnvironmentDataFilePath is the environment variable that holds the path to the file containing the environmental data.
+	CIVisibilityEnvironmentDataFilePath = "DD_TEST_OPTIMIZATION_ENV_DATA_FILE"
+
+	// CIVisibilityImpactedTestsDetectionEnabled indicates if the impacted tests detection feature is enabled.
+	CIVisibilityImpactedTestsDetectionEnabled = "DD_CIVISIBILITY_IMPACTED_TESTS_DETECTION_ENABLED"
+
+	// CIVisibilityInternalParallelEarlyFlakeDetectionEnabled indicates if the internal parallel early flake detection feature is enabled.
+	CIVisibilityInternalParallelEarlyFlakeDetectionEnabled = "DD_CIVISIBILITY_INTERNAL_PARALLEL_EARLY_FLAKE_DETECTION_ENABLED"
+)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/git.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/git.go
new file mode 100644
index 00000000..f23209fc
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/git.go
@@ -0,0 +1,88 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package constants
+
+const (
+	// GitBranch indicates the current git branch.
+	// This constant is used to tag traces with the branch name being used in the CI/CD process.
+	GitBranch = "git.branch"
+
+	// GitCommitAuthorDate indicates the git commit author date related to the build.
+	// This constant is used to tag traces with the date when the author created the commit.
+	GitCommitAuthorDate = "git.commit.author.date"
+
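Stepping back to the DD_* switches defined in env.go above: a consumer typically gates features with boolean parsing along these lines (the helper below is hypothetical, for illustration only, not the library's API):

    package main

    import (
    	"fmt"
    	"os"
    	"strconv"
    )

    // enabled interprets the usual boolean spellings ("1", "true", "TRUE", ...) of flag-style DD_* variables.
    func enabled(name string) bool {
    	v, err := strconv.ParseBool(os.Getenv(name))
    	return err == nil && v
    }

    func main() {
    	if enabled("DD_CIVISIBILITY_ENABLED") {
    		fmt.Println("CI Visibility on; agentless:", enabled("DD_CIVISIBILITY_AGENTLESS_ENABLED"))
    	}
    }

+	// GitCommitAuthorEmail indicates the git commit author email related to the build.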
+ // This constant is used to tag traces with the email of the author who created the commit. + GitCommitAuthorEmail = "git.commit.author.email" + + // GitCommitAuthorName indicates the git commit author name related to the build. + // This constant is used to tag traces with the name of the author who created the commit. + GitCommitAuthorName = "git.commit.author.name" + + // GitCommitCommitterDate indicates the git commit committer date related to the build. + // This constant is used to tag traces with the date when the committer applied the commit. + GitCommitCommitterDate = "git.commit.committer.date" + + // GitCommitCommitterEmail indicates the git commit committer email related to the build. + // This constant is used to tag traces with the email of the committer who applied the commit. + GitCommitCommitterEmail = "git.commit.committer.email" + + // GitCommitCommitterName indicates the git commit committer name related to the build. + // This constant is used to tag traces with the name of the committer who applied the commit. + GitCommitCommitterName = "git.commit.committer.name" + + // GitCommitMessage indicates the git commit message related to the build. + // This constant is used to tag traces with the message associated with the commit. + GitCommitMessage = "git.commit.message" + + // GitCommitSHA indicates the git commit SHA1 hash related to the build. + // This constant is used to tag traces with the SHA1 hash of the commit. + GitCommitSHA = "git.commit.sha" + + // GitRepositoryURL indicates the git repository URL related to the build. + // This constant is used to tag traces with the URL of the repository where the commit is stored. + GitRepositoryURL = "git.repository_url" + + // GitTag indicates the current git tag. + // This constant is used to tag traces with the tag name associated with the current commit. + GitTag = "git.tag" + + // GitHeadCommit indicates the GIT head commit hash. + GitHeadCommit = "git.commit.head.sha" + + // GitHeadMessage indicates the GIT head commit message. + GitHeadMessage = "git.commit.head.message" + + // GitHeadAuthorDate indicates the GIT head commit author date. + GitHeadAuthorDate = "git.commit.head.author.date" + + // GitHeadAuthorEmail indicates the GIT head commit author email. + GitHeadAuthorEmail = "git.commit.head.author.email" + + // GitHeadAuthorName indicates the GIT head commit author name. + GitHeadAuthorName = "git.commit.head.author.name" + + // GitHeadCommitterDate indicates the GIT head commit committer date. + GitHeadCommitterDate = "git.commit.head.committer.date" + + // GitHeadCommitterEmail indicates the GIT head commit committer email. + GitHeadCommitterEmail = "git.commit.head.committer.email" + + // GitHeadCommitterName indicates the GIT head commit committer name. + GitHeadCommitterName = "git.commit.head.committer.name" + + // GitPrBaseCommit indicates the GIT PR base commit hash. + GitPrBaseCommit = "git.pull_request.base_branch_sha" + + // GitPrBaseHeadCommit indicates the GIT PR base branch head commit hash. + GitPrBaseHeadCommit = "git.pull_request.base_branch_head_sha" + + // GitPrBaseBranch indicates the GIT PR base branch name. + GitPrBaseBranch = "git.pull_request.base_branch" + + // PrNumber indicates the pull request number. 
+ PrNumber = "pr.number" +) diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/os.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/os.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/os.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/os.go diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/runtime.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/runtime.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/runtime.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/runtime.go diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/span_types.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/span_types.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/span_types.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/span_types.go diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/tags.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/tags.go new file mode 100644 index 00000000..ba1499a2 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/tags.go @@ -0,0 +1,71 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package constants + +const ( + // Origin is a tag used to indicate the origin of the data. + // This tag helps in identifying the source of the trace data. + Origin = "_dd.origin" + + // LogicalCPUCores is a tag used to indicate the number of logical cpu cores + // This tag is used by the backend to perform calculations + LogicalCPUCores = "_dd.host.vcpu_count" + + // CIAppTestOrigin defines the CIApp test origin value. + // This constant is used to tag traces that originate from CIApp test executions. + CIAppTestOrigin = "ciapp-test" + + // TestSessionIDTag defines the test session ID tag for the CI Visibility Protocol. + // This constant is used to tag traces with the ID of the test session. + TestSessionIDTag string = "test_session_id" + + // TestModuleIDTag defines the test module ID tag for the CI Visibility Protocol. + // This constant is used to tag traces with the ID of the test module. + TestModuleIDTag string = "test_module_id" + + // TestSuiteIDTag defines the test suite ID tag for the CI Visibility Protocol. + // This constant is used to tag traces with the ID of the test suite. + TestSuiteIDTag string = "test_suite_id" + + // ItrCorrelationIDTag defines the correlation ID for the intelligent test runner tag for the CI Visibility Protocol. + // This constant is used to tag traces with the correlation ID for intelligent test runs. + ItrCorrelationIDTag string = "itr_correlation_id" + + // UserProvidedTestServiceTag defines if the user provided the test service. + UserProvidedTestServiceTag string = "_dd.test.is_user_provided_service" +) + +// Coverage tags +const ( + // CodeCoverageEnabledTag defines if code coverage has been enabled. + // This constant is used to tag traces to indicate whether code coverage measurement is enabled. 
+ CodeCoverageEnabledTag string = "test.code_coverage.enabled" + + // CodeCoveragePercentageOfTotalLines defines the percentage of total code coverage by a session. + // This constant is used to tag traces with the percentage of code lines covered during the test session. + CodeCoveragePercentageOfTotalLines string = "test.code_coverage.lines_pct" +) + +// Capabilities +const ( + // LibraryCapabilitiesTestImpactAnalysis is a tag used to indicate the test impact analysis capability of the library. + LibraryCapabilitiesTestImpactAnalysis = "_dd.library_capabilities.test_impact_analysis" + + // LibraryCapabilitiesEarlyFlakeDetection is a tag used to indicate the early flake detection capability of the library. + LibraryCapabilitiesEarlyFlakeDetection = "_dd.library_capabilities.early_flake_detection" + + // LibraryCapabilitiesAutoTestRetries is a tag used to indicate the auto test retries capability of the library. + LibraryCapabilitiesAutoTestRetries = "_dd.library_capabilities.auto_test_retries" + + // LibraryCapabilitiesTestManagementQuarantine is a tag used to indicate the quarantine capability of the library. + LibraryCapabilitiesTestManagementQuarantine = "_dd.library_capabilities.test_management.quarantine" + + // LibraryCapabilitiesTestManagementDisable is a tag used to indicate the disable capability of the library. + LibraryCapabilitiesTestManagementDisable = "_dd.library_capabilities.test_management.disable" + + // LibraryCapabilitiesTestManagementAttemptToFix is a tag used to indicate the attempt to fix capability of the library. + LibraryCapabilitiesTestManagementAttemptToFix = "_dd.library_capabilities.test_management.attempt_to_fix" +) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/test_tags.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/test_tags.go new file mode 100644 index 00000000..cd8c40dc --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/test_tags.go @@ -0,0 +1,180 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package constants + +const ( + // TestModule indicates the test module name. + // This constant is used to tag traces with the name of the test module. + TestModule = "test.module" + + // TestSuite indicates the test suite name. + // This constant is used to tag traces with the name of the test suite. + TestSuite = "test.suite" + + // TestName indicates the test name. + // This constant is used to tag traces with the name of the test. + TestName = "test.name" + + // TestType indicates the type of the test (e.g., test, benchmark). + // This constant is used to tag traces with the type of the test. + TestType = "test.type" + + // TestFramework indicates the test framework name. + // This constant is used to tag traces with the name of the test framework. + TestFramework = "test.framework" + + // TestFrameworkVersion indicates the test framework version. + // This constant is used to tag traces with the version of the test framework. + TestFrameworkVersion = "test.framework_version" + + // TestStatus indicates the test execution status. + // This constant is used to tag traces with the execution status of the test. + TestStatus = "test.status" + + // TestSkipReason indicates the skip reason of the test. 
+	// This constant is used to tag traces with the reason why the test was skipped.
+	TestSkipReason = "test.skip_reason"
+
+	// TestSourceFile indicates the source file where the test is located.
+	// This constant is used to tag traces with the file path of the test source code.
+	TestSourceFile = "test.source.file"
+
+	// TestSourceStartLine indicates the line of the source file where the test starts.
+	// This constant is used to tag traces with the line number in the source file where the test starts.
+	TestSourceStartLine = "test.source.start"
+
+	// TestSourceEndLine indicates the line of the source file where the test ends.
+	// This constant is used to tag traces with the line number in the source file where the test ends.
+	TestSourceEndLine = "test.source.end"
+
+	// TestCodeOwners indicates the test code owners.
+	// This constant is used to tag traces with the code owners responsible for the test.
+	TestCodeOwners = "test.codeowners"
+
+	// TestCommand indicates the test command.
+	// This constant is used to tag traces with the command used to execute the test.
+	TestCommand = "test.command"
+
+	// TestCommandExitCode indicates the test command exit code.
+	// This constant is used to tag traces with the exit code of the test command.
+	TestCommandExitCode = "test.exit_code"
+
+	// TestCommandWorkingDirectory indicates the test command working directory relative to the source root.
+	// This constant is used to tag traces with the working directory path relative to the source root.
+	TestCommandWorkingDirectory = "test.working_directory"
+
+	// TestSessionName indicates the test session name.
+	// This constant is used to tag traces with the test session name.
+	TestSessionName = "test_session.name"
+
+	// TestIsNew indicates a new test.
+	// This constant is used to tag test events that are detected as new by early flake detection.
+	TestIsNew = "test.is_new"
+
+	// TestIsRetry indicates a retry execution.
+	// This constant is used to tag test events that are part of a retry execution.
+	TestIsRetry = "test.is_retry"
+
+	// TestRetryReason indicates the reason for retrying the test.
+	TestRetryReason = "test.retry_reason"
+
+	// TestEarlyFlakeDetectionRetryAborted indicates a retry abort reason by the early flake detection feature.
+	TestEarlyFlakeDetectionRetryAborted = "test.early_flake.abort_reason"
+
+	// TestSkippedByITR indicates a test skipped by the ITR feature.
+	TestSkippedByITR = "test.skipped_by_itr"
+
+	// SkippedByITRReason indicates the reason why the test was skipped by the ITR feature.
+	SkippedByITRReason = "Skipped by Datadog Intelligent Test Runner"
+
+	// ITRTestsSkipped indicates that tests were skipped by the ITR feature.
+	ITRTestsSkipped = "_dd.ci.itr.tests_skipped"
+
+	// ITRTestsSkippingEnabled indicates that the ITR test skipping feature is enabled.
+	ITRTestsSkippingEnabled = "test.itr.tests_skipping.enabled"
+
+	// ITRTestsSkippingType indicates the type of ITR test skipping.
+	ITRTestsSkippingType = "test.itr.tests_skipping.type"
+
+	// ITRTestsSkippingCount indicates the number of tests skipped by the ITR feature.
+	ITRTestsSkippingCount = "test.itr.tests_skipping.count"
+
+	// CodeCoverageEnabled indicates that code coverage is enabled.
+	CodeCoverageEnabled = "test.code_coverage.enabled"
+
+	// TestUnskippable indicates that the test is unskippable.
+	TestUnskippable = "test.itr.unskippable"
+
+	// TestForcedToRun indicates that the test is forced to run because it is unskippable.
+	TestForcedToRun = "test.itr.forced_run"
+
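As a hedged sketch of how these keys are meant to be combined (the values and the plain map below are illustrative; the tracer attaches them through its own event plumbing), a passing re-execution of a flaky test would carry roughly:

    package main

    import "fmt"

    func main() {
    	// Keys come from the constants in this file; values are made up for the example.
    	tags := map[string]string{
    		"test.name":         "TestCheckout",    // TestName
    		"test.status":       "pass",            // TestStatusPass
    		"test.is_retry":     "true",            // TestIsRetry
    		"test.retry_reason": "auto_test_retry", // AutoTestRetriesRetryReason
    	}
    	fmt.Println(tags)
    }

+	// TestIsQuarantined indicates that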
the test is quarantined + TestIsQuarantined = "test.test_management.is_quarantined" + + // TestIsDisabled indicates that the test is disabled + TestIsDisabled = "test.test_management.is_test_disabled" + + // TestIsAttempToFix indicates that the test is an attempt to fix + TestIsAttempToFix = "test.test_management.is_attempt_to_fix" + + // TestHasFailedAllRetries indicates that the test has failed all retries + TestHasFailedAllRetries = "test.has_failed_all_retries" + + // TestAttemptToFixPassed indicates that the attempt to fix has passed + TestAttemptToFixPassed = "test.test_management.attempt_to_fix_passed" + + // TestManagementEnabled indicates that the test management feature is enabled + TestManagementEnabled = "test.test_management.enabled" + + // TestIsModified indicates that the test is modified + TestIsModified = "test.is_modified" +) + +// Define valid test status types. +const ( + // TestStatusPass marks test execution as passed. + // This constant is used to tag traces with a status indicating that the test passed. + TestStatusPass = "pass" + + // TestStatusFail marks test execution as failed. + // This constant is used to tag traces with a status indicating that the test failed. + TestStatusFail = "fail" + + // TestStatusSkip marks test execution as skipped. + // This constant is used to tag traces with a status indicating that the test was skipped. + TestStatusSkip = "skip" +) + +// Define valid test types. +const ( + // TestTypeTest defines test type as test. + // This constant is used to tag traces indicating that the type of test is a standard test. + TestTypeTest = "test" + + // TestTypeBenchmark defines test type as benchmark. + // This constant is used to tag traces indicating that the type of test is a benchmark. + TestTypeBenchmark = "benchmark" +) + +// Retry reasons +const ( + // AttemptToFixRetryReason indicates that the test is retried due to an attempt to fix. + AttemptToFixRetryReason = "attempt_to_fix" + + // EarlyFlakeDetectionRetryReason indicates that the test is retried due to early flake detection. + EarlyFlakeDetectionRetryReason = "early_flake_detection" + + // AutoTestRetriesRetryReason indicates that the test is retried due to auto test retries. + AutoTestRetriesRetryReason = "auto_test_retry" + + // ExternalRetryReason indicates that the test is retried due to an external reason. + ExternalRetryReason = "external" +) + +const ( + // TestDisabledSkipReason indicates the skip reason for a test that is disabled. + TestDisabledSkipReason = "Flaky test is disabled by Datadog" +) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/ci_providers.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/ci_providers.go new file mode 100644 index 00000000..7ad0b6fc --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/ci_providers.go @@ -0,0 +1,690 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package utils + +import ( + "encoding/json" + "fmt" + "os" + "regexp" + "sort" + "strings" + + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// providerType defines a function type that returns a map of string key-value pairs. 
+type providerType = func() map[string]string
+
+// providers maps environment variable names to their corresponding CI provider extraction functions.
+var providers = map[string]providerType{
+	"APPVEYOR":            extractAppveyor,
+	"TF_BUILD":            extractAzurePipelines,
+	"BITBUCKET_COMMIT":    extractBitbucket,
+	"BUDDY":               extractBuddy,
+	"BUILDKITE":           extractBuildkite,
+	"CIRCLECI":            extractCircleCI,
+	"GITHUB_SHA":          extractGithubActions,
+	"GITLAB_CI":           extractGitlab,
+	"JENKINS_URL":         extractJenkins,
+	"TEAMCITY_VERSION":    extractTeamcity,
+	"TRAVIS":              extractTravis,
+	"BITRISE_BUILD_SLUG":  extractBitrise,
+	"CF_BUILD_ID":         extractCodefresh,
+	"CODEBUILD_INITIATOR": extractAwsCodePipeline,
+	"DRONE":               extractDrone,
+}
+
+// getEnvVarsJSON returns a JSON representation of the specified environment variables.
+func getEnvVarsJSON(envVars ...string) ([]byte, error) {
+	envVarsMap := make(map[string]string)
+	for _, envVar := range envVars {
+		value := env.Get(envVar)
+		if value != "" {
+			envVarsMap[envVar] = value
+		}
+	}
+	return json.Marshal(envVarsMap)
+}
+
+// getProviderTags extracts CI information from environment variables.
+func getProviderTags() map[string]string {
+	tags := map[string]string{}
+	for key, provider := range providers {
+		if _, ok := env.Lookup(key); !ok {
+			continue
+		}
+		tags = provider()
+	}
+
+	// replace with user specific tags
+	replaceWithUserSpecificTags(tags)
+
+	// Normalize tags
+	normalizeTags(tags)
+
+	// Expand ~
+	if tag, ok := tags[constants.CIWorkspacePath]; ok && tag != "" {
+		tags[constants.CIWorkspacePath] = ExpandPath(tag)
+	}
+
+	// remove empty values
+	for tag, value := range tags {
+		if value == "" {
+			delete(tags, tag)
+		}
+	}
+
+	if log.DebugEnabled() {
+		if providerName, ok := tags[constants.CIProviderName]; ok {
+			log.Debug("civisibility: detected ci provider: %s", providerName)
+		} else {
+			log.Debug("civisibility: no ci provider was detected.")
+		}
+	}
+
+	return tags
+}
+
+// normalizeTags normalizes specific tags to remove prefixes and sensitive information.
+func normalizeTags(tags map[string]string) {
+	if tag, ok := tags[constants.GitBranch]; ok && tag != "" {
+		if strings.Contains(tag, "refs/tags") || strings.Contains(tag, "origin/tags") || strings.Contains(tag, "refs/heads/tags") {
+			tags[constants.GitTag] = normalizeRef(tag)
+		}
+		tags[constants.GitBranch] = normalizeRef(tag)
+	}
+	if tag, ok := tags[constants.GitTag]; ok && tag != "" {
+		tags[constants.GitTag] = normalizeRef(tag)
+	}
+	if tag, ok := tags[constants.GitPrBaseBranch]; ok && tag != "" {
+		tags[constants.GitPrBaseBranch] = normalizeRef(tag)
+	}
+	if tag, ok := tags[constants.GitRepositoryURL]; ok && tag != "" {
+		tags[constants.GitRepositoryURL] = filterSensitiveInfo(tag)
+	}
+	if tag, ok := tags[constants.CIPipelineURL]; ok && tag != "" {
+		tags[constants.CIPipelineURL] = filterSensitiveInfo(tag)
+	}
+	if tag, ok := tags[constants.CIJobURL]; ok && tag != "" {
+		tags[constants.CIJobURL] = filterSensitiveInfo(tag)
+	}
+	if tag, ok := tags[constants.CIEnvVars]; ok && tag != "" {
+		tags[constants.CIEnvVars] = filterSensitiveInfo(tag)
+	}
+}
+
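The provider table and getProviderTags above implement a simple sentinel-variable dispatch: the presence of one well-known environment variable selects the extractor. A minimal standalone rendition of the pattern (the two providers and the tag key below are illustrative):

    package main

    import (
    	"fmt"
    	"os"
    )

    // Each CI provider is recognized by one sentinel environment variable.
    var providers = map[string]func() map[string]string{
    	"GITLAB_CI":  func() map[string]string { return map[string]string{"ci.provider.name": "gitlab"} },
    	"GITHUB_SHA": func() map[string]string { return map[string]string{"ci.provider.name": "github"} },
    }

    func main() {
    	for sentinel, extract := range providers {
    		if _, ok := os.LookupEnv(sentinel); ok {
    			fmt.Println(extract())
    			return // unlike this sketch, the vendored loop keeps iterating; normally only one sentinel is set
    		}
    	}
    	fmt.Println("no CI provider detected")
    }

+// replaceWithUserSpecificTags replaces certain tags with user-specific environment variable values.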
+func replaceWithUserSpecificTags(tags map[string]string) { + replace := func(tagName, envName string) { + tags[tagName] = getEnvironmentVariableIfIsNotEmpty(envName, tags[tagName]) + } + + replace(constants.GitBranch, "DD_GIT_BRANCH") + replace(constants.GitTag, "DD_GIT_TAG") + replace(constants.GitRepositoryURL, "DD_GIT_REPOSITORY_URL") + replace(constants.GitCommitSHA, "DD_GIT_COMMIT_SHA") + replace(constants.GitCommitMessage, "DD_GIT_COMMIT_MESSAGE") + replace(constants.GitCommitAuthorName, "DD_GIT_COMMIT_AUTHOR_NAME") + replace(constants.GitCommitAuthorEmail, "DD_GIT_COMMIT_AUTHOR_EMAIL") + replace(constants.GitCommitAuthorDate, "DD_GIT_COMMIT_AUTHOR_DATE") + replace(constants.GitCommitCommitterName, "DD_GIT_COMMIT_COMMITTER_NAME") + replace(constants.GitCommitCommitterEmail, "DD_GIT_COMMIT_COMMITTER_EMAIL") + replace(constants.GitCommitCommitterDate, "DD_GIT_COMMIT_COMMITTER_DATE") + replace(constants.GitPrBaseBranch, "DD_GIT_PULL_REQUEST_BASE_BRANCH") + replace(constants.GitPrBaseCommit, "DD_GIT_PULL_REQUEST_BASE_BRANCH_SHA") +} + +// getEnvironmentVariableIfIsNotEmpty returns the environment variable value if it is not empty, otherwise returns the default value. +func getEnvironmentVariableIfIsNotEmpty(key string, defaultValue string) string { + if value, ok := env.Lookup(key); ok && value != "" { + return value + } + return defaultValue +} + +// normalizeRef normalizes a Git reference name by removing common prefixes. +func normalizeRef(name string) string { + // Define the prefixes to remove + prefixes := []string{"refs/heads/", "refs/", "origin/", "tags/"} + + // Iterate over prefixes and remove them if present + for _, prefix := range prefixes { + if strings.HasPrefix(name, prefix) { + name = strings.TrimPrefix(name, prefix) + } + } + return name +} + +// firstEnv returns the value of the first non-empty environment variable from the provided list. +func firstEnv(keys ...string) string { + for _, key := range keys { + if value, ok := env.Lookup(key); ok { + if value != "" { + return value + } + } + } + return "" +} + +// extractAppveyor extracts CI information specific to Appveyor. 
+func extractAppveyor() map[string]string { + tags := map[string]string{} + url := fmt.Sprintf("https://ci.appveyor.com/project/%s/builds/%s", env.Get("APPVEYOR_REPO_NAME"), env.Get("APPVEYOR_BUILD_ID")) + tags[constants.CIProviderName] = "appveyor" + if env.Get("APPVEYOR_REPO_PROVIDER") == "github" { + tags[constants.GitRepositoryURL] = fmt.Sprintf("https://github.com/%s.git", env.Get("APPVEYOR_REPO_NAME")) + } else { + tags[constants.GitRepositoryURL] = env.Get("APPVEYOR_REPO_NAME") + } + + tags[constants.GitCommitSHA] = env.Get("APPVEYOR_REPO_COMMIT") + tags[constants.GitBranch] = firstEnv("APPVEYOR_PULL_REQUEST_HEAD_REPO_BRANCH", "APPVEYOR_REPO_BRANCH") + tags[constants.GitTag] = env.Get("APPVEYOR_REPO_TAG_NAME") + + tags[constants.CIWorkspacePath] = env.Get("APPVEYOR_BUILD_FOLDER") + tags[constants.CIPipelineID] = env.Get("APPVEYOR_BUILD_ID") + tags[constants.CIPipelineName] = env.Get("APPVEYOR_REPO_NAME") + tags[constants.CIPipelineNumber] = env.Get("APPVEYOR_BUILD_NUMBER") + tags[constants.CIPipelineURL] = url + tags[constants.CIJobURL] = url + tags[constants.GitCommitMessage] = fmt.Sprintf("%s\n%s", env.Get("APPVEYOR_REPO_COMMIT_MESSAGE"), env.Get("APPVEYOR_REPO_COMMIT_MESSAGE_EXTENDED")) + tags[constants.GitCommitAuthorName] = env.Get("APPVEYOR_REPO_COMMIT_AUTHOR") + tags[constants.GitCommitAuthorEmail] = env.Get("APPVEYOR_REPO_COMMIT_AUTHOR_EMAIL") + + tags[constants.GitPrBaseBranch] = env.Get("APPVEYOR_REPO_BRANCH") + tags[constants.GitHeadCommit] = env.Get("APPVEYOR_PULL_REQUEST_HEAD_COMMIT") + tags[constants.PrNumber] = env.Get("APPVEYOR_PULL_REQUEST_NUMBER") + + return tags +} + +// extractAzurePipelines extracts CI information specific to Azure Pipelines. +func extractAzurePipelines() map[string]string { + tags := map[string]string{} + baseURL := fmt.Sprintf("%s%s/_build/results?buildId=%s", env.Get("SYSTEM_TEAMFOUNDATIONSERVERURI"), env.Get("SYSTEM_TEAMPROJECTID"), env.Get("BUILD_BUILDID")) + pipelineURL := baseURL + jobURL := fmt.Sprintf("%s&view=logs&j=%s&t=%s", baseURL, env.Get("SYSTEM_JOBID"), env.Get("SYSTEM_TASKINSTANCEID")) + branchOrTag := firstEnv("SYSTEM_PULLREQUEST_SOURCEBRANCH", "BUILD_SOURCEBRANCH", "BUILD_SOURCEBRANCHNAME") + branch := "" + tag := "" + if strings.Contains(branchOrTag, "tags/") { + tag = branchOrTag + } else { + branch = branchOrTag + } + tags[constants.CIProviderName] = "azurepipelines" + tags[constants.CIWorkspacePath] = env.Get("BUILD_SOURCESDIRECTORY") + + tags[constants.CIPipelineID] = env.Get("BUILD_BUILDID") + tags[constants.CIPipelineName] = env.Get("BUILD_DEFINITIONNAME") + tags[constants.CIPipelineNumber] = env.Get("BUILD_BUILDID") + tags[constants.CIPipelineURL] = pipelineURL + + tags[constants.CIStageName] = env.Get("SYSTEM_STAGEDISPLAYNAME") + + tags[constants.CIJobID] = env.Get("SYSTEM_JOBID") + tags[constants.CIJobName] = env.Get("SYSTEM_JOBDISPLAYNAME") + tags[constants.CIJobURL] = jobURL + + tags[constants.GitRepositoryURL] = firstEnv("SYSTEM_PULLREQUEST_SOURCEREPOSITORYURI", "BUILD_REPOSITORY_URI") + tags[constants.GitCommitSHA] = firstEnv("SYSTEM_PULLREQUEST_SOURCECOMMITID", "BUILD_SOURCEVERSION") + tags[constants.GitBranch] = branch + tags[constants.GitTag] = tag + tags[constants.GitCommitMessage] = env.Get("BUILD_SOURCEVERSIONMESSAGE") + tags[constants.GitCommitAuthorName] = env.Get("BUILD_REQUESTEDFORID") + tags[constants.GitCommitAuthorEmail] = env.Get("BUILD_REQUESTEDFOREMAIL") + + jsonString, err := getEnvVarsJSON("SYSTEM_TEAMPROJECTID", "BUILD_BUILDID", "SYSTEM_JOBID") + if err == nil { + tags[constants.CIEnvVars] = 
string(jsonString) + } + + tags[constants.GitPrBaseBranch] = env.Get("SYSTEM_PULLREQUEST_TARGETBRANCH") + tags[constants.PrNumber] = env.Get("SYSTEM_PULLREQUEST_PULLREQUESTNUMBER") + + return tags +} + +// extractBitrise extracts CI information specific to Bitrise. +func extractBitrise() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "bitrise" + tags[constants.GitRepositoryURL] = env.Get("GIT_REPOSITORY_URL") + tags[constants.GitCommitSHA] = firstEnv("BITRISE_GIT_COMMIT", "GIT_CLONE_COMMIT_HASH") + tags[constants.GitBranch] = firstEnv("BITRISEIO_PULL_REQUEST_HEAD_BRANCH", "BITRISE_GIT_BRANCH") + tags[constants.GitTag] = env.Get("BITRISE_GIT_TAG") + tags[constants.CIWorkspacePath] = env.Get("BITRISE_SOURCE_DIR") + tags[constants.CIPipelineID] = env.Get("BITRISE_BUILD_SLUG") + tags[constants.CIPipelineName] = env.Get("BITRISE_TRIGGERED_WORKFLOW_ID") + tags[constants.CIPipelineNumber] = env.Get("BITRISE_BUILD_NUMBER") + tags[constants.CIPipelineURL] = env.Get("BITRISE_BUILD_URL") + tags[constants.GitCommitMessage] = env.Get("BITRISE_GIT_MESSAGE") + + tags[constants.GitPrBaseBranch] = env.Get("BITRISEIO_GIT_BRANCH_DEST") + tags[constants.PrNumber] = env.Get("BITRISE_PULL_REQUEST") + + return tags +} + +// extractBitbucket extracts CI information specific to Bitbucket. +func extractBitbucket() map[string]string { + tags := map[string]string{} + url := fmt.Sprintf("https://bitbucket.org/%s/addon/pipelines/home#!/results/%s", env.Get("BITBUCKET_REPO_FULL_NAME"), env.Get("BITBUCKET_BUILD_NUMBER")) + tags[constants.CIProviderName] = "bitbucket" + tags[constants.GitRepositoryURL] = firstEnv("BITBUCKET_GIT_SSH_ORIGIN", "BITBUCKET_GIT_HTTP_ORIGIN") + tags[constants.GitCommitSHA] = env.Get("BITBUCKET_COMMIT") + tags[constants.GitBranch] = env.Get("BITBUCKET_BRANCH") + tags[constants.GitTag] = env.Get("BITBUCKET_TAG") + tags[constants.CIWorkspacePath] = env.Get("BITBUCKET_CLONE_DIR") + tags[constants.CIPipelineID] = strings.Trim(env.Get("BITBUCKET_PIPELINE_UUID"), "{}") + tags[constants.CIPipelineNumber] = env.Get("BITBUCKET_BUILD_NUMBER") + tags[constants.CIPipelineName] = env.Get("BITBUCKET_REPO_FULL_NAME") + tags[constants.CIPipelineURL] = url + tags[constants.CIJobURL] = url + + tags[constants.GitPrBaseBranch] = env.Get("BITBUCKET_PR_DESTINATION_BRANCH") + tags[constants.PrNumber] = env.Get("BITBUCKET_PR_ID") + + return tags +} + +// extractBuddy extracts CI information specific to Buddy. 
+func extractBuddy() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "buddy" + tags[constants.CIPipelineID] = fmt.Sprintf("%s/%s", env.Get("BUDDY_PIPELINE_ID"), env.Get("BUDDY_EXECUTION_ID")) + tags[constants.CIPipelineName] = env.Get("BUDDY_PIPELINE_NAME") + tags[constants.CIPipelineNumber] = env.Get("BUDDY_EXECUTION_ID") + tags[constants.CIPipelineURL] = env.Get("BUDDY_EXECUTION_URL") + tags[constants.GitCommitSHA] = env.Get("BUDDY_EXECUTION_REVISION") + tags[constants.GitRepositoryURL] = env.Get("BUDDY_SCM_URL") + tags[constants.GitBranch] = env.Get("BUDDY_EXECUTION_BRANCH") + tags[constants.GitTag] = env.Get("BUDDY_EXECUTION_TAG") + tags[constants.GitCommitMessage] = env.Get("BUDDY_EXECUTION_REVISION_MESSAGE") + tags[constants.GitCommitCommitterName] = env.Get("BUDDY_EXECUTION_REVISION_COMMITTER_NAME") + tags[constants.GitCommitCommitterEmail] = env.Get("BUDDY_EXECUTION_REVISION_COMMITTER_EMAIL") + + tags[constants.GitPrBaseBranch] = env.Get("BUDDY_RUN_PR_BASE_BRANCH") + tags[constants.PrNumber] = env.Get("BUDDY_RUN_PR_NO") + + return tags +} + +// extractBuildkite extracts CI information specific to Buildkite. +func extractBuildkite() map[string]string { + tags := map[string]string{} + tags[constants.GitBranch] = env.Get("BUILDKITE_BRANCH") + tags[constants.GitCommitSHA] = env.Get("BUILDKITE_COMMIT") + tags[constants.GitRepositoryURL] = env.Get("BUILDKITE_REPO") + tags[constants.GitTag] = env.Get("BUILDKITE_TAG") + tags[constants.CIPipelineID] = env.Get("BUILDKITE_BUILD_ID") + tags[constants.CIPipelineName] = env.Get("BUILDKITE_PIPELINE_SLUG") + tags[constants.CIPipelineNumber] = env.Get("BUILDKITE_BUILD_NUMBER") + tags[constants.CIPipelineURL] = env.Get("BUILDKITE_BUILD_URL") + tags[constants.CIJobID] = env.Get("BUILDKITE_JOB_ID") + tags[constants.CIJobURL] = fmt.Sprintf("%s#%s", env.Get("BUILDKITE_BUILD_URL"), env.Get("BUILDKITE_JOB_ID")) + tags[constants.CIProviderName] = "buildkite" + tags[constants.CIWorkspacePath] = env.Get("BUILDKITE_BUILD_CHECKOUT_PATH") + tags[constants.GitCommitMessage] = env.Get("BUILDKITE_MESSAGE") + tags[constants.GitCommitAuthorName] = env.Get("BUILDKITE_BUILD_AUTHOR") + tags[constants.GitCommitAuthorEmail] = env.Get("BUILDKITE_BUILD_AUTHOR_EMAIL") + tags[constants.CINodeName] = env.Get("BUILDKITE_AGENT_ID") + + jsonString, err := getEnvVarsJSON("BUILDKITE_BUILD_ID", "BUILDKITE_JOB_ID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + var extraTags []string + envVars := os.Environ() + for _, envVar := range envVars { + if strings.HasPrefix(envVar, "BUILDKITE_AGENT_META_DATA_") { + envVarAsTag := envVar + envVarAsTag = strings.TrimPrefix(envVarAsTag, "BUILDKITE_AGENT_META_DATA_") + envVarAsTag = strings.ToLower(envVarAsTag) + envVarAsTag = strings.Replace(envVarAsTag, "=", ":", 1) + extraTags = append(extraTags, envVarAsTag) + } + } + + if len(extraTags) != 0 { + // HACK: Sorting isn't actually needed, but it simplifies testing if the order is consistent + sort.Sort(sort.Reverse(sort.StringSlice(extraTags))) + jsonString, err = json.Marshal(extraTags) + if err == nil { + tags[constants.CINodeLabels] = string(jsonString) + } + } + + tags[constants.GitPrBaseBranch] = env.Get("BUILDKITE_PULL_REQUEST_BASE_BRANCH") + tags[constants.PrNumber] = env.Get("BUILDKITE_PULL_REQUEST") + + return tags +} + +// extractCircleCI extracts CI information specific to CircleCI. 
+func extractCircleCI() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "circleci" + tags[constants.GitRepositoryURL] = env.Get("CIRCLE_REPOSITORY_URL") + tags[constants.GitCommitSHA] = env.Get("CIRCLE_SHA1") + tags[constants.GitTag] = env.Get("CIRCLE_TAG") + tags[constants.GitBranch] = env.Get("CIRCLE_BRANCH") + tags[constants.CIWorkspacePath] = env.Get("CIRCLE_WORKING_DIRECTORY") + tags[constants.CIPipelineID] = env.Get("CIRCLE_WORKFLOW_ID") + tags[constants.CIPipelineName] = env.Get("CIRCLE_PROJECT_REPONAME") + tags[constants.CIPipelineNumber] = env.Get("CIRCLE_BUILD_NUM") + tags[constants.CIPipelineURL] = fmt.Sprintf("https://app.circleci.com/pipelines/workflows/%s", env.Get("CIRCLE_WORKFLOW_ID")) + tags[constants.CIJobName] = env.Get("CIRCLE_JOB") + tags[constants.CIJobID] = env.Get("CIRCLE_BUILD_NUM") + tags[constants.CIJobURL] = env.Get("CIRCLE_BUILD_URL") + tags[constants.PrNumber] = env.Get("CIRCLE_PR_NUMBER") + + jsonString, err := getEnvVarsJSON("CIRCLE_BUILD_NUM", "CIRCLE_WORKFLOW_ID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + return tags +} + +// extractGithubActions extracts CI information specific to GitHub Actions. +func extractGithubActions() map[string]string { + tags := map[string]string{} + branchOrTag := firstEnv("GITHUB_HEAD_REF", "GITHUB_REF") + tag := "" + branch := "" + if strings.Contains(branchOrTag, "tags/") { + tag = branchOrTag + } else { + branch = branchOrTag + } + + serverURL := env.Get("GITHUB_SERVER_URL") + if serverURL == "" { + serverURL = "https://github.com" + } + serverURL = strings.TrimSuffix(serverURL, "/") + + rawRepository := fmt.Sprintf("%s/%s", serverURL, env.Get("GITHUB_REPOSITORY")) + pipelineID := env.Get("GITHUB_RUN_ID") + commitSha := env.Get("GITHUB_SHA") + + tags[constants.CIProviderName] = "github" + tags[constants.GitRepositoryURL] = rawRepository + ".git" + tags[constants.GitCommitSHA] = commitSha + tags[constants.GitBranch] = branch + tags[constants.GitTag] = tag + tags[constants.CIWorkspacePath] = env.Get("GITHUB_WORKSPACE") + tags[constants.CIPipelineID] = pipelineID + tags[constants.CIPipelineNumber] = env.Get("GITHUB_RUN_NUMBER") + tags[constants.CIPipelineName] = env.Get("GITHUB_WORKFLOW") + tags[constants.CIJobURL] = fmt.Sprintf("%s/commit/%s/checks", rawRepository, commitSha) + tags[constants.CIJobID] = env.Get("GITHUB_JOB") + tags[constants.CIJobName] = env.Get("GITHUB_JOB") + + attempts := env.Get("GITHUB_RUN_ATTEMPT") + if attempts == "" { + tags[constants.CIPipelineURL] = fmt.Sprintf("%s/actions/runs/%s", rawRepository, pipelineID) + } else { + tags[constants.CIPipelineURL] = fmt.Sprintf("%s/actions/runs/%s/attempts/%s", rawRepository, pipelineID, attempts) + } + + jsonString, err := getEnvVarsJSON("GITHUB_SERVER_URL", "GITHUB_REPOSITORY", "GITHUB_RUN_ID", "GITHUB_RUN_ATTEMPT") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + // Extract PR information from the github event json file + eventFilePath := env.Get("GITHUB_EVENT_PATH") + if stats, ok := os.Stat(eventFilePath); ok == nil && !stats.IsDir() { + if eventFile, err := os.Open(eventFilePath); err == nil { + defer eventFile.Close() + + var eventJSON struct { + Number int `json:"number"` + PullRequest struct { + Base struct { + Sha string `json:"sha"` + Ref string `json:"ref"` + } `json:"base"` + Head struct { + Sha string `json:"sha"` + } `json:"head"` + } `json:"pull_request"` + } + + eventDecoder := json.NewDecoder(eventFile) + if eventDecoder.Decode(&eventJSON) == nil { + 
tags[constants.GitHeadCommit] = eventJSON.PullRequest.Head.Sha
+				tags[constants.GitPrBaseHeadCommit] = eventJSON.PullRequest.Base.Sha
+				tags[constants.GitPrBaseBranch] = eventJSON.PullRequest.Base.Ref
+				tags[constants.PrNumber] = fmt.Sprintf("%d", eventJSON.Number)
+			}
+		}
+	}
+
+	// Fallback if GitPrBaseBranch is not set
+	if tmpVal, ok := tags[constants.GitPrBaseBranch]; !ok || tmpVal == "" {
+		tags[constants.GitPrBaseBranch] = env.Get("GITHUB_BASE_REF")
+	}
+
+	return tags
+}
+
+// extractGitlab extracts CI information specific to GitLab.
+func extractGitlab() map[string]string {
+	tags := map[string]string{}
+	url := env.Get("CI_PIPELINE_URL")
+
+	tags[constants.CIProviderName] = "gitlab"
+	tags[constants.GitRepositoryURL] = env.Get("CI_REPOSITORY_URL")
+	tags[constants.GitCommitSHA] = env.Get("CI_COMMIT_SHA")
+	tags[constants.GitBranch] = firstEnv("CI_COMMIT_BRANCH", "CI_COMMIT_REF_NAME")
+	tags[constants.GitTag] = env.Get("CI_COMMIT_TAG")
+	tags[constants.CIWorkspacePath] = env.Get("CI_PROJECT_DIR")
+	tags[constants.CIPipelineID] = env.Get("CI_PIPELINE_ID")
+	tags[constants.CIPipelineName] = env.Get("CI_PROJECT_PATH")
+	tags[constants.CIPipelineNumber] = env.Get("CI_PIPELINE_IID")
+	tags[constants.CIPipelineURL] = url
+	tags[constants.CIJobURL] = env.Get("CI_JOB_URL")
+	tags[constants.CIJobID] = env.Get("CI_JOB_ID")
+	tags[constants.CIJobName] = env.Get("CI_JOB_NAME")
+	tags[constants.CIStageName] = env.Get("CI_JOB_STAGE")
+	tags[constants.GitCommitMessage] = env.Get("CI_COMMIT_MESSAGE")
+	tags[constants.CINodeName] = env.Get("CI_RUNNER_ID")
+	tags[constants.CINodeLabels] = env.Get("CI_RUNNER_TAGS")
+
+	// CI_COMMIT_AUTHOR has the form "Name <email>"; guard the indexing so an
+	// unset or malformed value cannot panic.
+	author := env.Get("CI_COMMIT_AUTHOR")
+	authorArray := strings.FieldsFunc(author, func(s rune) bool {
+		return s == '<' || s == '>'
+	})
+	if len(authorArray) > 0 {
+		tags[constants.GitCommitAuthorName] = strings.TrimSpace(authorArray[0])
+	}
+	if len(authorArray) > 1 {
+		tags[constants.GitCommitAuthorEmail] = strings.TrimSpace(authorArray[1])
+	}
+	tags[constants.GitCommitAuthorDate] = env.Get("CI_COMMIT_TIMESTAMP")
+
+	jsonString, err := getEnvVarsJSON("CI_PROJECT_URL", "CI_PIPELINE_ID", "CI_JOB_ID")
+	if err == nil {
+		tags[constants.CIEnvVars] = string(jsonString)
+	}
+
+	tags[constants.GitHeadCommit] = env.Get("CI_MERGE_REQUEST_SOURCE_BRANCH_SHA")
+	tags[constants.GitPrBaseHeadCommit] = env.Get("CI_MERGE_REQUEST_TARGET_BRANCH_SHA")
+	tags[constants.GitPrBaseCommit] = env.Get("CI_MERGE_REQUEST_DIFF_BASE_SHA")
+	tags[constants.GitPrBaseBranch] = env.Get("CI_MERGE_REQUEST_TARGET_BRANCH_NAME")
+	tags[constants.PrNumber] = env.Get("CI_MERGE_REQUEST_IID")
+
+	return tags
+}
+
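As a hedged aside, the FieldsFunc split used for CI_COMMIT_AUTHOR above behaves like this on a typical value (standalone sketch, sample data made up):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	author := "Jane Doe <jane@example.com>"
    	// Split at '<' and '>', leaving the name and the email as the two fields.
    	parts := strings.FieldsFunc(author, func(r rune) bool { return r == '<' || r == '>' })
    	// parts: ["Jane Doe ", "jane@example.com"]
    	fmt.Printf("name=%q email=%q\n", strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]))
    }

+// extractJenkins extracts CI information specific to Jenkins.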
+func extractJenkins() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "jenkins" + tags[constants.GitRepositoryURL] = firstEnv("GIT_URL", "GIT_URL_1") + tags[constants.GitCommitSHA] = env.Get("GIT_COMMIT") + + branchOrTag := env.Get("GIT_BRANCH") + empty := []byte("") + name, hasName := env.Lookup("JOB_NAME") + + if strings.Contains(branchOrTag, "tags/") { + tags[constants.GitTag] = branchOrTag + } else { + tags[constants.GitBranch] = branchOrTag + // remove branch for job name + removeBranch := regexp.MustCompile(fmt.Sprintf("/%s", normalizeRef(branchOrTag))) + name = string(removeBranch.ReplaceAll([]byte(name), empty)) + } + + if hasName { + removeVars := regexp.MustCompile("/[^/]+=[^/]*") + name = string(removeVars.ReplaceAll([]byte(name), empty)) + } + + tags[constants.CIWorkspacePath] = env.Get("WORKSPACE") + tags[constants.CIPipelineID] = env.Get("BUILD_TAG") + tags[constants.CIPipelineNumber] = env.Get("BUILD_NUMBER") + tags[constants.CIPipelineName] = name + tags[constants.CIPipelineURL] = env.Get("BUILD_URL") + tags[constants.CINodeName] = env.Get("NODE_NAME") + tags[constants.PrNumber] = env.Get("CHANGE_ID") + tags[constants.GitPrBaseBranch] = env.Get("CHANGE_TARGET") + + jsonString, err := getEnvVarsJSON("DD_CUSTOM_TRACE_ID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + nodeLabels := env.Get("NODE_LABELS") + if len(nodeLabels) > 0 { + labelsArray := strings.Split(nodeLabels, " ") + jsonString, err := json.Marshal(labelsArray) + if err == nil { + tags[constants.CINodeLabels] = string(jsonString) + } + } + + return tags +} + +// extractTeamcity extracts CI information specific to TeamCity. +func extractTeamcity() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "teamcity" + tags[constants.CIJobURL] = env.Get("BUILD_URL") + tags[constants.CIJobName] = env.Get("TEAMCITY_BUILDCONF_NAME") + + tags[constants.PrNumber] = env.Get("TEAMCITY_PULLREQUEST_NUMBER") + tags[constants.GitPrBaseBranch] = env.Get("TEAMCITY_PULLREQUEST_TARGET_BRANCH") + return tags +} + +// extractCodefresh extracts CI information specific to Codefresh. +func extractCodefresh() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "codefresh" + tags[constants.CIPipelineID] = env.Get("CF_BUILD_ID") + tags[constants.CIPipelineName] = env.Get("CF_PIPELINE_NAME") + tags[constants.CIPipelineURL] = env.Get("CF_BUILD_URL") + tags[constants.CIJobName] = env.Get("CF_STEP_NAME") + + jsonString, err := getEnvVarsJSON("CF_BUILD_ID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + cfBranch := env.Get("CF_BRANCH") + isTag := strings.Contains(cfBranch, "tags/") + var refKey string + if isTag { + refKey = constants.GitTag + } else { + refKey = constants.GitBranch + } + tags[refKey] = normalizeRef(cfBranch) + + tags[constants.GitPrBaseBranch] = env.Get("CF_PULL_REQUEST_TARGET") + tags[constants.PrNumber] = env.Get("CF_PULL_REQUEST_NUMBER") + + return tags +} + +// extractTravis extracts CI information specific to Travis CI. 
+func extractTravis() map[string]string { + tags := map[string]string{} + prSlug := env.Get("TRAVIS_PULL_REQUEST_SLUG") + repoSlug := prSlug + if strings.TrimSpace(repoSlug) == "" { + repoSlug = env.Get("TRAVIS_REPO_SLUG") + } + tags[constants.CIProviderName] = "travisci" + tags[constants.GitRepositoryURL] = fmt.Sprintf("https://github.com/%s.git", repoSlug) + tags[constants.GitCommitSHA] = env.Get("TRAVIS_COMMIT") + tags[constants.GitTag] = env.Get("TRAVIS_TAG") + tags[constants.GitBranch] = firstEnv("TRAVIS_PULL_REQUEST_BRANCH", "TRAVIS_BRANCH") + tags[constants.CIWorkspacePath] = env.Get("TRAVIS_BUILD_DIR") + tags[constants.CIPipelineID] = env.Get("TRAVIS_BUILD_ID") + tags[constants.CIPipelineNumber] = env.Get("TRAVIS_BUILD_NUMBER") + tags[constants.CIPipelineName] = repoSlug + tags[constants.CIPipelineURL] = env.Get("TRAVIS_BUILD_WEB_URL") + tags[constants.CIJobURL] = env.Get("TRAVIS_JOB_WEB_URL") + tags[constants.GitCommitMessage] = env.Get("TRAVIS_COMMIT_MESSAGE") + + tags[constants.GitPrBaseBranch] = env.Get("TRAVIS_BRANCH") + tags[constants.GitHeadCommit] = env.Get("TRAVIS_PULL_REQUEST_SHA") + tags[constants.PrNumber] = env.Get("TRAVIS_PULL_REQUEST") + + return tags +} + +// extractAwsCodePipeline extracts CI information specific to AWS CodePipeline. +func extractAwsCodePipeline() map[string]string { + tags := map[string]string{} + + if !strings.HasPrefix(env.Get("CODEBUILD_INITIATOR"), "codepipeline") { + // CODEBUILD_INITIATOR is defined but this is not a codepipeline build + return tags + } + + tags[constants.CIProviderName] = "awscodepipeline" + tags[constants.CIPipelineID] = env.Get("DD_PIPELINE_EXECUTION_ID") + tags[constants.CIJobID] = env.Get("DD_ACTION_EXECUTION_ID") + + jsonString, err := getEnvVarsJSON("CODEBUILD_BUILD_ARN", "DD_ACTION_EXECUTION_ID", "DD_PIPELINE_EXECUTION_ID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + return tags +} + +// extractDrone extracts CI information specific to Drone CI. +func extractDrone() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "drone" + tags[constants.GitBranch] = env.Get("DRONE_BRANCH") + tags[constants.GitCommitSHA] = env.Get("DRONE_COMMIT_SHA") + tags[constants.GitRepositoryURL] = env.Get("DRONE_GIT_HTTP_URL") + tags[constants.GitTag] = env.Get("DRONE_TAG") + tags[constants.CIPipelineNumber] = env.Get("DRONE_BUILD_NUMBER") + tags[constants.CIPipelineURL] = env.Get("DRONE_BUILD_LINK") + tags[constants.GitCommitMessage] = env.Get("DRONE_COMMIT_MESSAGE") + tags[constants.GitCommitAuthorName] = env.Get("DRONE_COMMIT_AUTHOR_NAME") + tags[constants.GitCommitAuthorEmail] = env.Get("DRONE_COMMIT_AUTHOR_EMAIL") + tags[constants.CIWorkspacePath] = env.Get("DRONE_WORKSPACE") + tags[constants.CIJobName] = env.Get("DRONE_STEP_NAME") + tags[constants.CIStageName] = env.Get("DRONE_STAGE_NAME") + tags[constants.PrNumber] = env.Get("DRONE_PULL_REQUEST") + tags[constants.GitPrBaseBranch] = env.Get("DRONE_TARGET_BRANCH") + + return tags +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/codeowners.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/codeowners.go new file mode 100644 index 00000000..9d606b14 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/codeowners.go @@ -0,0 +1,325 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package utils + +import ( + "bufio" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants" + + logger "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// This is a port of https://github.com/DataDog/dd-trace-dotnet/blob/v2.53.0/tracer/src/Datadog.Trace/Ci/CodeOwners.cs + +type ( + // CodeOwners represents a structured data type that holds sections of code owners. + // Each section maps to a slice of entries, where each entry includes a pattern and a list of owners. + CodeOwners struct { + Sections []*Section + } + + // Section represents a block of structured data of multiple entries in a single section + Section struct { + Name string + Entries []Entry + } + + // Entry represents a single entry in a CODEOWNERS file. + // It includes the pattern for matching files, the list of owners, and the section to which it belongs. + Entry struct { + Pattern string + Owners []string + Section string + } +) + +var ( + // codeowners holds the parsed CODEOWNERS file data. + codeowners *CodeOwners + codeownersMutex sync.Mutex +) + +// GetCodeOwners retrieves and caches the CODEOWNERS data. +// It looks for the CODEOWNERS file in various standard locations within the CI workspace. +// This function is thread-safe due to the use of a mutex. +// +// Returns: +// +// A pointer to a CodeOwners struct containing the parsed CODEOWNERS data, or nil if not found. +func GetCodeOwners() *CodeOwners { + codeownersMutex.Lock() + defer codeownersMutex.Unlock() + + if codeowners != nil { + return codeowners + } + + tags := GetCITags() + if v, ok := tags[constants.CIWorkspacePath]; ok { + paths := []string{ + filepath.Join(v, "CODEOWNERS"), + filepath.Join(v, ".github", "CODEOWNERS"), + filepath.Join(v, ".gitlab", "CODEOWNERS"), + filepath.Join(v, ".docs", "CODEOWNERS"), + } + for _, path := range paths { + if cow, err := parseCodeOwners(path); err == nil { + codeowners = cow + return codeowners + } + } + } + + // If the codeowners file is not found, let's try a last resort by looking in the current directory (for standalone test binaries) + for _, path := range []string{"CODEOWNERS", filepath.Join(filepath.Dir(os.Args[0]), "CODEOWNERS")} { + if cow, err := parseCodeOwners(path); err == nil { + codeowners = cow + return codeowners + } + } + + return nil +} + +// parseCodeOwners reads and parses the CODEOWNERS file located at the given filePath. +func parseCodeOwners(filePath string) (*CodeOwners, error) { + if _, err := os.Stat(filePath); err != nil { + return nil, err + } + cow, err := NewCodeOwners(filePath) + if err == nil { + if logger.DebugEnabled() { + logger.Debug("civisibility: codeowner file '%s' was loaded successfully.", filePath) + } + return cow, nil + } + logger.Debug("Error parsing codeowners: %s", err.Error()) + return nil, err +} + +// NewCodeOwners creates a new instance of CodeOwners by parsing a CODEOWNERS file located at the given filePath. +// It returns an error if the file cannot be read or parsed properly. 
+func NewCodeOwners(filePath string) (*CodeOwners, error) {
+	if filePath == "" {
+		return nil, fmt.Errorf("filePath cannot be empty")
+	}
+
+	file, err := os.Open(filePath)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		err = file.Close()
+		if err != nil && !errors.Is(err, os.ErrClosed) {
+			logger.Warn("Error closing codeowners file: %s", err.Error())
+		}
+	}()
+
+	var entriesList []Entry
+	var sectionsList []string
+	var currentSectionName string
+
+	scanner := bufio.NewScanner(file)
+	for scanner.Scan() {
+		line := scanner.Text()
+		if len(line) == 0 || line[0] == '#' {
+			continue
+		}
+
+		// Identify section headers, which are lines enclosed in square brackets
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			currentSectionName = line[1 : len(line)-1]
+			foundSectionName := findSectionIgnoreCase(sectionsList, currentSectionName)
+			if foundSectionName == "" {
+				sectionsList = append(sectionsList, currentSectionName)
+			} else {
+				currentSectionName = foundSectionName
+			}
+			continue
+		}
+
+		finalLine := line
+		var ownersList []string
+		terms := strings.Fields(line)
+		for _, term := range terms {
+			if len(term) == 0 {
+				continue
+			}
+
+			// Identify owners by their prefixes (either @ for usernames or containing @ for emails)
+			if term[0] == '@' || strings.Contains(term, "@") {
+				ownersList = append(ownersList, term)
+				pos := strings.Index(finalLine, term)
+				if pos > 0 {
+					finalLine = finalLine[:pos] + finalLine[pos+len(term):]
+				}
+			}
+		}
+
+		finalLine = strings.TrimSpace(finalLine)
+		if len(finalLine) == 0 {
+			continue
+		}
+
+		entriesList = append(entriesList, Entry{Pattern: finalLine, Owners: ownersList, Section: currentSectionName})
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	// Reverse the entries list so that, per CODEOWNERS semantics, the last matching
+	// pattern in the file is the first one found when iterating.
+	for i, j := 0, len(entriesList)-1; i < j; i, j = i+1, j-1 {
+		entriesList[i], entriesList[j] = entriesList[j], entriesList[i]
+	}
+
+	codeOwners := &CodeOwners{}
+	for _, entry := range entriesList {
+		var section *Section
+		for _, val := range codeOwners.Sections {
+			if val.Name == entry.Section {
+				section = val
+				break
+			}
+		}
+
+		if section == nil {
+			section = &Section{Name: entry.Section, Entries: []Entry{}}
+			codeOwners.Sections = append(codeOwners.Sections, section)
+		}
+
+		section.Entries = append(section.Entries, entry)
+	}
+
+	return codeOwners, nil
+}
+
+// findSectionIgnoreCase searches for a section name in a case-insensitive manner.
+// It returns the section name if found, otherwise returns an empty string.
+func findSectionIgnoreCase(sections []string, section string) string {
+	sectionLower := strings.ToLower(section)
+	for _, s := range sections {
+		if strings.ToLower(s) == sectionLower {
+			return s
+		}
+	}
+	return ""
+}
+
+// GetSection gets the first Section entry in the CodeOwners that matches the section name.
+// It returns a pointer to the matched section, or nil if no match is found.
+func (co *CodeOwners) GetSection(section string) *Section {
+	for _, value := range co.Sections {
+		if value.Name == section {
+			return value
+		}
+	}
+
+	return nil
+}
+
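A toy illustration of the parsing rules NewCodeOwners applies above: comments are skipped, [section] headers are tracked, @-owners are collected, and the pattern is what remains. This sketch is deliberately simplified (it assumes owners trail the pattern and omits the entry reversal and mid-line owner stripping):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	file := "# comment\n[Backend]\n/api/ @team-api alice@example.com\n*.go @gophers"
    	section := ""
    	for _, line := range strings.Split(file, "\n") {
    		line = strings.TrimSpace(line)
    		if line == "" || strings.HasPrefix(line, "#") {
    			continue // blank lines and comments
    		}
    		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
    			section = line[1 : len(line)-1] // section header
    			continue
    		}
    		fields := strings.Fields(line)
    		fmt.Printf("section=%q pattern=%q owners=%v\n", section, fields[0], fields[1:])
    	}
    }

+// Match finds the first entry in the CodeOwners that matches the given value.
+// It returns a pointer to the matched entry, or nil if no match is found.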
+func (co *CodeOwners) Match(value string) (*Entry, bool) {
+	var matchedEntries []Entry
+
+	for _, section := range co.Sections {
+		for _, entry := range section.Entries {
+			pattern := entry.Pattern
+			finalPattern := pattern
+
+			var includeAnythingBefore, includeAnythingAfter bool
+
+			if strings.HasPrefix(pattern, "/") {
+				includeAnythingBefore = false
+			} else {
+				if strings.HasPrefix(finalPattern, "*") {
+					finalPattern = finalPattern[1:]
+				}
+				includeAnythingBefore = true
+			}
+
+			if strings.HasSuffix(pattern, "/") {
+				includeAnythingAfter = true
+			} else if strings.HasSuffix(pattern, "/*") {
+				includeAnythingAfter = true
+				finalPattern = finalPattern[:len(finalPattern)-1]
+			} else {
+				includeAnythingAfter = false
+			}
+
+			if includeAnythingAfter {
+				found := (includeAnythingBefore && strings.Contains(value, finalPattern)) || strings.HasPrefix(value, finalPattern)
+				if !found {
+					continue
+				}
+
+				if !strings.HasSuffix(pattern, "/*") {
+					matchedEntries = append(matchedEntries, entry)
+					break
+				}
+
+				patternEnd := strings.Index(value, finalPattern)
+				if patternEnd != -1 {
+					patternEnd += len(finalPattern)
+					remainingString := value[patternEnd:]
+					if !strings.Contains(remainingString, "/") {
+						matchedEntries = append(matchedEntries, entry)
+						break
+					}
+				}
+			} else {
+				if includeAnythingBefore {
+					if strings.HasSuffix(value, finalPattern) {
+						matchedEntries = append(matchedEntries, entry)
+						break
+					}
+				} else if value == finalPattern {
+					matchedEntries = append(matchedEntries, entry)
+					break
+				}
+			}
+		}
+	}
+
+	switch len(matchedEntries) {
+	case 0:
+		return nil, false
+	case 1:
+		return &matchedEntries[0], true
+	default:
+		patterns := make([]string, 0)
+		owners := make([]string, 0)
+		sections := make([]string, 0)
+		for _, entry := range matchedEntries {
+			patterns = append(patterns, entry.Pattern)
+			owners = append(owners, entry.Owners...)
+			sections = append(sections, entry.Section)
+		}
+		return &Entry{
+			Pattern: strings.Join(patterns, " | "),
+			Owners:  owners,
+			Section: strings.Join(sections, " | "),
+		}, true
+	}
+}
+
+// GetOwnersString returns a formatted string of the owners list in an Entry.
+// It returns an empty string if there are no owners.
+func (e *Entry) GetOwnersString() string {
+	if len(e.Owners) == 0 {
+		return ""
+	}
+
+	return "[\"" + strings.Join(e.Owners, "\",\"") + "\"]"
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/environmentTags.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/environmentTags.go
new file mode 100644
index 00000000..2854b67b
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/environmentTags.go
@@ -0,0 +1,346 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package utils
+
+import (
+	"fmt"
+	"maps"
+	"os"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants"
+	"github.com/DataDog/dd-trace-go/v2/internal/env"
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+	"github.com/DataDog/dd-trace-go/v2/internal/osinfo"
+)
+
+var (
+	// CI/CD environment tag state, guarded by ciTagsMutex.
+	currentCiTags  map[string]string // currentCiTags holds the CI/CD tags after originalCiTags + addedTags
+	originalCiTags map[string]string // originalCiTags holds the CI/CD tags as originally collected from the environment and Git
+	addedTags      map[string]string // addedTags holds the tags added by the user
+	ciTagsMutex    sync.Mutex
+
+	// ciMetrics holds the CI/CD environment numeric variable information
+	currentCiMetrics  map[string]float64 // currentCiMetrics holds the CI/CD metrics after originalCiMetrics + addedMetrics
+	originalCiMetrics map[string]float64 // originalCiMetrics holds the CI/CD metrics as originally collected from the environment
+	addedMetrics      map[string]float64 // addedMetrics holds the metrics added by the user
+	ciMetricsMutex    sync.Mutex
+)
+
+// GetCITags retrieves and caches the CI/CD tags from environment variables.
+// It initializes the ciTags map if it is not already initialized.
+// This function is thread-safe due to the use of a mutex.
+//
+// Returns:
+//
+//	A map[string]string containing the CI/CD tags.
+func GetCITags() map[string]string {
+	ciTagsMutex.Lock()
+	defer ciTagsMutex.Unlock()
+
+	// Return the current tags if they are already initialized
+	if currentCiTags != nil {
+		return currentCiTags
+	}
+
+	if originalCiTags == nil {
+		// If the original tags are not initialized, create them
+		originalCiTags = createCITagsMap()
+	}
+
+	// Create a new map with the added tags
+	newTags := maps.Clone(originalCiTags)
+	for k, v := range addedTags {
+		newTags[k] = v
+	}
+
+	// Update the current tags
+	currentCiTags = newTags
+	return currentCiTags
+}
+
+// AddCITags adds a new tag to the CI/CD tags map.
+func AddCITags(tagName, tagValue string) {
+	ciTagsMutex.Lock()
+	defer ciTagsMutex.Unlock()
+
+	// Add the tag to the added tags dictionary
+	if addedTags == nil {
+		addedTags = make(map[string]string)
+	}
+	addedTags[tagName] = tagValue
+
+	// Reset the current tags
+	currentCiTags = nil
+}
+
+// AddCITagsMap adds a new map of tags to the CI/CD tags map.
+func AddCITagsMap(tags map[string]string) {
+	if tags == nil {
+		return
+	}
+
+	ciTagsMutex.Lock()
+	defer ciTagsMutex.Unlock()
+
+	// Add the tags to the added tags dictionary
+	if addedTags == nil {
+		addedTags = make(map[string]string)
+	}
+	for k, v := range tags {
+		addedTags[k] = v
+	}
+
+	// Reset the current tags
+	currentCiTags = nil
+}
+
+// ResetCITags resets the CI/CD tags to their original values.
+func ResetCITags() {
+	ciTagsMutex.Lock()
+	defer ciTagsMutex.Unlock()
+
+	originalCiTags = nil
+	currentCiTags = nil
+	addedTags = nil
+}
+
+// GetCIMetrics retrieves and caches the CI/CD metrics from environment variables.
+// It initializes the ciMetrics map if it is not already initialized.
+// This function is thread-safe due to the use of a mutex.
+//
+// Returns:
+//
+//	A map[string]float64 containing the CI/CD metrics.
+func GetCIMetrics() map[string]float64 {
+	ciMetricsMutex.Lock()
+	defer ciMetricsMutex.Unlock()
+
+	// Return the current metrics if they are already initialized
+	if currentCiMetrics != nil {
+		return currentCiMetrics
+	}
+
+	if originalCiMetrics == nil {
+		// If the original metrics are not initialized, create them
+		originalCiMetrics = createCIMetricsMap()
+	}
+
+	// Create a new map with the added metrics
+	newMetrics := maps.Clone(originalCiMetrics)
+	for k, v := range addedMetrics {
+		newMetrics[k] = v
+	}
+
+	// Update the current metrics
+	currentCiMetrics = newMetrics
+	return currentCiMetrics
+}
+
+// AddCIMetrics adds a new metric to the CI/CD metrics map.
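+// For example (illustrative), AddCIMetrics("test.host.memory", 16384) makes
+// the metric visible in the map returned by the next GetCIMetrics call.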
+func AddCIMetrics(metricName string, metricValue float64) { + ciMetricsMutex.Lock() + defer ciMetricsMutex.Unlock() + + // Add the metric to the added metrics dictionary + if addedMetrics == nil { + addedMetrics = make(map[string]float64) + } + addedMetrics[metricName] = metricValue + + // Reset the current metrics + currentCiMetrics = nil +} + +// AddCIMetricsMap adds a new map of metrics to the CI/CD metrics map. +func AddCIMetricsMap(metrics map[string]float64) { + if metrics == nil { + return + } + + ciMetricsMutex.Lock() + defer ciMetricsMutex.Unlock() + + // Add the metric to the added metrics dictionary + if addedMetrics == nil { + addedMetrics = make(map[string]float64) + } + for k, v := range metrics { + addedMetrics[k] = v + } + + // Reset the current metrics + currentCiMetrics = nil +} + +// ResetCIMetrics resets the CI/CD metrics to their original values. +func ResetCIMetrics() { + ciMetricsMutex.Lock() + defer ciMetricsMutex.Unlock() + + originalCiMetrics = nil + currentCiMetrics = nil + addedMetrics = nil +} + +// GetRelativePathFromCITagsSourceRoot calculates the relative path from the CI workspace root to the specified path. +// If the CI workspace root is not available in the tags, it returns the original path. +// +// Parameters: +// +// path - The absolute or relative file path for which the relative path should be calculated. +// +// Returns: +// +// The relative path from the CI workspace root to the specified path, or the original path if an error occurs. +func GetRelativePathFromCITagsSourceRoot(path string) string { + tags := GetCITags() + if v, ok := tags[constants.CIWorkspacePath]; ok { + relPath, err := filepath.Rel(v, path) + if err == nil { + return filepath.ToSlash(relPath) + } + } + + return path +} + +// createCITagsMap creates a map of CI/CD tags by extracting information from environment variables and the local Git repository. +// It also adds OS and runtime information to the tags. +// +// Returns: +// +// A map[string]string containing the extracted CI/CD tags. +func createCITagsMap() map[string]string { + localTags := getProviderTags() + + // Populate runtime values + localTags[constants.OSPlatform] = runtime.GOOS + localTags[constants.OSVersion] = osinfo.OSVersion() + localTags[constants.OSArchitecture] = runtime.GOARCH + localTags[constants.RuntimeName] = runtime.Compiler + localTags[constants.RuntimeVersion] = runtime.Version() + log.Debug("civisibility: os platform: %s", runtime.GOOS) + log.Debug("civisibility: os architecture: %s", runtime.GOARCH) + log.Debug("civisibility: runtime version: %s", runtime.Version()) + + // Get command line test command + var cmd string + if len(os.Args) == 1 { + cmd = filepath.Base(os.Args[0]) + } else { + cmd = fmt.Sprintf("%s %s ", filepath.Base(os.Args[0]), strings.Join(os.Args[1:], " ")) + } + + // Filter out some parameters to make the command more stable. 
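+	// For example (editor's illustration), a test invocation such as
+	//   "app.test -test.run TestFoo -test.v=true"
+	// is reduced by the filters below to "app.test -test.run TestFoo".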
+ cmd = regexp.MustCompile(`(?si)-test.gocoverdir=(.*)\s`).ReplaceAllString(cmd, "") + cmd = regexp.MustCompile(`(?si)-test.v=(.*)\s`).ReplaceAllString(cmd, "") + cmd = regexp.MustCompile(`(?si)-test.testlogfile=(.*)\s`).ReplaceAllString(cmd, "") + cmd = strings.TrimSpace(cmd) + localTags[constants.TestCommand] = cmd + log.Debug("civisibility: test command: %s", cmd) + + // Populate the test session name + if testSessionName, ok := env.Lookup(constants.CIVisibilityTestSessionNameEnvironmentVariable); ok { + localTags[constants.TestSessionName] = testSessionName + } else if jobName, ok := localTags[constants.CIJobName]; ok { + localTags[constants.TestSessionName] = fmt.Sprintf("%s-%s", jobName, cmd) + } else { + localTags[constants.TestSessionName] = cmd + } + log.Debug("civisibility: test session name: %s", localTags[constants.TestSessionName]) + + // Check if the user provided the test service + if ddService := env.Get("DD_SERVICE"); ddService != "" { + localTags[constants.UserProvidedTestServiceTag] = "true" + } else { + localTags[constants.UserProvidedTestServiceTag] = "false" + } + + // Populate missing git data + gitData, _ := getLocalGitData() + + // Populate Git metadata from the local Git repository if not already present in localTags + if _, ok := localTags[constants.CIWorkspacePath]; !ok { + localTags[constants.CIWorkspacePath] = gitData.SourceRoot + } + if _, ok := localTags[constants.GitRepositoryURL]; !ok { + localTags[constants.GitRepositoryURL] = gitData.RepositoryURL + } + if _, ok := localTags[constants.GitCommitSHA]; !ok { + localTags[constants.GitCommitSHA] = gitData.CommitSha + } + if _, ok := localTags[constants.GitBranch]; !ok { + localTags[constants.GitBranch] = gitData.Branch + } + + // If the commit SHA matches, populate additional Git metadata + if localTags[constants.GitCommitSHA] == gitData.CommitSha { + if _, ok := localTags[constants.GitCommitAuthorDate]; !ok { + localTags[constants.GitCommitAuthorDate] = gitData.AuthorDate.String() + } + if _, ok := localTags[constants.GitCommitAuthorName]; !ok { + localTags[constants.GitCommitAuthorName] = gitData.AuthorName + } + if _, ok := localTags[constants.GitCommitAuthorEmail]; !ok { + localTags[constants.GitCommitAuthorEmail] = gitData.AuthorEmail + } + if _, ok := localTags[constants.GitCommitCommitterDate]; !ok { + localTags[constants.GitCommitCommitterDate] = gitData.CommitterDate.String() + } + if _, ok := localTags[constants.GitCommitCommitterName]; !ok { + localTags[constants.GitCommitCommitterName] = gitData.CommitterName + } + if _, ok := localTags[constants.GitCommitCommitterEmail]; !ok { + localTags[constants.GitCommitCommitterEmail] = gitData.CommitterEmail + } + if _, ok := localTags[constants.GitCommitMessage]; !ok { + localTags[constants.GitCommitMessage] = gitData.CommitMessage + } + } + + // If the head commit SHA is available, populate additional Git head metadata + if headCommitSha, ok := localTags[constants.GitHeadCommit]; ok { + if headCommitData, err := fetchCommitData(headCommitSha); err != nil { + log.Warn("civisibility: failed to fetch head commit data: %s", err.Error()) + } else if headCommitSha == headCommitData.CommitSha { + localTags[constants.GitHeadAuthorDate] = headCommitData.AuthorDate.String() + localTags[constants.GitHeadAuthorName] = headCommitData.AuthorName + localTags[constants.GitHeadAuthorEmail] = headCommitData.AuthorEmail + localTags[constants.GitHeadCommitterDate] = headCommitData.CommitterDate.String() + localTags[constants.GitHeadCommitterName] = 
headCommitData.CommitterName
+			localTags[constants.GitHeadCommitterEmail] = headCommitData.CommitterEmail
+			localTags[constants.GitHeadMessage] = headCommitData.CommitMessage
+		} else {
+			log.Warn("civisibility: head commit SHA %s does not match the fetched commit SHA %s", headCommitSha, headCommitData.CommitSha)
+		}
+	}
+
+	// Apply environmental data if it is available
+	applyEnvironmentalDataIfRequired(localTags)
+
+	log.Debug("civisibility: workspace directory: %s", localTags[constants.CIWorkspacePath])
+	log.Debug("civisibility: common tags created with %d items", len(localTags))
+	return localTags
+}
+
+// createCIMetricsMap creates a map of CI/CD metrics by extracting information from environment variables and runtime information.
+//
+// Returns:
+//
+//	A map[string]float64 containing the metrics extracted
+func createCIMetricsMap() map[string]float64 {
+	localMetrics := make(map[string]float64)
+	localMetrics[constants.LogicalCPUCores] = float64(runtime.NumCPU())
+
+	log.Debug("civisibility: common metrics created with %d items", len(localMetrics))
+	return localMetrics
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/file_environmental_data.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/file_environmental_data.go
new file mode 100644
index 00000000..8274889d
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/file_environmental_data.go
@@ -0,0 +1,276 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package utils
+
+import (
+	"encoding/json"
+	"os"
+	"path/filepath"
+	"strings"
+	_ "unsafe" // for go:linkname
+
+	"github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants"
+	"github.com/DataDog/dd-trace-go/v2/internal/env"
+	logger "github.com/DataDog/dd-trace-go/v2/internal/log"
+)
+
+type (
+	/*
+	   {
+	     "ci.workspace_path": "ci.workspace_path",
+	     "git.repository_url": "git.repository_url",
+	     "git.commit.sha": "git.commit.sha",
+	     "git.branch": "user-supplied-branch",
+	     "git.tag": "user-supplied-tag",
+	     "git.commit.author.date": "usersupplied-authordate",
+	     "git.commit.author.name": "usersupplied-authorname",
+	     "git.commit.author.email": "usersupplied-authoremail",
+	     "git.commit.committer.date": "usersupplied-committerdate",
+	     "git.commit.committer.name": "usersupplied-committername",
+	     "git.commit.committer.email": "usersupplied-committeremail",
+	     "git.commit.message": "usersupplied-message",
+	     "ci.provider.name": "",
+	     "ci.pipeline.id": "",
+	     "ci.pipeline.url": "",
+	     "ci.pipeline.name": "",
+	     "ci.pipeline.number": "",
+	     "ci.stage.name": "",
+	     "ci.job.name": "",
+	     "ci.job.url": "",
+	     "ci.node.name": "",
+	     "ci.node.labels": "",
+	     "_dd.ci.env_vars": ""
+	   }
+	*/
+
+	// fileEnvironmentalData represents the environmental data for the complete test session.
+	fileEnvironmentalData struct {
+		WorkspacePath        string `json:"ci.workspace_path,omitempty"`
+		RepositoryURL        string `json:"git.repository_url,omitempty"`
+		CommitSHA            string `json:"git.commit.sha,omitempty"`
+		Branch               string `json:"git.branch,omitempty"`
+		Tag                  string `json:"git.tag,omitempty"`
+		CommitAuthorDate     string `json:"git.commit.author.date,omitempty"`
+		CommitAuthorName     string `json:"git.commit.author.name,omitempty"`
+		CommitAuthorEmail    string `json:"git.commit.author.email,omitempty"`
+		CommitCommitterDate  string `json:"git.commit.committer.date,omitempty"`
+		CommitCommitterName  string `json:"git.commit.committer.name,omitempty"`
+		CommitCommitterEmail string `json:"git.commit.committer.email,omitempty"`
+		CommitMessage        string `json:"git.commit.message,omitempty"`
+		CIProviderName       string `json:"ci.provider.name,omitempty"`
+		CIPipelineID         string `json:"ci.pipeline.id,omitempty"`
+		CIPipelineURL        string `json:"ci.pipeline.url,omitempty"`
+		CIPipelineName       string `json:"ci.pipeline.name,omitempty"`
+		CIPipelineNumber     string `json:"ci.pipeline.number,omitempty"`
+		CIStageName          string `json:"ci.stage.name,omitempty"`
+		CIJobName            string `json:"ci.job.name,omitempty"`
+		CIJobURL             string `json:"ci.job.url,omitempty"`
+		CINodeName           string `json:"ci.node.name,omitempty"`
+		CINodeLabels         string `json:"ci.node.labels,omitempty"`
+		DDCIEnvVars          string `json:"_dd.ci.env_vars,omitempty"`
+	}
+)
+
+// getEnvironmentalData reads the environmental data from the file.
+//
+//go:linkname getEnvironmentalData
+func getEnvironmentalData() *fileEnvironmentalData {
+	envDataFileName := getEnvDataFileName()
+	if _, err := os.Stat(envDataFileName); os.IsNotExist(err) {
+		logger.Debug("civisibility: environmental data file %s was not found.", envDataFileName)
+		return nil
+	}
+	file, err := os.Open(envDataFileName)
+	if err != nil {
+		logger.Error("civisibility: error reading environmental data from %s: %s", envDataFileName, err.Error())
+		return nil
+	}
+	defer file.Close()
+	var envData fileEnvironmentalData
+	if err := json.NewDecoder(file).Decode(&envData); err != nil {
+		logger.Error("civisibility: error decoding environmental data from %s: %s", envDataFileName, err.Error())
+		return nil
+	}
+	logger.Debug("civisibility: loaded environmental data from %s", envDataFileName)
+	return &envData
+}
+
+// getEnvDataFileName returns the environmental data file name.
+//
+//go:linkname getEnvDataFileName
+func getEnvDataFileName() string {
+	envDataFileName := strings.TrimSpace(env.Get(constants.CIVisibilityEnvironmentDataFilePath))
+	if envDataFileName != "" {
+		return envDataFileName
+	}
+	cmd := filepath.Base(os.Args[0])
+	cmdWithoutExt := strings.TrimSuffix(cmd, filepath.Ext(cmd))
+	folder := filepath.Dir(os.Args[0])
+	return filepath.Join(folder, cmdWithoutExt+".env.json")
+}
+
+// applyEnvironmentalDataIfRequired applies the environmental data to the given tags if required.
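+// Each value from the file is applied only when it is non-empty and the
+// corresponding tag is still unset, so (for example) a "git.branch" value read
+// from the environmental data file never overrides a branch already detected
+// from the CI provider's environment variables.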
+// +//go:linkname applyEnvironmentalDataIfRequired +func applyEnvironmentalDataIfRequired(tags map[string]string) { + if tags == nil { + return + } + envData := getEnvironmentalData() + if envData == nil { + logger.Debug("civisibility: no environmental data found") + return + } + + logger.Debug("civisibility: applying environmental data") + + if envData.WorkspacePath != "" && tags[constants.CIWorkspacePath] == "" { + tags[constants.CIWorkspacePath] = envData.WorkspacePath + } + + if envData.RepositoryURL != "" && tags[constants.GitRepositoryURL] == "" { + tags[constants.GitRepositoryURL] = envData.RepositoryURL + } + + if envData.CommitSHA != "" && tags[constants.GitCommitSHA] == "" { + tags[constants.GitCommitSHA] = envData.CommitSHA + } + + if envData.Branch != "" && tags[constants.GitBranch] == "" { + tags[constants.GitBranch] = envData.Branch + } + + if envData.Tag != "" && tags[constants.GitTag] == "" { + tags[constants.GitTag] = envData.Tag + } + + if envData.CommitAuthorDate != "" && tags[constants.GitCommitAuthorDate] == "" { + tags[constants.GitCommitAuthorDate] = envData.CommitAuthorDate + } + + if envData.CommitAuthorName != "" && tags[constants.GitCommitAuthorName] == "" { + tags[constants.GitCommitAuthorName] = envData.CommitAuthorName + } + + if envData.CommitAuthorEmail != "" && tags[constants.GitCommitAuthorEmail] == "" { + tags[constants.GitCommitAuthorEmail] = envData.CommitAuthorEmail + } + + if envData.CommitCommitterDate != "" && tags[constants.GitCommitCommitterDate] == "" { + tags[constants.GitCommitCommitterDate] = envData.CommitCommitterDate + } + + if envData.CommitCommitterName != "" && tags[constants.GitCommitCommitterName] == "" { + tags[constants.GitCommitCommitterName] = envData.CommitCommitterName + } + + if envData.CommitCommitterEmail != "" && tags[constants.GitCommitCommitterEmail] == "" { + tags[constants.GitCommitCommitterEmail] = envData.CommitCommitterEmail + } + + if envData.CommitMessage != "" && tags[constants.GitCommitMessage] == "" { + tags[constants.GitCommitMessage] = envData.CommitMessage + } + + if envData.CIProviderName != "" && tags[constants.CIProviderName] == "" { + tags[constants.CIProviderName] = envData.CIProviderName + } + + if envData.CIPipelineID != "" && tags[constants.CIPipelineID] == "" { + tags[constants.CIPipelineID] = envData.CIPipelineID + } + + if envData.CIPipelineURL != "" && tags[constants.CIPipelineURL] == "" { + tags[constants.CIPipelineURL] = envData.CIPipelineURL + } + + if envData.CIPipelineName != "" && tags[constants.CIPipelineName] == "" { + tags[constants.CIPipelineName] = envData.CIPipelineName + } + + if envData.CIPipelineNumber != "" && tags[constants.CIPipelineNumber] == "" { + tags[constants.CIPipelineNumber] = envData.CIPipelineNumber + } + + if envData.CIStageName != "" && tags[constants.CIStageName] == "" { + tags[constants.CIStageName] = envData.CIStageName + } + + if envData.CIJobName != "" && tags[constants.CIJobName] == "" { + tags[constants.CIJobName] = envData.CIJobName + } + + if envData.CIJobURL != "" && tags[constants.CIJobURL] == "" { + tags[constants.CIJobURL] = envData.CIJobURL + } + + if envData.CINodeName != "" && tags[constants.CINodeName] == "" { + tags[constants.CINodeName] = envData.CINodeName + } + + if envData.CINodeLabels != "" && tags[constants.CINodeLabels] == "" { + tags[constants.CINodeLabels] = envData.CINodeLabels + } + + if envData.DDCIEnvVars != "" && tags[constants.CIEnvVars] == "" { + tags[constants.CIEnvVars] = envData.DDCIEnvVars + } +} + +// 
createEnvironmentalDataFromTags creates a fileEnvironmentalData object from the given tags. +// +//go:linkname createEnvironmentalDataFromTags +func createEnvironmentalDataFromTags(tags map[string]string) *fileEnvironmentalData { + if tags == nil { + return nil + } + + return &fileEnvironmentalData{ + WorkspacePath: tags[constants.CIWorkspacePath], + RepositoryURL: tags[constants.GitRepositoryURL], + CommitSHA: tags[constants.GitCommitSHA], + Branch: tags[constants.GitBranch], + Tag: tags[constants.GitTag], + CommitAuthorDate: tags[constants.GitCommitAuthorDate], + CommitAuthorName: tags[constants.GitCommitAuthorName], + CommitAuthorEmail: tags[constants.GitCommitAuthorEmail], + CommitCommitterDate: tags[constants.GitCommitCommitterDate], + CommitCommitterName: tags[constants.GitCommitCommitterName], + CommitCommitterEmail: tags[constants.GitCommitCommitterEmail], + CommitMessage: tags[constants.GitCommitMessage], + CIProviderName: tags[constants.CIProviderName], + CIPipelineID: tags[constants.CIPipelineID], + CIPipelineURL: tags[constants.CIPipelineURL], + CIPipelineName: tags[constants.CIPipelineName], + CIPipelineNumber: tags[constants.CIPipelineNumber], + CIStageName: tags[constants.CIStageName], + CIJobName: tags[constants.CIJobName], + CIJobURL: tags[constants.CIJobURL], + CINodeName: tags[constants.CINodeName], + CINodeLabels: tags[constants.CINodeLabels], + DDCIEnvVars: tags[constants.CIEnvVars], + } +} + +// writeEnvironmentalDataToFile writes the environmental data to a file. +// +//go:linkname writeEnvironmentalDataToFile +func writeEnvironmentalDataToFile(filePath string, tags map[string]string) error { + envData := createEnvironmentalDataFromTags(tags) + if envData == nil { + return nil + } + + file, err := os.Create(filePath) + if err != nil { + return err + } + defer file.Close() + + encoder := json.NewEncoder(file) + encoder.SetIndent("", " ") + return encoder.Encode(envData) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/git.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/git.go new file mode 100644 index 00000000..131fe4e8 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/git.go @@ -0,0 +1,1010 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package utils + +import ( + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants" + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// MaxPackFileSizeInMb is the maximum size of a pack file in megabytes. +const MaxPackFileSizeInMb = 3 + +// localCommitData holds information about a single commit in the local Git repository. +type localCommitData struct { + CommitSha string + AuthorDate time.Time + AuthorName string + AuthorEmail string + CommitterDate time.Time + CommitterName string + CommitterEmail string + CommitMessage string +} + +// localGitData holds various pieces of information about the local Git repository, +// including the source root, repository URL, branch, commit SHA, author and committer details, and commit message. 
+type localGitData struct {
+	localCommitData
+	SourceRoot    string
+	RepositoryURL string
+	Branch        string
+}
+
+// gitVersionData holds the major, minor, and patch version numbers of the Git executable.
+type gitVersionData struct {
+	major int
+	minor int
+	patch int
+	err   error
+}
+
+var (
+	// gitCommandMutex is a mutex used to synchronize access to Git commands to prevent lock errors in git
+	gitCommandMutex sync.Mutex
+
+	// regexpSensitiveInfo is a regular expression used to match and filter out sensitive information from URLs.
+	regexpSensitiveInfo = regexp.MustCompile("(https?://|ssh?://)[^/]*@")
+
+	// possibleBaseBranches lists the branch names considered by the base branch detection algorithm
+	possibleBaseBranches = []string{"main", "master", "preprod", "prod", "dev", "development", "trunk"}
+
+	// baseLikeBranchFilter is a regex used to check whether a branch name looks like a possible base branch
+	baseLikeBranchFilter = regexp.MustCompile(`^(main|master|preprod|prod|dev|development|trunk|release\/.*|hotfix\/.*)$`)
+
+	// Cached data
+
+	// isGitFoundValue is a boolean flag indicating whether the Git executable is available on the system.
+	isGitFoundValue bool
+
+	// gitFinderOnce is a sync.Once instance used to ensure that the Git executable is only checked once.
+	gitFinderOnce sync.Once
+
+	// gitVersionOnce is a sync.Once instance used to ensure that the Git version is only retrieved once.
+	gitVersionOnce sync.Once
+
+	// gitVersionValue holds the version of the Git executable installed on the system.
+	gitVersionValue gitVersionData
+
+	// isAShallowCloneRepositoryOnce is a sync.Once instance used to ensure that the check for a shallow clone repository is only performed once.
+	isAShallowCloneRepositoryOnce atomic.Pointer[sync.Once]
+
+	// isAShallowCloneRepositoryValue is a boolean flag indicating whether the repository is a shallow clone.
+	isAShallowCloneRepositoryValue bool
+)
+
+// branchMetrics holds metrics for evaluating base branch candidates
+type branchMetrics struct {
+	behind  int
+	ahead   int
+	baseSha string
+}
+
+// isGitFound checks if the Git executable is available on the system.
+func isGitFound() bool {
+	gitFinderOnce.Do(func() {
+		_, err := exec.LookPath("git")
+		isGitFoundValue = err == nil
+		if err != nil {
+			log.Debug("civisibility.git: git executable not found")
+		}
+	})
+	return isGitFoundValue
+}
+
+// execGit executes a Git command with the given arguments.
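+// Failures are reported to telemetry by exit code; for example (illustrative),
+// running `git rev-parse` outside of a repository exits with code 128 and is
+// recorded under the corresponding 128 exit-code telemetry tag.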
+func execGit(commandType telemetry.CommandType, args ...string) (val []byte, err error) { + startTime := time.Now() + if commandType != telemetry.NotSpecifiedCommandsType { + telemetry.GitCommand(commandType) + defer func() { + telemetry.GitCommandMs(commandType, float64(time.Since(startTime).Milliseconds())) + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + switch exitErr.ExitCode() { + case -1: + telemetry.GitCommandErrors(commandType, telemetry.ECMinus1CommandExitCode) + case 1: + telemetry.GitCommandErrors(commandType, telemetry.EC1CommandExitCode) + case 2: + telemetry.GitCommandErrors(commandType, telemetry.EC2CommandExitCode) + case 127: + telemetry.GitCommandErrors(commandType, telemetry.EC127CommandExitCode) + case 128: + telemetry.GitCommandErrors(commandType, telemetry.EC128CommandExitCode) + case 129: + telemetry.GitCommandErrors(commandType, telemetry.EC129CommandExitCode) + default: + telemetry.GitCommandErrors(commandType, telemetry.UnknownCommandExitCode) + } + } else if err != nil { + telemetry.GitCommandErrors(commandType, telemetry.MissingCommandExitCode) + } + }() + } + if log.DebugEnabled() { + defer func() { + durationInMs := time.Since(startTime).Milliseconds() + if err != nil { + log.Debug("civisibility.git.command [%s][%s][%dms]: git %s", commandType, err.Error(), durationInMs, strings.Join(args, " ")) + } else { + log.Debug("civisibility.git.command [%s][%dms]: git %s", commandType, durationInMs, strings.Join(args, " ")) + } + }() + } + if !isGitFound() { + return nil, errors.New("git executable not found") + } + gitCommandMutex.Lock() + defer gitCommandMutex.Unlock() + return exec.Command("git", args...).CombinedOutput() +} + +// execGitString executes a Git command with the given arguments and returns the output as a string. +func execGitString(commandType telemetry.CommandType, args ...string) (string, error) { + out, err := execGit(commandType, args...) + strOut := strings.TrimSpace(strings.Trim(string(out), "\n")) + return strOut, err +} + +// execGitStringWithInput executes a Git command with the given input and arguments and returns the output as a string. 
+func execGitStringWithInput(commandType telemetry.CommandType, input string, args ...string) (val string, err error) { + startTime := time.Now() + if commandType != telemetry.NotSpecifiedCommandsType { + telemetry.GitCommand(commandType) + defer func() { + telemetry.GitCommandMs(commandType, float64(time.Since(startTime).Milliseconds())) + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + switch exitErr.ExitCode() { + case -1: + telemetry.GitCommandErrors(commandType, telemetry.ECMinus1CommandExitCode) + case 1: + telemetry.GitCommandErrors(commandType, telemetry.EC1CommandExitCode) + case 2: + telemetry.GitCommandErrors(commandType, telemetry.EC2CommandExitCode) + case 127: + telemetry.GitCommandErrors(commandType, telemetry.EC127CommandExitCode) + case 128: + telemetry.GitCommandErrors(commandType, telemetry.EC128CommandExitCode) + case 129: + telemetry.GitCommandErrors(commandType, telemetry.EC129CommandExitCode) + default: + telemetry.GitCommandErrors(commandType, telemetry.UnknownCommandExitCode) + } + } else if err != nil { + telemetry.GitCommandErrors(commandType, telemetry.MissingCommandExitCode) + } + }() + } + if log.DebugEnabled() { + defer func() { + durationInMs := time.Since(startTime).Milliseconds() + if err != nil { + log.Debug("civisibility.git.command [%s][%s][%dms]: git %s", commandType, err.Error(), durationInMs, strings.Join(args, " ")) + } else { + log.Debug("civisibility.git.command [%s][%dms]: git %s", commandType, durationInMs, strings.Join(args, " ")) + } + }() + } + gitCommandMutex.Lock() + defer gitCommandMutex.Unlock() + cmd := exec.Command("git", args...) + cmd.Stdin = strings.NewReader(input) + out, err := cmd.CombinedOutput() + strOut := strings.TrimSpace(strings.Trim(string(out), "\n")) + return strOut, err +} + +// getGitVersion retrieves the version of the Git executable installed on the system. +func getGitVersion() (major int, minor int, patch int, err error) { + gitVersionOnce.Do(func() { + out, lerr := execGitString(telemetry.NotSpecifiedCommandsType, "--version") + if lerr != nil { + gitVersionValue = gitVersionData{err: lerr} + return + } + out = strings.TrimSpace(strings.ReplaceAll(out, "git version ", "")) + versionParts := strings.Split(out, ".") + if len(versionParts) < 3 { + gitVersionValue = gitVersionData{err: errors.New("invalid git version")} + return + } + major, _ = strconv.Atoi(versionParts[0]) + minor, _ = strconv.Atoi(versionParts[1]) + patch, _ = strconv.Atoi(versionParts[2]) + gitVersionValue = gitVersionData{ + major: major, + minor: minor, + patch: patch, + err: nil, + } + }) + + return gitVersionValue.major, gitVersionValue.minor, gitVersionValue.patch, gitVersionValue.err +} + +// getLocalGitData retrieves information about the local Git repository from the current HEAD. +// It gathers details such as the repository URL, current branch, latest commit SHA, author and committer details, and commit message. +// +// Returns: +// +// A localGitData struct populated with the retrieved Git data. +// An error if any Git command fails or the retrieved data is incomplete. 
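+//
+// The commit details come from a single `git log -1` invocation whose output
+// is split on the literal `","` separator; an illustrative (made-up) output:
+//
+//	cb3b...9f2","1718000000","Jane Doe","jane@example.com","1718000300","Jane Doe","jane@example.com","fix: handle empty output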
+func getLocalGitData() (localGitData, error) {
+	gitData := localGitData{}
+
+	if !isGitFound() {
+		return gitData, errors.New("git executable not found")
+	}
+
+	// Ensure we have permissions to read the git directory
+	if currentDir, err := os.Getwd(); err == nil {
+		if gitDir, err := getParentGitFolder(currentDir); err == nil && gitDir != "" {
+			log.Debug("civisibility.git: setting permissions to git folder: %s", gitDir)
+			if out, err := execGitString(telemetry.GitAddPermissionCommandType, "config", "--global", "--add", "safe.directory", gitDir); err != nil {
+				log.Debug("civisibility.git: error while setting permissions to git folder: %s\n out: %s\n error: %s", gitDir, out, err.Error())
+			}
+		} else {
+			log.Debug("civisibility.git: error getting the parent git folder.")
+		}
+	} else {
+		log.Debug("civisibility.git: error getting the current working directory.")
+	}
+
+	// Extract the absolute path to the Git directory
+	log.Debug("civisibility.git: getting the absolute path to the Git directory")
+	out, err := execGitString(telemetry.GetWorkingDirectoryCommandType, "rev-parse", "--show-toplevel")
+	if err == nil {
+		gitData.SourceRoot = out
+	}
+
+	// Extract the repository URL
+	log.Debug("civisibility.git: getting the repository URL")
+	out, err = execGitString(telemetry.GetRepositoryCommandsType, "ls-remote", "--get-url")
+	if err == nil {
+		gitData.RepositoryURL = filterSensitiveInfo(out)
+	}
+
+	// Extract the current branch name
+	log.Debug("civisibility.git: getting the current branch name")
+	out, err = execGitString(telemetry.GetBranchCommandsType, "rev-parse", "--abbrev-ref", "HEAD")
+	if err == nil {
+		gitData.Branch = out
+	}
+
+	// Get commit details from the latest commit using git log (git log -1 --pretty='%H","%at","%an","%ae","%ct","%cn","%ce","%B')
+	log.Debug("civisibility.git: getting the latest commit details")
+	out, err = execGitString(telemetry.GetGitCommitInfoCommandType, "log", "-1", "--pretty=%H\",\"%at\",\"%an\",\"%ae\",\"%ct\",\"%cn\",\"%ce\",\"%B")
+	if err != nil {
+		return gitData, err
+	}
+
+	// Split the output into individual components
+	outArray := strings.Split(out, "\",\"")
+	if len(outArray) < 8 {
+		return gitData, errors.New("git log output has an unexpected format")
+	}
+
+	// Parse author and committer dates from Unix timestamps
+	authorUnixDate, _ := strconv.ParseInt(outArray[1], 10, 64)
+	committerUnixDate, _ := strconv.ParseInt(outArray[4], 10, 64)
+
+	// Populate the localGitData struct with the parsed information
+	gitData.CommitSha = outArray[0]
+	gitData.AuthorDate = time.Unix(authorUnixDate, 0)
+	gitData.AuthorName = outArray[2]
+	gitData.AuthorEmail = outArray[3]
+	gitData.CommitterDate = time.Unix(committerUnixDate, 0)
+	gitData.CommitterName = outArray[5]
+	gitData.CommitterEmail = outArray[6]
+	gitData.CommitMessage = strings.Trim(outArray[7], "\n")
+	return gitData, nil
+}
+
+// fetchCommitData retrieves commit data for a specific commit SHA in a shallow clone Git repository.
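+// When the repository is a shallow clone, the commit may be missing locally,
+// so the function first performs a blob-less `git fetch` of that single
+// commit (requires git >= 2.27.0) before reading it with `git show`.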
+func fetchCommitData(commitSha string) (localCommitData, error) {
+	commitData := localCommitData{}
+
+	// let's do a first check to see if the repository is a shallow clone
+	log.Debug("civisibility.fetchCommitData: checking if the repository is a shallow clone")
+	isAShallowClone, err := isAShallowCloneRepository()
+	if err != nil {
+		return commitData, fmt.Errorf("civisibility.fetchCommitData: error checking if the repository is a shallow clone: %s", err.Error())
+	}
+
+	// if the git repo is a shallow clone, we try to fetch the commit sha data
+	if isAShallowClone {
+		// let's check the git version >= 2.27.0 (git --version) to see if we can unshallow the repository
+		log.Debug("civisibility.fetchCommitData: checking the git version")
+		major, minor, patch, err := getGitVersion()
+		if err != nil {
+			return commitData, fmt.Errorf("civisibility.fetchCommitData: error getting the git version: %s", err.Error())
+		}
+		log.Debug("civisibility.fetchCommitData: git version: %d.%d.%d", major, minor, patch)
+		if major < 2 || (major == 2 && minor < 27) {
+			log.Debug("civisibility.fetchCommitData: the git version is less than 2.27.0, we cannot unshallow the repository")
+			return commitData, nil
+		}
+
+		// let's get the remote name
+		remoteName, err := getRemoteName()
+		if err != nil {
+			return commitData, fmt.Errorf("civisibility.fetchCommitData: error getting the remote name: %s\n%s", err.Error(), remoteName)
+		}
+		if remoteName == "" {
+			// if the origin name is empty, we fallback to "origin"
+			remoteName = "origin"
+		}
+		log.Debug("civisibility.fetchCommitData: remote name: %s", remoteName)
+
+		// let's fetch the missing commits and trees for the commit sha
+		// git fetch --update-shallow --filter="blob:none" --recurse-submodules=no --no-write-fetch-head
+		log.Debug("civisibility.fetchCommitData: fetching the missing commits and trees for the commit sha")
+		if fetchOutput, fetchErr := execGitString(
+			telemetry.FetchCommandType,
+			"fetch",
+			"--update-shallow",
+			"--filter=blob:none",
+			"--recurse-submodules=no",
+			"--no-write-fetch-head",
+			remoteName,
+			commitSha); fetchErr != nil {
+			return commitData, fmt.Errorf("civisibility.fetchCommitData: error: %s\n%s", fetchErr.Error(), fetchOutput)
+		}
+	}
+
+	// Get the commit details using git show (git show -s --format='%H","%at","%an","%ae","%ct","%cn","%ce","%B')
+	log.Debug("civisibility.git: getting the commit details")
+	out, err := execGitString(telemetry.GetGitCommitInfoCommandType, "show", commitSha, "-s", "--format=%H\",\"%at\",\"%an\",\"%ae\",\"%ct\",\"%cn\",\"%ce\",\"%B")
+	if err != nil {
+		return commitData, err
+	}
+
+	// Split the output into individual components
+	outArray := strings.Split(out, "\",\"")
+	if len(outArray) < 8 {
+		return commitData, errors.New("git show output has an unexpected format")
+	}
+
+	// Parse author and committer dates from Unix timestamps
+	authorUnixDate, _ := strconv.ParseInt(outArray[1], 10, 64)
+	committerUnixDate, _ := strconv.ParseInt(outArray[4], 10, 64)
+
+	// Populate the localCommitData struct with the parsed information
+	commitData.CommitSha = outArray[0]
+	commitData.AuthorDate = time.Unix(authorUnixDate, 0)
+	commitData.AuthorName = outArray[2]
+	commitData.AuthorEmail = outArray[3]
+	commitData.CommitterDate = time.Unix(committerUnixDate, 0)
+	commitData.CommitterName = outArray[5]
+	commitData.CommitterEmail = outArray[6]
+	commitData.CommitMessage = strings.Trim(outArray[7], "\n")
+
+	log.Debug("civisibility.fetchCommitData: completed successfully")
+	return commitData, nil
+}
+
+// GetLastLocalGitCommitShas retrieves the commit SHAs of the last 1000 commits in the local Git repository.
+func GetLastLocalGitCommitShas() []string {
+	// git log --format=%H -n 1000 --since="1 month ago"
+	log.Debug("civisibility.git: getting the commit SHAs of the last 1000 commits in the local Git repository")
+	out, err := execGitString(telemetry.GetLocalCommitsCommandsType, "log", "--format=%H", "-n", "1000", "--since=\"1 month ago\"")
+	if err != nil || out == "" {
+		return []string{}
+	}
+	return strings.Split(out, "\n")
+}
+
+// UnshallowGitRepository converts a shallow clone into a complete clone by fetching the missing commits without file content (commit and tree objects only, no blobs).
+func UnshallowGitRepository() (bool, error) {
+	// let's do a first check to see if the repository is a shallow clone
+	log.Debug("civisibility.unshallow: checking if the repository is a shallow clone")
+	isAShallowClone, err := isAShallowCloneRepository()
+	if err != nil {
+		return false, fmt.Errorf("civisibility.unshallow: error checking if the repository is a shallow clone: %s", err.Error())
+	}
+
+	// if the git repo is not a shallow clone, we can return early
+	if !isAShallowClone {
+		log.Debug("civisibility.unshallow: the repository is not a shallow clone")
+		return false, nil
+	}
+
+	// the git repo is a shallow clone, we need to double-check whether there is more than one commit in the log.
+	log.Debug("civisibility.unshallow: the repository is a shallow clone, checking if there is more than one commit in the log")
+	hasMoreThanOneCommits, err := hasTheGitLogHaveMoreThanOneCommits()
+	if err != nil {
+		return false, fmt.Errorf("civisibility.unshallow: error checking if the git log has more than one commit: %s", err.Error())
+	}
+
+	// if there is more than one commit, we can return early
+	if hasMoreThanOneCommits {
+		log.Debug("civisibility.unshallow: the git log has more than one commit")
+		return false, nil
+	}
+
+	// let's check the git version >= 2.27.0 (git --version) to see if we can unshallow the repository
+	log.Debug("civisibility.unshallow: checking the git version")
+	major, minor, patch, err := getGitVersion()
+	if err != nil {
+		return false, fmt.Errorf("civisibility.unshallow: error getting the git version: %s", err.Error())
+	}
+	log.Debug("civisibility.unshallow: git version: %d.%d.%d", major, minor, patch)
+	if major < 2 || (major == 2 && minor < 27) {
+		log.Debug("civisibility.unshallow: the git version is less than 2.27.0, we cannot unshallow the repository")
+		return false, nil
+	}
+
+	// after asking for 2 log lines, if the git log command returned just one commit sha, we reconfigure the repo
+	// to ask for the commits and trees of the last month (no blobs)
+
+	// let's get the remote name
+	remoteName, err := getRemoteName()
+	if err != nil {
+		return false, fmt.Errorf("civisibility.unshallow: error getting the remote name: %s\n%s", err.Error(), remoteName)
+	}
+	if remoteName == "" {
+		// if the origin name is empty, we fallback to "origin"
+		remoteName = "origin"
+	}
+	log.Debug("civisibility.unshallow: remote name: %s", remoteName)
+
+	// let's get the sha of the HEAD (git rev-parse HEAD)
+	headSha, err := execGitString(telemetry.GetHeadCommandsType, "rev-parse", "HEAD")
+	if err != nil {
+		return false, fmt.Errorf("civisibility.unshallow: error getting the HEAD sha: %s\n%s", err.Error(), headSha)
+	}
+	if headSha == "" {
+		// if the HEAD is empty, we fallback to the current branch (git branch --show-current)
+		headSha, err = execGitString(telemetry.GetBranchCommandsType, "branch", "--show-current")
+		if err != nil {
+			return false, fmt.Errorf("civisibility.unshallow: error getting the current branch: %s\n%s", err.Error(), headSha)
+		}
+	}
+	log.Debug("civisibility.unshallow: HEAD sha: %s", headSha)
+
+	// let's fetch the missing commits and trees from the last month
+	// git fetch --shallow-since="1 month ago" --update-shallow --filter="blob:none" --recurse-submodules=no $(git config --default origin --get clone.defaultRemoteName) $(git rev-parse HEAD)
+	log.Debug("civisibility.unshallow: fetching the missing commits and trees from the last month")
+	fetchOutput, err := execGitString(telemetry.UnshallowCommandsType, "fetch", "--shallow-since=\"1 month ago\"", "--update-shallow", "--filter=blob:none", "--recurse-submodules=no", remoteName, headSha)
+
+	// let's check if the last command was unsuccessful
+	if err != nil || fetchOutput == "" {
+		if err != nil {
+			log.Debug("civisibility.unshallow: error fetching the missing commits and trees from the last month: %s", err.Error())
+		}
+		// ***
+		// The previous command has a drawback: if the local HEAD is a commit that has not been pushed to the remote, it will fail.
+		// If this is the case, we fallback to: `git fetch --shallow-since="1 month ago" --update-shallow --filter="blob:none" --recurse-submodules=no $(git config --default origin --get clone.defaultRemoteName) $(git rev-parse --abbrev-ref --symbolic-full-name @{upstream})`
+		// This command will attempt to use the tracked branch for the current branch in order to unshallow.
+		// ***
+
+		// let's get the remote branch name: git rev-parse --abbrev-ref --symbolic-full-name @{upstream}
+		var remoteBranchName string
+		log.Debug("civisibility.unshallow: getting the remote branch name")
+		remoteBranchName, err = execGitString(telemetry.UnshallowCommandsType, "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{upstream}")
+		if err == nil {
+			// let's try the alternative: git fetch --shallow-since="1 month ago" --update-shallow --filter="blob:none" --recurse-submodules=no $(git config --default origin --get clone.defaultRemoteName) $(git rev-parse --abbrev-ref --symbolic-full-name @{upstream})
+			log.Debug("civisibility.unshallow: fetching the missing commits and trees from the last month using the remote branch name")
+			fetchOutput, err = execGitString(telemetry.UnshallowCommandsType, "fetch", "--shallow-since=\"1 month ago\"", "--update-shallow", "--filter=blob:none", "--recurse-submodules=no", remoteName, remoteBranchName)
+		}
+	}
+
+	// let's check if the last command was unsuccessful
+	if err != nil || fetchOutput == "" {
+		if err != nil {
+			log.Debug("civisibility.unshallow: error fetching the missing commits and trees from the last month: %s", err.Error())
+		}
+		// ***
+		// It could be that the CI is working on a detached HEAD or maybe branch tracking hasn't been set up.
+		// In that case, this command will also fail, and we finally fall back to unshallowing everything:
+		// `git fetch --shallow-since="1 month ago" --update-shallow --filter="blob:none" --recurse-submodules=no $(git config --default origin --get clone.defaultRemoteName)`
+		// ***
+
+		// let's try the last fallback: git fetch --shallow-since="1 month ago" --update-shallow --filter="blob:none" --recurse-submodules=no $(git config --default origin --get clone.defaultRemoteName)
+		log.Debug("civisibility.unshallow: fetching the missing commits and trees from the last month using the origin name")
+		fetchOutput, err = execGitString(telemetry.UnshallowCommandsType, "fetch", "--shallow-since=\"1 month ago\"", "--update-shallow", "--filter=blob:none", "--recurse-submodules=no", remoteName)
+	}
+
+	if err != nil {
+		return false, fmt.Errorf("civisibility.unshallow: error: %s\n%s", err.Error(), fetchOutput)
+	}
+
+	log.Debug("civisibility.unshallow: completed successfully")
+	tmpso := sync.Once{}
+	isAShallowCloneRepositoryOnce.Store(&tmpso)
+	return true, nil
+}
+
+// GetGitDiff retrieves the diff between two Git commits using the `git diff` command.
+func GetGitDiff(baseCommit, headCommit string) (string, error) {
+	// git diff -U0 --word-diff=porcelain {baseCommit} {headCommit}
+	if len(baseCommit) != 40 {
+		// not a commit sha
+		var re = regexp.MustCompile(`(?i)^[a-f0-9]{40}$`)
+		if !re.MatchString(baseCommit) {
+			// first let's get the remote
+			remoteOut, err := execGitString(telemetry.GetRemoteCommandsType, "remote", "show")
+			if err != nil {
+				log.Debug("civisibility.git: error on git remote show: %s , error: %s", remoteOut, err.Error())
+			}
+			if remoteOut == "" {
+				remoteOut = "origin"
+			}
+
+			// let's make sure the base branch ref is available locally
+			fetchOut, err := execGitString(telemetry.GetHeadCommandsType, "fetch", remoteOut, baseCommit, "--depth=1")
+			if err != nil {
+				log.Debug("civisibility.git: error fetching %s/%s: %s, error: %s", remoteOut, baseCommit, fetchOut, err.Error())
+			}
+
+			// then let's build the remote-prefixed branch name
+			baseCommit = fmt.Sprintf("%s/%s", remoteOut, baseCommit)
+		}
+	}
+
+	log.Debug("civisibility.git: getting the diff between %s and %s", baseCommit, headCommit)
+	out, err := execGitString(telemetry.DiffCommandType, "diff", "-U0", "--word-diff=porcelain", baseCommit, headCommit)
+	if err != nil {
+		return "", fmt.Errorf("civisibility.git: error getting the diff from %s to %s: %s | %s", baseCommit, headCommit, err.Error(), out)
+	}
+	if out == "" {
+		return "", fmt.Errorf("civisibility.git: error getting the diff from %s to %s: empty output", baseCommit, headCommit)
+	}
+	return out, nil
+}
+
+// filterSensitiveInfo removes sensitive information from a given URL using a regular expression.
+// It replaces the user credentials part of the URL (if present) with an empty string.
+//
+// Parameters:
+//
+//	url - The URL string from which sensitive information should be filtered out.
+//
+// Returns:
+//
+//	The sanitized URL string with sensitive information removed.
+func filterSensitiveInfo(url string) string {
+	return regexpSensitiveInfo.ReplaceAllString(url, "$1")
+}
+
+// isAShallowCloneRepository checks if the local Git repository is a shallow clone.
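+// It shells out to `git rev-parse --is-shallow-repository`, which prints
+// "true" or "false", and caches the result until UnshallowGitRepository
+// stores a fresh sync.Once to force a re-check.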
+func isAShallowCloneRepository() (bool, error) {
+	var fErr error
+	sOnce := isAShallowCloneRepositoryOnce.Load()
+	if sOnce == nil {
+		sOnce = &sync.Once{}
+		isAShallowCloneRepositoryOnce.Store(sOnce)
+	}
+	sOnce.Do(func() {
+		// git rev-parse --is-shallow-repository
+		out, err := execGitString(telemetry.CheckShallowCommandsType, "rev-parse", "--is-shallow-repository")
+		if err != nil {
+			isAShallowCloneRepositoryValue = false
+			fErr = err
+			return
+		}
+
+		isAShallowCloneRepositoryValue = strings.TrimSpace(out) == "true"
+	})
+
+	return isAShallowCloneRepositoryValue, fErr
+}
+
+// hasTheGitLogHaveMoreThanOneCommits checks if the local Git repository has more than one commit.
+func hasTheGitLogHaveMoreThanOneCommits() (bool, error) {
+	// git log --format=oneline -n 2
+	out, err := execGitString(telemetry.CheckShallowCommandsType, "log", "--format=oneline", "-n", "2")
+	if err != nil || out == "" {
+		return false, err
+	}
+
+	commitsCount := strings.Count(out, "\n") + 1
+	return commitsCount > 1, nil
+}
+
+// getObjectsSha gets the object SHAs from the git repository based on the commits to include and exclude
+func getObjectsSha(commitsToInclude []string, commitsToExclude []string) []string {
+	// git rev-list --objects --no-object-names --filter=blob:none --since="1 month ago" HEAD ^<excluded>... <included>...
+	commitsToExcludeArgs := make([]string, len(commitsToExclude))
+	for i, c := range commitsToExclude {
+		commitsToExcludeArgs[i] = "^" + c
+	}
+	args := append([]string{"rev-list", "--objects", "--no-object-names", "--filter=blob:none", "--since=\"1 month ago\"", "HEAD"}, append(commitsToExcludeArgs, commitsToInclude...)...)
+	out, err := execGitString(telemetry.GetObjectsCommandsType, args...)
+	if err != nil {
+		return []string{}
+	}
+	return strings.Split(out, "\n")
+}
+
+// CreatePackFiles creates pack files from the given commits to include and exclude.
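+//
+// A minimal usage sketch (illustrative; the placeholder SHAs are assumptions):
+//
+//	packs := CreatePackFiles([]string{"<head-sha>"}, []string{"<base-sha>"})
+//	for _, p := range packs {
+//		// p is the path of a .pack file of at most MaxPackFileSizeInMb megabytes
+//	}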
+func CreatePackFiles(commitsToInclude []string, commitsToExclude []string) []string {
+	// get the objects shas to send
+	objectsShas := getObjectsSha(commitsToInclude, commitsToExclude)
+	if len(objectsShas) == 0 {
+		log.Debug("civisibility: no objects found to send")
+		return nil
+	}
+
+	// create the objects shas string
+	var objectsShasString string
+	for _, objectSha := range objectsShas {
+		objectsShasString += objectSha + "\n"
+	}
+
+	// get a temporary path to store the pack files
+	temporaryPath, err := os.MkdirTemp("", "pack-objects")
+	if err != nil {
+		log.Warn("civisibility: error creating temporary directory: %s", err.Error())
+		return nil
+	}
+
+	// git pack-objects --compression=9 --max-pack-size={MaxPackFileSizeInMb}m "{temporaryPath}"
+	out, err := execGitStringWithInput(telemetry.PackObjectsCommandsType, objectsShasString,
+		"pack-objects", "--compression=9", "--max-pack-size="+strconv.Itoa(MaxPackFileSizeInMb)+"m", temporaryPath+"/")
+	if err != nil {
+		log.Warn("civisibility: error creating pack files: %s", err.Error())
+		return nil
+	}
+
+	// construct the full path to the pack files
+	var packFiles []string
+	for _, packFile := range strings.Split(out, "\n") {
+		file := filepath.Join(temporaryPath, fmt.Sprintf("-%s.pack", packFile))
+
+		// check if the pack file exists
+		if _, err := os.Stat(file); os.IsNotExist(err) {
+			log.Warn("civisibility: pack file not found: %s", file)
+			continue
+		}
+
+		packFiles = append(packFiles, file)
+	}
+
+	return packFiles
+}
+
+// getParentGitFolder searches from the given directory upwards to find the nearest .git directory.
+func getParentGitFolder(innerFolder string) (string, error) {
+	if innerFolder == "" {
+		return "", nil
+	}
+
+	dir := innerFolder
+	for {
+		gitDirPath := filepath.Join(dir, ".git")
+		info, err := os.Stat(gitDirPath)
+		if err == nil && info.IsDir() {
+			return gitDirPath, nil
+		}
+		if err != nil && !os.IsNotExist(err) {
+			return "", err
+		}
+
+		parentDir := filepath.Dir(dir)
+		// If we've reached the root directory, stop the loop.
+ if parentDir == dir { + break + } + dir = parentDir + } + + return "", nil +} + +// isDefaultBranch checks if a branch is the default branch +func isDefaultBranch(branch, defaultBranch, remoteName string) bool { + return branch == defaultBranch || branch == remoteName+"/"+defaultBranch +} + +// detectDefaultBranch detects the default branch using git symbolic-ref +func detectDefaultBranch(remoteName string) (string, error) { + // Try to get the default branch using symbolic-ref + defaultRef, err := execGitString(telemetry.SymbolicRefCommandType, "symbolic-ref", "--quiet", "--short", "refs/remotes/"+remoteName+"/HEAD") + if err == nil && defaultRef != "" { + // Remove the remote prefix to get just the branch name + defaultBranch := removeRemotePrefix(defaultRef, remoteName) + if defaultBranch != "" { + log.Debug("civisibility.git: detected default branch from symbolic-ref: %s", defaultBranch) + return defaultBranch, nil + } + } + + log.Debug("civisibility.git: could not get symbolic-ref, trying to find a fallback (main, master)...") + + // Fallback to checking for main/master + fallbackBranch := findFallbackDefaultBranch(remoteName) + if fallbackBranch != "" { + return fallbackBranch, nil + } + + return "", errors.New("could not detect default branch") +} + +// findFallbackDefaultBranch tries to find main or master as fallback default branches +func findFallbackDefaultBranch(remoteName string) string { + fallbackBranches := []string{"main", "master"} + + for _, fallback := range fallbackBranches { + // Check if the remote branch exists + _, err := execGitString(telemetry.ShowRefCommandType, "show-ref", "--verify", "--quiet", "refs/remotes/"+remoteName+"/"+fallback) + if err == nil { + log.Debug("civisibility.git: found fallback default branch: %s", fallback) + return fallback + } + } + + return "" +} + +// GetBaseBranchSha detects the base branch SHA using the algorithm +func GetBaseBranchSha(defaultBranch string) (string, error) { + if !isGitFound() { + return "", errors.New("git executable not found") + } + + // Step 1 - collect info we'll need later + + // Step 1a - remote_name + remoteName, err := getRemoteName() + if err != nil { + return "", fmt.Errorf("failed to get remote name: %w", err) + } + + // Step 1b - source_branch + sourceBranch, err := getSourceBranch() + if err != nil { + return "", fmt.Errorf("failed to get source branch: %w", err) + } + + // Step 1c - Detect default branch automatically + detectedDefaultBranch, err := detectDefaultBranch(remoteName) + if err != nil { + // Fallback to the provided parameter if detection fails + if defaultBranch == "" { + defaultBranch = "main" // ultimate fallback + } + log.Debug("civisibility.git: failed to detect default branch, using fallback: %s", defaultBranch) + detectedDefaultBranch = defaultBranch + } + + // Step 2 - build candidate branches list and fetch them from remote + var candidateBranches []string + + // Check if we have git.pull_request.base_branch from CI provider environment variables + ciTags := GetCITags() + gitPrBaseBranch := ciTags[constants.GitPrBaseBranch] + + if gitPrBaseBranch != "" { + // Step 2b - we have git.pull_request.base_branch + log.Debug("civisibility.git: using git.pull_request.base_branch from CI: %s", gitPrBaseBranch) + checkAndFetchBranch(gitPrBaseBranch, remoteName) + candidateBranches = []string{gitPrBaseBranch} + } else { + // Step 2a - we don't have git.pull_request.base_branch + // Fetch all possible base branches from remote + for _, branch := range possibleBaseBranches { + 
checkAndFetchBranch(branch, remoteName) + } + + // Get the list of remote branches present in local repo and see which ones are base-like + remoteBranches, err := getRemoteBranches(remoteName) + if err != nil { + return "", fmt.Errorf("failed to get remote branches: %w", err) + } + + for _, branch := range remoteBranches { + if branch != sourceBranch && isMainLikeBranch(branch, remoteName) { + candidateBranches = append(candidateBranches, branch) + } + } + } + + if len(candidateBranches) == 0 { + return "", errors.New("no candidate base branches found") + } + + // Step 3 - find the best base branch + if len(candidateBranches) == 1 { + // Step 3a - single candidate + baseSha, err := execGitString(telemetry.MergeBaseCommandType, "merge-base", candidateBranches[0], sourceBranch) + if err != nil { + return "", fmt.Errorf("failed to find merge base for %s and %s: %w", candidateBranches[0], sourceBranch, err) + } + return baseSha, nil + } + + // Step 3b - multiple candidates + metrics, err := computeBranchMetrics(candidateBranches, sourceBranch) + if err != nil { + return "", fmt.Errorf("failed to compute branch metrics: %w", err) + } + + baseSha := findBestBranch(metrics, detectedDefaultBranch, remoteName) + if baseSha == "" { + return "", errors.New("failed to find best base branch") + } + + return baseSha, nil +} + +// getRemoteName determines the remote name using the algorithm from algorithm.md +func getRemoteName() (string, error) { + // Try to find remote from upstream tracking + upstream, err := execGitString(telemetry.GetRemoteUpstreamTrackingCommandsType, "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{upstream}") + if err == nil && upstream != "" { + parts := strings.Split(upstream, "/") + if len(parts) > 0 { + return parts[0], nil + } + } + + // Fallback to first remote if no upstream + remotes, err := execGitString(telemetry.GetRemoteCommandsType, "remote") + if err != nil { + return "origin", nil // ultimate fallback + } + + lines := strings.Split(strings.TrimSpace(remotes), "\n") + if len(lines) > 0 && lines[0] != "" { + return lines[0], nil + } + + return "origin", nil +} + +// getSourceBranch gets the current branch name +func getSourceBranch() (string, error) { + return execGitString(telemetry.GetBranchCommandsType, "rev-parse", "--abbrev-ref", "HEAD") +} + +// isMainLikeBranch checks if a branch name matches the base-like branch pattern +func isMainLikeBranch(branchName, remoteName string) bool { + shortBranchName := removeRemotePrefix(branchName, remoteName) + return baseLikeBranchFilter.MatchString(shortBranchName) +} + +// removeRemotePrefix removes the remote prefix from a branch name +func removeRemotePrefix(branchName, remoteName string) string { + prefix := remoteName + "/" + if strings.HasPrefix(branchName, prefix) { + return strings.TrimPrefix(branchName, prefix) + } + return branchName +} + +// checkAndFetchBranch checks if a branch exists and fetches it if needed +func checkAndFetchBranch(branch, remoteName string) { + // Check if branch exists locally (as remote ref) + _, err := execGitString(telemetry.ShowRefCommandType, "show-ref", "--verify", "--quiet", "refs/remotes/"+remoteName+"/"+branch) + if err == nil { + return // branch exists locally + } + + // Check if branch exists in remote + remoteHeads, err := execGitString(telemetry.LsRemoteHeadsCommandType, "ls-remote", "--heads", remoteName, branch) + if err != nil || remoteHeads == "" { + return // branch doesn't exist in remote + } + + // Fetch the latest commit for this branch from remote (without 
creating a local branch) + _, err = execGitString(telemetry.FetchCommandType, "fetch", "--depth", "1", remoteName, branch) + if err != nil { + log.Debug("civisibility.git: failed to fetch branch %s: %s", branch, err.Error()) + } +} + +// getRemoteBranches gets the list of remote tracking branches only (for Step 2a in the algorithm) +func getRemoteBranches(remoteName string) ([]string, error) { + // Get remote tracking branches as per the algorithm update + remoteOut, err := execGitString(telemetry.ForEachRefCommandType, "for-each-ref", "--format=%(refname:short)", "refs/remotes/"+remoteName) + if err != nil { + return nil, fmt.Errorf("failed to get remote branches: %w", err) + } + + var branches []string + if remoteOut != "" { + remoteBranches := strings.Split(strings.TrimSpace(remoteOut), "\n") + for _, branch := range remoteBranches { + if strings.TrimSpace(branch) != "" { + branches = append(branches, strings.TrimSpace(branch)) + } + } + } + + return branches, nil +} + +// computeBranchMetrics calculates metrics for candidate branches +func computeBranchMetrics(candidates []string, sourceBranch string) (map[string]branchMetrics, error) { + metrics := make(map[string]branchMetrics) + + for _, candidate := range candidates { + // Find common ancestor + baseSha, err := execGitString(telemetry.MergeBaseCommandType, "merge-base", candidate, sourceBranch) + if err != nil || baseSha == "" { + continue + } + + // Count commits ahead/behind + counts, err := execGitString(telemetry.RevListCommandType, "rev-list", "--left-right", "--count", candidate+"..."+sourceBranch) + if err != nil { + continue + } + + parts := strings.Fields(counts) + if len(parts) != 2 { + continue + } + + behind, err1 := strconv.Atoi(parts[0]) + ahead, err2 := strconv.Atoi(parts[1]) + if err1 != nil || err2 != nil { + continue + } + + metrics[candidate] = branchMetrics{ + behind: behind, + ahead: ahead, + baseSha: baseSha, + } + } + + return metrics, nil +} + +// findBestBranch finds the best branch from metrics, preferring the default branch on a tie +func findBestBranch(metrics map[string]branchMetrics, defaultBranch, remoteName string) string { + if len(metrics) == 0 { + return "" + } + + var bestBranch string + bestScore := []int{int(^uint(0) >> 1), 1, 1} // [ahead, is_not_default, is_remote_prefixed] - max int, not default, remote prefixed + + for branch, data := range metrics { + isDefault := 1 + if isDefaultBranch(branch, defaultBranch, remoteName) { + isDefault = 0 + } + + // Check if this branch is remote-prefixed (prefer exact branch names) + isRemotePrefixed := 0 + if strings.HasPrefix(branch, remoteName+"/") { + isRemotePrefixed = 1 + } + + score := []int{data.ahead, isDefault, isRemotePrefixed} + + // Compare scores: prefer smaller ahead count, then prefer default branch, then prefer exact branch names + if score[0] < bestScore[0] || + (score[0] == bestScore[0] && score[1] < bestScore[1]) || + (score[0] == bestScore[0] && score[1] == bestScore[1] && score[2] < bestScore[2]) { + bestScore = score + bestBranch = branch + } + } + + if bestBranch != "" { + return metrics[bestBranch].baseSha + } + return "" +}
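
The selection above reduces to a small ranking rule: for each candidate, `git rev-list --left-right --count candidate...source` yields "<behind> <ahead>" counts, and the candidate with the fewest commits ahead wins, with the default branch breaking ties. A minimal standalone sketch of that rule, shelling out to `git` directly instead of going through `execGitString` and the telemetry counters, and omitting the third tie-break on remote-prefixed names (`aheadCount` and `pickBase` are illustrative names, not part of this patch):

```go
package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

// aheadCount returns how many commits source has that candidate does not,
// using the same rev-list invocation as computeBranchMetrics above.
func aheadCount(candidate, source string) (int, error) {
	out, err := exec.Command("git", "rev-list", "--left-right", "--count", candidate+"..."+source).Output()
	if err != nil {
		return 0, err
	}
	parts := strings.Fields(string(out)) // "<behind> <ahead>"
	if len(parts) != 2 {
		return 0, fmt.Errorf("unexpected rev-list output: %q", out)
	}
	return strconv.Atoi(parts[1])
}

// pickBase applies findBestBranch's primary rule: fewest commits ahead
// wins, and the default branch wins ties.
func pickBase(candidates []string, source, defaultBranch string) string {
	best, bestAhead := "", int(^uint(0)>>1)
	for _, c := range candidates {
		ahead, err := aheadCount(c, source)
		if err != nil {
			continue
		}
		if ahead < bestAhead || (ahead == bestAhead && c == defaultBranch) {
			best, bestAhead = c, ahead
		}
	}
	return best
}

func main() {
	fmt.Println(pickBase([]string{"origin/main", "origin/develop"}, "HEAD", "origin/main"))
}
```

Minimizing the ahead count effectively picks the candidate the source branch diverged from most recently, which is also why `findBestBranch` returns the stored merge-base SHA rather than the branch name.
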
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/home.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/home.go new file mode 100644 index 00000000..8cf43e81 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/home.go @@ -0,0 +1,127 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package utils + +import ( + "bytes" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// This code is based on: https://github.com/mitchellh/go-homedir/blob/v1.1.0/homedir.go (MIT License) + +// ExpandPath expands a file path that starts with '~' to the user's home directory. +// If the path does not start with '~', it is returned unchanged. +// +// Parameters: +// +// path - The file path to be expanded. +// +// Returns: +// +// The expanded file path, with '~' replaced by the user's home directory, if applicable. +func ExpandPath(path string) string { + if len(path) == 0 || path[0] != '~' { + return path + } + + // If the second character is not '/' or '\', return the path unchanged + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return path + } + + homeFolder := getHomeDir() + if len(homeFolder) > 0 { + return filepath.Join(homeFolder, path[1:]) + } + + return path +} + +// getHomeDir returns the home directory of the current user. +// The method used to determine the home directory depends on the operating system. +// +// On Windows, it prefers the HOME environment variable, then USERPROFILE, and finally combines HOMEDRIVE and HOMEPATH. +// On Unix-like systems, it prefers the HOME environment variable, and falls back to various shell commands +// to determine the home directory if necessary. +// +// Returns: +// +// The home directory of the current user. +func getHomeDir() (homeDir string) { + defer func() { + log.Debug("civisibility: home directory: %s", homeDir) + }() + + if runtime.GOOS == "windows" { + if home := env.Get("HOME"); home != "" { + // First prefer the HOME environment variable + return home + } + if userProfile := env.Get("USERPROFILE"); userProfile != "" { + // Then prefer the USERPROFILE environment variable + return userProfile + } + + homeDrive := env.Get("HOMEDRIVE") + homePath := env.Get("HOMEPATH") + return homeDrive + homePath + } + + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, environment variables are lowercase. + homeEnv = "home" + } + + if home := env.Get(homeEnv); home != "" { + // Prefer the HOME environment variable + return home + } + + var stdout bytes.Buffer + if runtime.GOOS == "darwin" { + // On macOS, use dscl to read the NFSHomeDirectory + cmd := exec.Command("sh", "-c", `dscl -q .
-read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result + } + } + } else { + // On other Unix-like systems, use getent to read the passwd entry for the current user + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // The passwd entry is in the format: username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5] + } + } + } + } + + // If all else fails, use the shell to determine the home directory + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + return strings.TrimSpace(stdout.String()) + } + + return "" +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/names.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/names.go new file mode 100644 index 00000000..94eacdb1 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/names.go @@ -0,0 +1,93 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package utils + +import ( + "bytes" + "fmt" + "path/filepath" + "runtime" + "slices" + "strings" +) + +var ( + // ignoredFunctionsFromStackTrace array with functions we want to ignore on the final stacktrace (because they don't add anything useful) + ignoredFunctionsFromStackTrace = []string{"runtime.gopanic", "runtime.panicmem", "runtime.sigpanic"} +) + +// GetModuleAndSuiteName extracts the module name and suite name from a given program counter (pc). +// This function utilizes runtime.FuncForPC to retrieve the full function name associated with the +// program counter, then splits the string to separate the package name from the function name. +// +// Example 1: +// +// Input: github.com/DataDog/dd-sdk-go-testing.TestRun +// Output: +// module: github.com/DataDog/dd-sdk-go-testing +// suite: testing_test.go +// +// Example 2: +// +// Input: github.com/DataDog/dd-sdk-go-testing.TestRun.func1 +// Output: +// module: github.com/DataDog/dd-sdk-go-testing +// suite: testing_test.go +// +// Parameters: +// +// pc - The program counter for which the module and suite name should be retrieved. +// +// Returns: +// +// module - The module name extracted from the full function name. +// suite - The base name of the file where the function is located. +func GetModuleAndSuiteName(pc uintptr) (module string, suite string) { + funcValue := runtime.FuncForPC(pc) + funcFullName := funcValue.Name() + lastSlash := strings.LastIndexByte(funcFullName, '/') + if lastSlash < 0 { + lastSlash = 0 + } + firstDot := strings.IndexByte(funcFullName[lastSlash:], '.') + lastSlash + file, _ := funcValue.FileLine(funcValue.Entry()) + return funcFullName[:firstDot], filepath.Base(file) +} + +// GetStacktrace retrieves the current stack trace, skipping a specified number of frames. + +// This function captures the stack trace of the current goroutine, formats it, and returns it as a string.
+// It uses runtime.Callers to capture the program counters of the stack frames and runtime.CallersFrames +// to convert these program counters into readable frames. The stack trace is formatted to include the function +// name, file name, and line number of each frame. +// +// Parameters: +// +// skip - The number of stack frames to skip before capturing the stack trace. +// +// Returns: +// +// A string representation of the current stack trace, with each frame on a new line. +func GetStacktrace(skip int) string { + pcs := make([]uintptr, 256) + total := runtime.Callers(skip+2, pcs) + frames := runtime.CallersFrames(pcs[:total]) + buffer := new(bytes.Buffer) + for { + frame, ok := frames.Next() + if !ok { + break + } + // let's check if we need to ignore this frame + if slices.Contains(ignoredFunctionsFromStackTrace, frame.Function) { + continue + } + // writing frame to the buffer + _, _ = fmt.Fprintf(buffer, "%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line) + } + return buffer.String() +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry/telemetry.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry/telemetry.go new file mode 100644 index 00000000..53431615 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry/telemetry.go @@ -0,0 +1,173 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package telemetry + +// TestingFramework is a type for testing frameworks +type TestingFramework string + +const ( + GoTestingFramework TestingFramework = "test_framework:testing" + UnknownFramework TestingFramework = "test_framework:unknown" ) + +// TestSessionType is a type for test session event types +type TestSessionType []string + +var ( + AppVeyorTestSessionType TestSessionType = []string{"provider:appveyor"} + AzurePipelinesTestSessionType TestSessionType = []string{"provider:azp"} + BitbucketTestSessionType TestSessionType = []string{"provider:bitbucket"} + BitRiseTestSessionType TestSessionType = []string{"provider:bitrise"} + BuildKiteTestSessionType TestSessionType = []string{"provider:buildkite"} + CircleCiTestSessionType TestSessionType = []string{"provider:circleci"} + CodeFreshTestSessionType TestSessionType = []string{"provider:codefresh"} + GithubActionsTestSessionType TestSessionType = []string{"provider:githubactions"} + GitlabTestSessionType TestSessionType = []string{"provider:gitlab"} + JenkinsTestSessionType TestSessionType = []string{"provider:jenkins"} + TeamcityTestSessionType TestSessionType = []string{"provider:teamcity"} + TravisCiTestSessionType TestSessionType = []string{"provider:travisci"} + BuddyCiTestSessionType TestSessionType = []string{"provider:buddyci"} + AwsCodePipelineSessionType TestSessionType = []string{"provider:aws"} + UnsupportedTestSessionType TestSessionType = []string{"provider:unsupported"} + + IsAutoInstrumentationTestSessionType TestSessionType = []string{"auto_injected:true"} +) + +// TestingEventType is a type for testing event types +type TestingEventType []string + +var ( + TestEventType TestingEventType = []string{"event_type:test"} + SuiteEventType TestingEventType = []string{"event_type:suite"} + ModuleEventType TestingEventType = []string{"event_type:module"} + SessionEventType TestingEventType =
[]string{"event_type:session"} + + UnsupportedCiEventType TestingEventType = []string{"is_unsupported_ci"} + HasCodeOwnerEventType TestingEventType = []string{"has_codeowner"} + IsNewEventType TestingEventType = []string{"is_new:true"} + IsRetryEventType TestingEventType = []string{"is_retry:true"} + EfdAbortSlowEventType TestingEventType = []string{"early_flake_detection_abort_reason:slow"} + IsBenchmarkEventType TestingEventType = []string{"is_benchmark"} + IsAttemptToFixEventType TestingEventType = []string{"is_attempt_to_fix:true"} + IsQuarantinedEventType TestingEventType = []string{"is_quarantined:true"} + IsDisabledEventType TestingEventType = []string{"is_disabled:true"} + HasFailedAllRetriesEventType TestingEventType = []string{"has_failed_all_retries:true"} +) + +// CoverageLibraryType is a type for coverage library types +type CoverageLibraryType string + +const ( + DefaultCoverageLibraryType CoverageLibraryType = "library:default" + UnknownCoverageLibraryType CoverageLibraryType = "library:unknown" +) + +// EndpointType is a type for endpoint types +type EndpointType string + +const ( + TestCycleEndpointType EndpointType = "endpoint:test_cycle" + CodeCoverageEndpointType EndpointType = "endpoint:code_coverage" +) + +// ErrorType is a type for error types +type ErrorType []string + +var ( + TimeoutErrorType ErrorType = []string{"error_type:timeout"} + NetworkErrorType ErrorType = []string{"error_type:network"} + StatusCodeErrorType ErrorType = []string{"error_type:status_code"} + StatusCode4xxErrorType ErrorType = []string{"error_type:status_code_4xx_response"} + StatusCode5xxErrorType ErrorType = []string{"error_type:status_code_5xx_response"} + StatusCode400ErrorType ErrorType = []string{"error_type:status_code_4xx_response", "status_code:400"} + StatusCode401ErrorType ErrorType = []string{"error_type:status_code_4xx_response", "status_code:401"} + StatusCode403ErrorType ErrorType = []string{"error_type:status_code_4xx_response", "status_code:403"} + StatusCode404ErrorType ErrorType = []string{"error_type:status_code_4xx_response", "status_code:404"} + StatusCode408ErrorType ErrorType = []string{"error_type:status_code_4xx_response", "status_code:408"} + StatusCode429ErrorType ErrorType = []string{"error_type:status_code_4xx_response", "status_code:429"} +) + +// CommandType is a type for commands types +type CommandType string + +const ( + NotSpecifiedCommandsType CommandType = "" + GetRepositoryCommandsType CommandType = "command:get_repository" + GetBranchCommandsType CommandType = "command:get_branch" + GetRemoteCommandsType CommandType = "command:get_remote" + GetRemoteUpstreamTrackingCommandsType CommandType = "command:get_remote_upstream_tracking" + GetHeadCommandsType CommandType = "command:get_head" + CheckShallowCommandsType CommandType = "command:check_shallow" + UnshallowCommandsType CommandType = "command:unshallow" + GetLocalCommitsCommandsType CommandType = "command:get_local_commits" + GetObjectsCommandsType CommandType = "command:get_objects" + PackObjectsCommandsType CommandType = "command:pack_objects" + DiffCommandType CommandType = "command:diff" + ShowRefCommandType CommandType = "command:show_ref" + LsRemoteHeadsCommandType CommandType = "command:ls_remote_heads" + FetchCommandType CommandType = "command:fetch" + ForEachRefCommandType CommandType = "command:for_each_ref" + MergeBaseCommandType CommandType = "command:merge_base" + RevListCommandType CommandType = "command:rev_list" + SymbolicRefCommandType CommandType = "command:symbolic_ref" + 
GetWorkingDirectoryCommandType CommandType = "command:get_working_directory" + GetGitCommitInfoCommandType CommandType = "command:get_git_info" + GitAddPermissionCommandType CommandType = "command:git_add_permission" +) + +// CommandExitCodeType is a type for command exit codes +type CommandExitCodeType string + +const ( + MissingCommandExitCode CommandExitCodeType = "exit_code:missing" + UnknownCommandExitCode CommandExitCodeType = "exit_code:unknown" + ECMinus1CommandExitCode CommandExitCodeType = "exit_code:-1" + EC1CommandExitCode CommandExitCodeType = "exit_code:1" + EC2CommandExitCode CommandExitCodeType = "exit_code:2" + EC127CommandExitCode CommandExitCodeType = "exit_code:127" + EC128CommandExitCode CommandExitCodeType = "exit_code:128" + EC129CommandExitCode CommandExitCodeType = "exit_code:129" +) + +// RequestCompressedType is a type for request compressed types +type RequestCompressedType string + +const ( + UncompressedRequestCompressedType RequestCompressedType = "" + CompressedRequestCompressedType RequestCompressedType = "rq_compressed:true" +) + +// ResponseCompressedType is a type for response compressed types +type ResponseCompressedType string + +const ( + UncompressedResponseCompressedType ResponseCompressedType = "" + CompressedResponseCompressedType ResponseCompressedType = "rs_compressed:true" +) + +// SettingsResponseType is a type for settings response types +type SettingsResponseType []string + +var ( + CoverageEnabledSettingsResponseType SettingsResponseType = []string{"coverage_enabled"} + ItrSkipEnabledSettingsResponseType SettingsResponseType = []string{"itrskip_enabled"} + EfdEnabledSettingsResponseType SettingsResponseType = []string{"early_flake_detection_enabled:true"} + FlakyTestRetriesEnabledSettingsResponseType SettingsResponseType = []string{"flaky_test_retries_enabled:true"} + TestManagementEnabledSettingsResponseType SettingsResponseType = []string{"test_management_enabled:true"} +) + +// removeEmptyStrings removes empty string values from a slice. +func removeEmptyStrings(s []string) []string { + result := make([]string, len(s)) + n := 0 + for _, str := range s { + if str != "" { + result[n] = str + n++ + } + } + return result[:n] +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry/telemetry_count.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry/telemetry_count.go new file mode 100644 index 00000000..ed686b11 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry/telemetry_count.go @@ -0,0 +1,286 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
+ +package telemetry + +import ( + "github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" +) + +func getTestingFramework(testingFramework string) TestingFramework { + telemetryFramework := UnknownFramework + if testingFramework == "golang.org/pkg/testing" { + telemetryFramework = GoTestingFramework + } + return telemetryFramework +} + +func GetErrorTypeFromStatusCode(statusCode int) ErrorType { + switch statusCode { + case 0: + return NetworkErrorType + case 400: + return StatusCode400ErrorType + case 401: + return StatusCode401ErrorType + case 403: + return StatusCode403ErrorType + case 404: + return StatusCode404ErrorType + case 408: + return StatusCode408ErrorType + case 429: + return StatusCode429ErrorType + default: + if statusCode >= 500 && statusCode < 600 { + return StatusCode5xxErrorType + } else if statusCode >= 400 && statusCode < 500 { + return StatusCode4xxErrorType + } + return StatusCodeErrorType + } +} + +func getProviderTestSessionTypeFromProviderString(provider string) TestSessionType { + switch provider { + case "appveyor": + return AppVeyorTestSessionType + case "azurepipelines": + return AzurePipelinesTestSessionType + case "bitbucket": + return BitbucketTestSessionType + case "bitrise": + return BitRiseTestSessionType + case "buildkite": + return BuildKiteTestSessionType + case "circleci": + return CircleCiTestSessionType + case "codefresh": + return CodeFreshTestSessionType + case "github": + return GithubActionsTestSessionType + case "gitlab": + return GitlabTestSessionType + case "jenkins": + return JenkinsTestSessionType + case "teamcity": + return TeamcityTestSessionType + case "travisci": + return TravisCiTestSessionType + case "buddy": + return BuddyCiTestSessionType + case "awscodepipeline": + return AwsCodePipelineSessionType + default: + return UnsupportedTestSessionType + } +} + +func TestSession(providerName string) { + var tags []string + tags = append(tags, getProviderTestSessionTypeFromProviderString(providerName)...) + if env.Get(constants.CIVisibilityAutoInstrumentationProviderEnvironmentVariable) != "" { + tags = append(tags, IsAutoInstrumentationTestSessionType...) + } + telemetry.Count(telemetry.NamespaceCIVisibility, "test_session", removeEmptyStrings(tags)).Submit(1.0) +} + +// EventCreated the number of events created by CI Visibility +func EventCreated(testingFramework string, eventType TestingEventType) { + tags := []string{string(getTestingFramework(testingFramework))} + tags = append(tags, eventType...) + telemetry.Count(telemetry.NamespaceCIVisibility, "event_created", removeEmptyStrings(tags)).Submit(1.0) +} + +// EventFinished the number of events finished by CI Visibility +func EventFinished(testingFramework string, eventType TestingEventType) { + tags := []string{string(getTestingFramework(testingFramework))} + tags = append(tags, eventType...) 
+ telemetry.Count(telemetry.NamespaceCIVisibility, "event_finished", removeEmptyStrings(tags)).Submit(1.0) +} + +// CodeCoverageStarted the number of code coverage start calls by CI Visibility +func CodeCoverageStarted(testingFramework string, coverageLibraryType CoverageLibraryType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "code_coverage_started", removeEmptyStrings([]string{ + string(getTestingFramework(testingFramework)), + string(coverageLibraryType), + })).Submit(1.0) +} + +// CodeCoverageFinished the number of code coverage finished calls by CI Visibility +func CodeCoverageFinished(testingFramework string, coverageLibraryType CoverageLibraryType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "code_coverage_finished", removeEmptyStrings([]string{ + string(getTestingFramework(testingFramework)), + string(coverageLibraryType), + })).Submit(1.0) +} + +// EventsEnqueueForSerialization the number of events enqueued for serialization by CI Visibility +func EventsEnqueueForSerialization() { + telemetry.Count(telemetry.NamespaceCIVisibility, "events_enqueued_for_serialization", nil).Submit(1.0) +} + +// EndpointPayloadRequests the number of requests sent to the endpoint, regardless of success, tagged by endpoint type +func EndpointPayloadRequests(endpointType EndpointType, requestCompressedType RequestCompressedType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "endpoint_payload.requests", removeEmptyStrings([]string{ + string(endpointType), + string(requestCompressedType), + })).Submit(1.0) +} + +// EndpointPayloadRequestsErrors the number of requests sent to the endpoint that errored, tagged by the error type, endpoint type, and status code +func EndpointPayloadRequestsErrors(endpointType EndpointType, errorType ErrorType) { + tags := []string{string(endpointType)} + tags = append(tags, errorType...) + telemetry.Count(telemetry.NamespaceCIVisibility, "endpoint_payload.requests_errors", removeEmptyStrings(tags)).Submit(1.0) +} + +// EndpointPayloadDropped the number of payloads dropped after all retries by CI Visibility +func EndpointPayloadDropped(endpointType EndpointType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "endpoint_payload.dropped", removeEmptyStrings([]string{ + string(endpointType), + })).Submit(1.0) +} + +// GitCommand the number of git commands executed by CI Visibility +func GitCommand(commandType CommandType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "git.command", removeEmptyStrings([]string{ + string(commandType), + })).Submit(1.0) +} + +// GitCommandErrors the number of git commands that errored by CI Visibility +func GitCommandErrors(commandType CommandType, exitCode CommandExitCodeType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "git.command_errors", removeEmptyStrings([]string{ + string(commandType), + string(exitCode), + })).Submit(1.0) +} + +// GitRequestsSearchCommits the number of requests sent to the search commit endpoint, regardless of success. +func GitRequestsSearchCommits(requestCompressed RequestCompressedType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "git_requests.search_commits", removeEmptyStrings([]string{ + string(requestCompressed), + })).Submit(1.0) +} + +// GitRequestsSearchCommitsErrors the number of requests sent to the search commit endpoint that errored, tagged by the error type.
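
These counters compose from plain tag slices; `GetErrorTypeFromStatusCode` (defined earlier in this file) produces the `ErrorType` fragment. A hypothetical call site in the same package (`reportTestCycleError` is an illustrative name, not part of this patch):

```go
// reportTestCycleError records a failed test-cycle upload, deriving the
// error tags from the HTTP status code.
func reportTestCycleError(statusCode int) {
	// e.g. 404 -> ["error_type:status_code_4xx_response", "status_code:404"]
	errType := GetErrorTypeFromStatusCode(statusCode)
	EndpointPayloadRequestsErrors(TestCycleEndpointType, errType)
}
```
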
+func GitRequestsSearchCommitsErrors(errorType ErrorType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "git_requests.search_commits_errors", removeEmptyStrings(errorType)).Submit(1.0) +} + +// GitRequestsObjectsPack the number of requests sent to the objects pack endpoint, tagged by the request compressed type. +func GitRequestsObjectsPack(requestCompressed RequestCompressedType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "git_requests.objects_pack", removeEmptyStrings([]string{ + string(requestCompressed), + })).Submit(1.0) +} + +// GitRequestsObjectsPackErrors the number of requests sent to the objects pack endpoint that errored, tagged by the error type. +func GitRequestsObjectsPackErrors(errorType ErrorType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "git_requests.objects_pack_errors", removeEmptyStrings(errorType)).Submit(1.0) +} + +// GitRequestsSettings the number of requests sent to the settings endpoint, tagged by the request compressed type. +func GitRequestsSettings(requestCompressed RequestCompressedType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "git_requests.settings", removeEmptyStrings([]string{ + string(requestCompressed), + })).Submit(1.0) +} + +// GitRequestsSettingsErrors the number of requests sent to the settings endpoint that errored, tagged by the error type. +func GitRequestsSettingsErrors(errorType ErrorType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "git_requests.settings_errors", removeEmptyStrings(errorType)).Submit(1.0) +} + +// GitRequestsSettingsResponse the number of settings responses received by CI Visibility, tagged by the settings response type. +func GitRequestsSettingsResponse(settingsResponseType SettingsResponseType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "git_requests.settings_response", removeEmptyStrings(settingsResponseType)).Submit(1.0) +} + +// ITRSkippableTestsRequest the number of requests sent to the ITR skippable tests endpoint, tagged by the request compressed type. +func ITRSkippableTestsRequest(requestCompressed RequestCompressedType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "itr_skippable_tests.request", removeEmptyStrings([]string{ + string(requestCompressed), + })).Submit(1.0) +} + +// ITRSkippableTestsRequestErrors the number of requests sent to the ITR skippable tests endpoint that errored, tagged by the error type. +func ITRSkippableTestsRequestErrors(errorType ErrorType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "itr_skippable_tests.request_errors", removeEmptyStrings(errorType)).Submit(1.0) +} + +// ITRSkippableTestsResponseTests the number of tests received in the ITR skippable tests response by CI Visibility. +func ITRSkippableTestsResponseTests(value float64) { + telemetry.Count(telemetry.NamespaceCIVisibility, "itr_skippable_tests.response_tests", nil).Submit(value) +} + +// ITRSkipped the number of ITR tests skipped by CI Visibility, tagged by the event type. +func ITRSkipped(eventType TestingEventType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "itr_skipped", removeEmptyStrings(eventType)).Submit(1.0) +} + +// ITRUnskippable the number of ITR tests unskippable by CI Visibility, tagged by the event type. 
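
Since `TestingEventType` values are plain `[]string` tag fragments, call sites can concatenate several of them before submitting. A hypothetical helper (illustrative, not part of this patch):

```go
// markSkippedNewTest counts an ITR skip for a test that is also new,
// emitting itr_skipped tagged with event_type:test and is_new:true.
func markSkippedNewTest() {
	evt := make(TestingEventType, 0, len(TestEventType)+len(IsNewEventType))
	evt = append(evt, TestEventType...)
	evt = append(evt, IsNewEventType...)
	ITRSkipped(evt)
}
```
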
+func ITRUnskippable(eventType TestingEventType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "itr_unskippable", removeEmptyStrings(eventType)).Submit(1.0) +} + +// ITRForcedRun the number of tests or test suites that would've been skipped by ITR but were forced to run because of their unskippable status by CI Visibility. +func ITRForcedRun(eventType TestingEventType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "itr_forced_run", removeEmptyStrings(eventType)).Submit(1.0) +} + +// CodeCoverageIsEmpty the number of code coverage payloads that are empty by CI Visibility. +func CodeCoverageIsEmpty() { + telemetry.Count(telemetry.NamespaceCIVisibility, "code_coverage.is_empty", nil).Submit(1.0) +} + +// CodeCoverageErrors the number of errors while processing code coverage by CI Visibility. +func CodeCoverageErrors() { + telemetry.Count(telemetry.NamespaceCIVisibility, "code_coverage.errors", nil).Submit(1.0) +} + +// KnownTestsRequest the number of requests sent to the known tests endpoint, tagged by the request compressed type. +func KnownTestsRequest(requestCompressed RequestCompressedType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "known_tests.request", removeEmptyStrings([]string{ + string(requestCompressed), + })).Submit(1.0) +} + +// KnownTestsRequestErrors the number of requests sent to the known tests endpoint that errored, tagged by the error type. +func KnownTestsRequestErrors(errorType ErrorType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "known_tests.request_errors", removeEmptyStrings(errorType)).Submit(1.0) +} + +// TestManagementTestsRequest the number of requests sent to the test management tests endpoint, tagged by the request compressed type. +func TestManagementTestsRequest(requestCompressed RequestCompressedType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "test_management_tests.request", removeEmptyStrings([]string{ + string(requestCompressed), + })).Submit(1.0) +} + +// TestManagementTestsRequestErrors the number of requests sent to the test management tests endpoint that errored, tagged by the error type. +func TestManagementTestsRequestErrors(errorType ErrorType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "test_management_tests.request_errors", removeEmptyStrings(errorType)).Submit(1.0) +} + +// ImpactedTestsRequest the number of requests sent to the impacted tests endpoint, tagged by the request compressed type. +func ImpactedTestsRequest(requestCompressed RequestCompressedType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "impacted_tests_detection.request", removeEmptyStrings([]string{ + string(requestCompressed), + })).Submit(1.0) +} + +// ImpactedTestsRequestErrors the number of requests sent to the impacted tests endpoint that errored, tagged by the error type. +func ImpactedTestsRequestErrors(errorType ErrorType) { + telemetry.Count(telemetry.NamespaceCIVisibility, "impacted_tests_detection.request_errors", removeEmptyStrings(errorType)).Submit(1.0) +} + +// ImpactedTestsModified the number of impacted tests that were modified by CI Visibility. 
+func ImpactedTestsModified() { + telemetry.Count(telemetry.NamespaceCIVisibility, "impacted_tests_detection.is_modified", nil).Submit(1.0) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry/telemetry_distribution.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry/telemetry_distribution.go new file mode 100644 index 00000000..3d043d31 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry/telemetry_distribution.go @@ -0,0 +1,138 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package telemetry + +import "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + +// EndpointPayloadBytes records the size in bytes of the serialized payload by CI Visibility. +func EndpointPayloadBytes(endpointType EndpointType, value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "endpoint_payload.bytes", removeEmptyStrings([]string{ + string(endpointType), + })).Submit(value) +} + +// EndpointPayloadRequestsMs records the time it takes to send the payload to the endpoint in ms by CI Visibility. +func EndpointPayloadRequestsMs(endpointType EndpointType, value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "endpoint_payload.requests_ms", removeEmptyStrings([]string{ + string(endpointType), + })).Submit(value) +} + +// EndpointPayloadEventsCount records the number of events in the payload sent to the endpoint by CI Visibility. +func EndpointPayloadEventsCount(endpointType EndpointType, value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "endpoint_payload.events_count", removeEmptyStrings([]string{ + string(endpointType), + })).Submit(value) +} + +// EndpointEventsSerializationMs records the time it takes to serialize the events in the payload sent to the endpoint in ms by CI Visibility. +func EndpointEventsSerializationMs(endpointType EndpointType, value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "endpoint_payload.events_serialization_ms", removeEmptyStrings([]string{ + string(endpointType), + })).Submit(value) +} + +// GitCommandMs records the time it takes to execute a git command in ms by CI Visibility. +func GitCommandMs(commandType CommandType, value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "git.command_ms", removeEmptyStrings([]string{ + (string)(commandType), + })).Submit(value) +} + +// GitRequestsSearchCommitsMs records the time it takes to get the response of the search commit request in ms by CI Visibility. +func GitRequestsSearchCommitsMs(responseCompressedType ResponseCompressedType, value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "git_requests.search_commits_ms", removeEmptyStrings([]string{ + (string)(responseCompressedType), + })).Submit(value) +}
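
These `_ms` helpers expect millisecond values. A hypothetical wrapper in the same package (`timedSend` and its `send` callback are illustrative names, not part of this patch; assumes a `time` import):

```go
// timedSend times a single payload upload and feeds the
// endpoint_payload.requests_ms distribution defined above.
func timedSend(endpoint EndpointType, send func() error) error {
	start := time.Now()
	err := send()
	EndpointPayloadRequestsMs(endpoint, float64(time.Since(start).Milliseconds()))
	return err
}
```
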
+ +// GitRequestsObjectsPackMs records the time it takes to get the response of the objects pack request in ms by CI Visibility. +func GitRequestsObjectsPackMs(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "git_requests.objects_pack_ms", nil).Submit(value) +} + +// GitRequestsObjectsPackBytes records the sum of the sizes of the object pack files inside a single payload by CI Visibility. +func GitRequestsObjectsPackBytes(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "git_requests.objects_pack_bytes", nil).Submit(value) +} + +// GitRequestsObjectsPackFiles records the number of files sent in the object pack payload by CI Visibility. +func GitRequestsObjectsPackFiles(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "git_requests.objects_pack_files", nil).Submit(value) +} + +// GitRequestsSettingsMs records the time it takes to get the response of the settings endpoint request in ms by CI Visibility. +func GitRequestsSettingsMs(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "git_requests.settings_ms", nil).Submit(value) +} + +// ITRSkippableTestsRequestMs records the time it takes to get the response of the ITR skippable tests endpoint request in ms by CI Visibility. +func ITRSkippableTestsRequestMs(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "itr_skippable_tests.request_ms", nil).Submit(value) +} + +// ITRSkippableTestsResponseBytes records the number of bytes received by the endpoint. Tagged with a boolean flag set to true if response body is compressed. +func ITRSkippableTestsResponseBytes(responseCompressedType ResponseCompressedType, value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "itr_skippable_tests.response_bytes", removeEmptyStrings([]string{ + (string)(responseCompressedType), + })).Submit(value) +} + +// CodeCoverageFiles records the number of files in the code coverage report by CI Visibility. +func CodeCoverageFiles(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "code_coverage.files", nil).Submit(value) +} + +// KnownTestsRequestMs records the time it takes to get the response of the known tests endpoint request in ms by CI Visibility. +func KnownTestsRequestMs(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "known_tests.request_ms", nil).Submit(value) +} + +// KnownTestsResponseBytes records the number of bytes received by the endpoint. Tagged with a boolean flag set to true if response body is compressed. +func KnownTestsResponseBytes(responseCompressedType ResponseCompressedType, value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "known_tests.response_bytes", removeEmptyStrings([]string{ + string(responseCompressedType), + })).Submit(value) +} + +// KnownTestsResponseTests records the number of tests in the response of the known tests endpoint by CI Visibility. +func KnownTestsResponseTests(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "known_tests.response_tests", nil).Submit(value) +} + +// TestManagementTestsRequestMs records the time it takes to get the response of the test management tests endpoint request in ms by CI Visibility. +func TestManagementTestsRequestMs(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "test_management_tests.request_ms", nil).Submit(value) +} + +// TestManagementTestsResponseBytes records the number of bytes received by the endpoint. Tagged with a boolean flag set to true if response body is compressed.
+func TestManagementTestsResponseBytes(responseCompressedType ResponseCompressedType, value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "test_management_tests.response_bytes", removeEmptyStrings([]string{ + string(responseCompressedType), + })).Submit(value) +} + +// TestManagementTestsResponseTests records the number of tests in the response of the test management tests endpoint by CI Visibility. +func TestManagementTestsResponseTests(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "test_management_tests.response_tests", nil).Submit(value) +} + +// ImpactedTestsRequestMs records the time it takes to get the response of the impacted tests endpoint request in ms by CI Visibility. +func ImpactedTestsRequestMs(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "impacted_tests_detection.request_ms", nil).Submit(value) +} + +// ImpactedTestsResponseBytes records the number of bytes received by the endpoint. Tagged with a boolean flag set to true if response body is compressed. +func ImpactedTestsResponseBytes(responseCompressedType ResponseCompressedType, value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "impacted_tests_detection.response_bytes", removeEmptyStrings([]string{ + string(responseCompressedType), + })).Submit(value) +} + +// ImpactedTestsResponseFiles records the number of files in the response of the impacted tests endpoint by CI Visibility. +func ImpactedTestsResponseFiles(value float64) { + telemetry.Distribution(telemetry.NamespaceCIVisibility, "impacted_tests_detection.response_files", nil).Submit(value) +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/container_linux.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/container_linux.go similarity index 97% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/container_linux.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/container_linux.go index 237c293e..6ac57029 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/container_linux.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/container_linux.go @@ -48,7 +48,7 @@ var ( // containerID is the containerID read at init from /proc/self/cgroup containerID string - // entityID is the entityID to use for the container. It is the `cid-` if the container id available, + // entityID is the entityID to use for the container. It is the `ci-` if the container id available, // otherwise the cgroup node controller's inode prefixed with `in-` or an empty string on incompatible OS. // We use the memory controller on cgroupv1 and the root cgroup on cgroupv2. entityID string @@ -151,7 +151,7 @@ func readEntityID(mountPath, cgroupPath string, isHostCgroupNamespace bool) stri // First try to emit the containerID if available. It will be retrieved if the container is // running in the host cgroup namespace, independently of the cgroup version. if containerID != "" { - return "cid-" + containerID + return "ci-" + containerID } // Rely on the inode if we're not running in the host cgroup namespace. if isHostCgroupNamespace { @@ -161,7 +161,7 @@ func readEntityID(mountPath, cgroupPath string, isHostCgroupNamespace bool) stri } // EntityID attempts to return the container ID or the cgroup node controller's inode if the container ID -// is not available. The cid is prefixed with `cid-` and the inode with `in-`. +// is not available. The cid is prefixed with `ci-` and the inode with `in-`. 
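
With the prefixes documented in this hunk, a caller inside dd-trace-go can branch on the result of `EntityID` like this (illustrative snippet only; the package is `internal`, so it is importable solely from within the module, and a `strings` import is assumed):

```go
switch id := internal.EntityID(); {
case strings.HasPrefix(id, "ci-"):
	// container ID available (host cgroup namespace)
case strings.HasPrefix(id, "in-"):
	// cgroup node controller inode fallback
default:
	// empty string on incompatible OSes or when neither is available
}
```
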
func EntityID() string { return entityID } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/container_stub.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/container_stub.go similarity index 90% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/container_stub.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/container_stub.go index c6c24874..38f4e5ce 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/container_stub.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/container_stub.go @@ -13,7 +13,7 @@ func ContainerID() string { } // EntityID attempts to return the container ID or the cgroup v2 node inode if the container ID is not available. -// The cid is prefixed with `cid-` and the inode with `in-`. +// The cid is prefixed with `ci-` and the inode with `in-`. func EntityID() string { return "" } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/fast_queue.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/fast_queue.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/fast_queue.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/fast_queue.go diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/hash_cache.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/hash_cache.go new file mode 100644 index 00000000..4f92f7d7 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/hash_cache.go @@ -0,0 +1,76 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package datastreams + +import ( + "strings" + "sync" +) + +const ( + maxHashCacheSize = 1000 +) + +type hashCache struct { + mu sync.RWMutex + m map[string]uint64 +} + +func getHashKey(edgeTags, processTags []string, parentHash uint64) string { + var s strings.Builder + l := 0 + for _, t := range edgeTags { + l += len(t) + } + for _, t := range processTags { + l += len(t) + } + l += 8 + s.Grow(l) + for _, t := range edgeTags { + s.WriteString(t) + } + for _, t := range processTags { + s.WriteString(t) + } + s.WriteByte(byte(parentHash)) + s.WriteByte(byte(parentHash >> 8)) + s.WriteByte(byte(parentHash >> 16)) + s.WriteByte(byte(parentHash >> 24)) + s.WriteByte(byte(parentHash >> 32)) + s.WriteByte(byte(parentHash >> 40)) + s.WriteByte(byte(parentHash >> 48)) + s.WriteByte(byte(parentHash >> 56)) + return s.String() +} + +func (c *hashCache) computeAndGet(key string, parentHash uint64, service, env string, edgeTags, processTags []string) uint64 { + hash := pathwayHash(nodeHash(service, env, edgeTags, processTags), parentHash) + c.mu.Lock() + defer c.mu.Unlock() + if len(c.m) >= maxHashCacheSize { + // high cardinality of hashes shouldn't happen in practice, due to a limited amount of topics consumed + // by each service. 
+ c.m = make(map[string]uint64) + } + c.m[key] = hash + return hash +} + +func (c *hashCache) get(service, env string, edgeTags, processTags []string, parentHash uint64) uint64 { + key := getHashKey(edgeTags, processTags, parentHash) + c.mu.RLock() + if hash, ok := c.m[key]; ok { + c.mu.RUnlock() + return hash + } + c.mu.RUnlock() + return c.computeAndGet(key, parentHash, service, env, edgeTags, processTags) +} + +func newHashCache() *hashCache { + return &hashCache{m: make(map[string]uint64)} +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/pathway.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/pathway.go similarity index 95% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/pathway.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/pathway.go index a16891a7..c6ac1df4 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/pathway.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/pathway.go @@ -26,7 +26,7 @@ func isWellFormedEdgeTag(t string) bool { return false } -func nodeHash(service, env string, edgeTags []string) uint64 { +func nodeHash(service, env string, edgeTags, processTags []string) uint64 { h := fnv.New64() sort.Strings(edgeTags) h.Write([]byte(service)) @@ -38,6 +38,9 @@ func nodeHash(service, env string, edgeTags []string) uint64 { fmt.Println("not formatted correctly", t) } } + for _, t := range processTags { + h.Write([]byte(t)) + } return h.Sum64() } diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/payload.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/payload.go new file mode 100644 index 00000000..eee5dff4 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/payload.go @@ -0,0 +1,85 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:generate go run github.com/tinylib/msgp -unexported -marshal=false -o=payload_msgp.go -tests=false + +package datastreams + +// StatsPayload stores client computed stats. +type StatsPayload struct { + // Env specifies the env. of the application, as defined by the user. + Env string + // Service is the service of the application + Service string + // Stats holds all stats buckets computed within this payload. + Stats []StatsBucket + // TracerVersion is the version of the tracer + TracerVersion string + // Lang is the language of the tracer + Lang string + // Version is the version of the service + Version string + // ProcessTags contains the process level tags. + ProcessTags []string +} + +type ProduceOffset struct { + Topic string + Partition int32 + Offset int64 +} + +type CommitOffset struct { + ConsumerGroup string + Topic string + Partition int32 + Offset int64 +} + +// Backlog represents the size of a queue that hasn't been yet read by the consumer. +type Backlog struct { + // Tags that identify the backlog + Tags []string + // Value of the backlog + Value int64 +} + +// StatsBucket specifies a set of stats computed over a duration. +type StatsBucket struct { + // Start specifies the beginning of this bucket in unix nanoseconds. + Start uint64 + // Duration specifies the duration of this bucket in nanoseconds. + Duration uint64 + // Stats contains a set of statistics computed for the duration of this bucket. 
+ Stats []StatsPoint + // Backlogs store information used to compute queue backlog + Backlogs []Backlog +} + +// TimestampType can be either current or origin. +type TimestampType string + +const ( + // TimestampTypeCurrent is for when the recorded timestamp is based on the + // timestamp of the current StatsPoint. + TimestampTypeCurrent TimestampType = "current" + // TimestampTypeOrigin is for when the recorded timestamp is based on the + // time that the first StatsPoint in the pathway is sent out. + TimestampTypeOrigin TimestampType = "origin" +) + +// StatsPoint contains a set of statistics grouped under various aggregation keys. +type StatsPoint struct { + // These fields indicate the properties under which the stats were aggregated. + EdgeTags []string + Hash uint64 + ParentHash uint64 + // These fields specify the stats for the above aggregation. + // those are distributions of latency in seconds. + PathwayLatency []byte + EdgeLatency []byte + PayloadSize []byte + TimestampType TimestampType +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/payload_msgp.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/payload_msgp.go similarity index 94% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/payload_msgp.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/payload_msgp.go index d22fdfea..fe8a9cc8 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/payload_msgp.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/payload_msgp.go @@ -585,6 +585,25 @@ func (z *StatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Version") return } + case "ProcessTags": + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "ProcessTags") + return + } + if cap(z.ProcessTags) >= int(zb0003) { + z.ProcessTags = (z.ProcessTags)[:zb0003] + } else { + z.ProcessTags = make([]string, zb0003) + } + for za0002 := range z.ProcessTags { + z.ProcessTags[za0002], err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ProcessTags", za0002) + return + } + } default: err = dc.Skip() if err != nil { @@ -598,9 +617,9 @@ func (z *StatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *StatsPayload) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 6 + // map header, size 7 // write "Env" - err = en.Append(0x86, 0xa3, 0x45, 0x6e, 0x76) + err = en.Append(0x87, 0xa3, 0x45, 0x6e, 0x76) if err != nil { return } @@ -666,6 +685,23 @@ func (z *StatsPayload) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "Version") return } + // write "ProcessTags" + err = en.Append(0xab, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x54, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.ProcessTags))) + if err != nil { + err = msgp.WrapError(err, "ProcessTags") + return + } + for za0002 := range z.ProcessTags { + err = en.WriteString(z.ProcessTags[za0002]) + if err != nil { + err = msgp.WrapError(err, "ProcessTags", za0002) + return + } + } return } @@ -675,7 +711,10 @@ func (z *StatsPayload) Msgsize() (s int) { for za0001 := range z.Stats { s += z.Stats[za0001].Msgsize() } - s += 14 + msgp.StringPrefixSize + len(z.TracerVersion) + 5 + msgp.StringPrefixSize + len(z.Lang) + 8 + msgp.StringPrefixSize + len(z.Version) + s += 14 + msgp.StringPrefixSize + len(z.TracerVersion) + 5 + msgp.StringPrefixSize + len(z.Lang) + 8 + 
msgp.StringPrefixSize + len(z.Version) + 12 + msgp.ArrayHeaderSize + for za0002 := range z.ProcessTags { + s += msgp.StringPrefixSize + len(z.ProcessTags[za0002]) + } return } @@ -697,12 +736,6 @@ func (z *StatsPoint) DecodeMsg(dc *msgp.Reader) (err error) { return } switch msgp.UnsafeString(field) { - case "Service": - z.Service, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Service") - return - } case "EdgeTags": var zb0002 uint32 zb0002, err = dc.ReadArrayHeader() @@ -775,19 +808,9 @@ func (z *StatsPoint) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *StatsPoint) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 8 - // write "Service" - err = en.Append(0x88, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Service) - if err != nil { - err = msgp.WrapError(err, "Service") - return - } + // map header, size 7 // write "EdgeTags" - err = en.Append(0xa8, 0x45, 0x64, 0x67, 0x65, 0x54, 0x61, 0x67, 0x73) + err = en.Append(0x87, 0xa8, 0x45, 0x64, 0x67, 0x65, 0x54, 0x61, 0x67, 0x73) if err != nil { return } @@ -868,7 +891,7 @@ func (z *StatsPoint) EncodeMsg(en *msgp.Writer) (err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *StatsPoint) Msgsize() (s int) { - s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 9 + msgp.ArrayHeaderSize + s = 1 + 9 + msgp.ArrayHeaderSize for za0001 := range z.EdgeTags { s += msgp.StringPrefixSize + len(z.EdgeTags[za0001]) } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/processor.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/processor.go similarity index 79% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/processor.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/processor.go index 654c72db..469660c3 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/processor.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/processor.go @@ -15,10 +15,11 @@ import ( "sync/atomic" "time" - "gopkg.in/DataDog/dd-trace-go.v1/datastreams/options" - "gopkg.in/DataDog/dd-trace-go.v1/internal" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" - "gopkg.in/DataDog/dd-trace-go.v1/internal/version" + "github.com/DataDog/dd-trace-go/v2/datastreams/options" + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/processtags" + "github.com/DataDog/dd-trace-go/v2/internal/version" "github.com/DataDog/sketches-go/ddsketch" "github.com/DataDog/sketches-go/ddsketch/mapping" @@ -43,11 +44,14 @@ type statsPoint struct { pathwayLatency int64 edgeLatency int64 payloadSize int64 + serviceName string + processTags []string } type statsGroup struct { service string edgeTags []string + processTags []string hash uint64 parentHash uint64 pathwayLatency *ddsketch.DDSketch @@ -80,23 +84,22 @@ func (b bucket) export(timestampType TimestampType) StatsBucket { for _, s := range b.points { pathwayLatency, err := proto.Marshal(s.pathwayLatency.ToProto()) if err != nil { - log.Error("can't serialize pathway latency. Ignoring: %v", err) + log.Error("can't serialize pathway latency. Ignoring: %s", err.Error()) continue } edgeLatency, err := proto.Marshal(s.edgeLatency.ToProto()) if err != nil { - log.Error("can't serialize edge latency. 
Ignoring: %v", err) + log.Error("can't serialize edge latency. Ignoring: %s", err.Error()) continue } payloadSize, err := proto.Marshal(s.payloadSize.ToProto()) if err != nil { - log.Error("can't serialize payload size. Ignoring: %v", err) + log.Error("can't serialize payload size. Ignoring: %s", err.Error()) continue } stats = append(stats, StatsPoint{ PathwayLatency: pathwayLatency, EdgeLatency: edgeLatency, - Service: s.service, EdgeTags: s.edgeTags, Hash: s.hash, ParentHash: s.parentHash, @@ -172,12 +175,17 @@ type kafkaOffset struct { timestamp int64 } +type bucketKey struct { + serviceName string + btime int64 +} + type Processor struct { in *fastQueue hashCache *hashCache inKafka chan kafkaOffset - tsTypeCurrentBuckets map[int64]bucket - tsTypeOriginBuckets map[int64]bucket + tsTypeCurrentBuckets map[bucketKey]bucket + tsTypeOriginBuckets map[bucketKey]bucket wg sync.WaitGroup stopped uint64 stop chan struct{} // closing this channel triggers shutdown @@ -205,8 +213,8 @@ func NewProcessor(statsd internal.StatsdClient, env, service, version string, ag service = defaultServiceName } p := &Processor{ - tsTypeCurrentBuckets: make(map[int64]bucket), - tsTypeOriginBuckets: make(map[int64]bucket), + tsTypeCurrentBuckets: make(map[bucketKey]bucket), + tsTypeOriginBuckets: make(map[bucketKey]bucket), hashCache: newHashCache(), in: newFastQueue(), stopped: 1, @@ -224,16 +232,17 @@ func NewProcessor(statsd internal.StatsdClient, env, service, version string, ag // It gives us the start time of the time bucket in which such timestamp falls. func alignTs(ts, bucketSize int64) int64 { return ts - ts%bucketSize } -func (p *Processor) getBucket(btime int64, buckets map[int64]bucket) bucket { - b, ok := buckets[btime] +func (p *Processor) getBucket(btime int64, service string, buckets map[bucketKey]bucket) bucket { + k := bucketKey{serviceName: service, btime: btime} + b, ok := buckets[k] if !ok { b = newBucket(uint64(btime), uint64(bucketDuration.Nanoseconds())) - buckets[btime] = b + buckets[k] = b } return b } -func (p *Processor) addToBuckets(point statsPoint, btime int64, buckets map[int64]bucket) { - b := p.getBucket(btime, buckets) +func (p *Processor) addToBuckets(point statsPoint, btime int64, buckets map[bucketKey]bucket) { + b := p.getBucket(btime, point.serviceName, buckets) group, ok := b.points[point.hash] if !ok { group = statsGroup{ @@ -247,13 +256,13 @@ func (p *Processor) addToBuckets(point statsPoint, btime int64, buckets map[int6 b.points[point.hash] = group } if err := group.pathwayLatency.Add(math.Max(float64(point.pathwayLatency)/float64(time.Second), 0)); err != nil { - log.Error("failed to add pathway latency. Ignoring %v.", err) + log.Error("failed to add pathway latency. Ignoring %v.", err.Error()) } if err := group.edgeLatency.Add(math.Max(float64(point.edgeLatency)/float64(time.Second), 0)); err != nil { - log.Error("failed to add edge latency. Ignoring %v.", err) + log.Error("failed to add edge latency. Ignoring %v.", err.Error()) } if err := group.payloadSize.Add(float64(point.payloadSize)); err != nil { - log.Error("failed to add payload size. Ignoring %v.", err) + log.Error("failed to add payload size. 
Ignoring %v.", err.Error()) } } @@ -267,7 +276,7 @@ func (p *Processor) add(point statsPoint) { func (p *Processor) addKafkaOffset(o kafkaOffset) { btime := alignTs(o.timestamp, bucketDuration.Nanoseconds()) - b := p.getBucket(btime, p.tsTypeCurrentBuckets) + b := p.getBucket(btime, p.service, p.tsTypeCurrentBuckets) if o.offsetType == produceOffset { b.latestProduceOffsets[partitionKey{ partition: o.partition, @@ -311,16 +320,16 @@ func (p *Processor) flushInput() { func (p *Processor) run(tick <-chan time.Time) { for { select { + case <-p.stop: + // drop in flight payloads on the input channel + p.sendToAgent(p.flush(time.Now().Add(bucketDuration * 10))) + return case now := <-tick: p.sendToAgent(p.flush(now)) case done := <-p.flushRequest: p.flushInput() p.sendToAgent(p.flush(time.Now().Add(bucketDuration * 10))) close(done) - case <-p.stop: - // drop in flight payloads on the input channel - p.sendToAgent(p.flush(time.Now().Add(bucketDuration * 10))) - return default: s := p.in.pop() if s == nil { @@ -340,7 +349,11 @@ func (p *Processor) Start() { } p.stop = make(chan struct{}) p.flushRequest = make(chan chan<- struct{}) - go p.reportStats() + p.wg.Add(1) + go func() { + defer p.wg.Done() + p.reportStats() + }() p.wg.Add(1) go func() { defer p.wg.Done() @@ -372,7 +385,14 @@ func (p *Processor) Stop() { } func (p *Processor) reportStats() { - for range time.NewTicker(time.Second * 10).C { + tick := time.NewTicker(time.Second * 10) + defer tick.Stop() + for { + select { + case <-p.stop: + return + case <-tick.C: + } p.statsd.Count("datadog.datastreams.processor.payloads_in", atomic.SwapInt64(&p.stats.payloadsIn, 0), nil, 1) p.statsd.Count("datadog.datastreams.processor.flushed_payloads", atomic.SwapInt64(&p.stats.flushedPayloads, 0), nil, 1) p.statsd.Count("datadog.datastreams.processor.flushed_buckets", atomic.SwapInt64(&p.stats.flushedBuckets, 0), nil, 1) @@ -381,44 +401,55 @@ func (p *Processor) reportStats() { } } -func (p *Processor) flushBucket(buckets map[int64]bucket, bucketStart int64, timestampType TimestampType) StatsBucket { - bucket := buckets[bucketStart] - delete(buckets, bucketStart) +func (p *Processor) flushBucket(buckets map[bucketKey]bucket, bucketKey bucketKey, timestampType TimestampType) StatsBucket { + bucket := buckets[bucketKey] + delete(buckets, bucketKey) return bucket.export(timestampType) } -func (p *Processor) flush(now time.Time) StatsPayload { +func (p *Processor) flush(now time.Time) map[string]StatsPayload { nowNano := now.UnixNano() - sp := StatsPayload{ - Service: p.service, - Version: p.version, - Env: p.env, - Lang: "go", - TracerVersion: version.Tag, - Stats: make([]StatsBucket, 0, len(p.tsTypeCurrentBuckets)+len(p.tsTypeOriginBuckets)), - } - for ts := range p.tsTypeCurrentBuckets { - if ts > nowNano-bucketDuration.Nanoseconds() { + payloads := make(map[string]StatsPayload) + addBucket := func(service string, bucket StatsBucket) { + payload, ok := payloads[service] + if !ok { + payload = StatsPayload{ + Service: service, + Version: p.version, + Env: p.env, + Lang: "go", + TracerVersion: version.Tag, + Stats: make([]StatsBucket, 0, 1), + ProcessTags: processtags.GlobalTags().Slice(), + } + } + payload.Stats = append(payload.Stats, bucket) + payloads[service] = payload + } + for bucketKey := range p.tsTypeCurrentBuckets { + if bucketKey.btime > nowNano-bucketDuration.Nanoseconds() { // do not flush the bucket at the current time continue } - sp.Stats = append(sp.Stats, p.flushBucket(p.tsTypeCurrentBuckets, ts, TimestampTypeCurrent)) + 
addBucket(bucketKey.serviceName, p.flushBucket(p.tsTypeCurrentBuckets, bucketKey, TimestampTypeCurrent)) } - for ts := range p.tsTypeOriginBuckets { - if ts > nowNano-bucketDuration.Nanoseconds() { + for bucketKey := range p.tsTypeOriginBuckets { + if bucketKey.btime > nowNano-bucketDuration.Nanoseconds() { // do not flush the bucket at the current time continue } - sp.Stats = append(sp.Stats, p.flushBucket(p.tsTypeOriginBuckets, ts, TimestampTypeOrigin)) + addBucket(bucketKey.serviceName, p.flushBucket(p.tsTypeOriginBuckets, bucketKey, TimestampTypeOrigin)) } - return sp + return payloads } -func (p *Processor) sendToAgent(payload StatsPayload) { - atomic.AddInt64(&p.stats.flushedPayloads, 1) - atomic.AddInt64(&p.stats.flushedBuckets, int64(len(payload.Stats))) - if err := p.transport.sendPipelineStats(&payload); err != nil { - atomic.AddInt64(&p.stats.flushErrors, 1) +func (p *Processor) sendToAgent(payloads map[string]StatsPayload) { + for _, payload := range payloads { + atomic.AddInt64(&p.stats.flushedPayloads, 1) + atomic.AddInt64(&p.stats.flushedBuckets, int64(len(payload.Stats))) + if err := p.transport.sendPipelineStats(&payload); err != nil { + atomic.AddInt64(&p.stats.flushErrors, 1) + } } } @@ -437,12 +468,18 @@ func (p *Processor) SetCheckpointWithParams(ctx context.Context, params options. edgeStart = parent.EdgeStart() parentHash = parent.GetHash() } + service := p.service + if params.ServiceOverride != "" { + service = params.ServiceOverride + } + processTags := processtags.GlobalTags().Slice() child := Pathway{ - hash: p.hashCache.get(p.service, p.env, edgeTags, parentHash), + hash: p.hashCache.get(service, p.env, edgeTags, processTags, parentHash), pathwayStart: pathwayStart, edgeStart: now, } dropped := p.in.push(&processorInput{typ: pointTypeStats, point: statsPoint{ + serviceName: service, edgeTags: edgeTags, parentHash: parentHash, hash: child.hash, diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/propagator.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/propagator.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/propagator.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/propagator.go diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/transport.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/transport.go new file mode 100644 index 00000000..d03bb3e7 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/datastreams/transport.go @@ -0,0 +1,90 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
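The opaque byte constants in the generated encoder above are just msgpack headers: a fixmap header byte `0x80|n` carries the field count (hence `0x86` becoming `0x87` when `ProcessTags` becomes the seventh field), and a fixstr header `0xa0|n` prefixes each key (`0xab` = length 11 = `len("ProcessTags")`). A minimal sketch with tinylib/msgp, the library this generated code targets; the tag value is made up:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func must(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	var buf bytes.Buffer
	w := msgp.NewWriter(&buf)
	// 0x82 is a msgpack fixmap header for 2 entries; the generated encoder
	// plays the same trick when it bumps 0x86 (6 fields) to 0x87 (7 fields).
	must(w.Append(0x82))
	// 0xa3 is a fixstr header of length 3, followed by the raw key bytes —
	// exactly what en.Append(0xa3, 0x45, 0x6e, 0x76) spells out for "Env".
	must(w.Append(0xa3, 'E', 'n', 'v'))
	must(w.WriteString("prod"))
	// 0xab is a fixstr header of length 11: len("ProcessTags").
	must(w.Append(0xab))
	must(w.Append([]byte("ProcessTags")...))
	must(w.WriteArrayHeader(1))
	must(w.WriteString("entrypoint.name:app")) // made-up tag value
	must(w.Flush())
	fmt.Printf("% x\n", buf.Bytes())

	r := msgp.NewReader(&buf)
	n, err := r.ReadMapHeader()
	must(err)
	fmt.Println("fields:", n) // 2 — the count lives in the header byte
}
```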
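The core change in processor.go is that stats buckets are now keyed by a (service, aligned timestamp) pair instead of the timestamp alone, which is what later allows one payload per service. A self-contained sketch of that bucketing, using the same `alignTs` truncation as the diff; names mirror the diff, with the bucket payload simplified to a counter:

```go
package main

import (
	"fmt"
	"time"
)

const bucketDuration = 10 * time.Second

// bucketKey mirrors the diff: stats are aggregated per (service, time
// bucket) pair rather than per time bucket alone.
type bucketKey struct {
	serviceName string
	btime       int64
}

type bucket struct{ points int }

// alignTs truncates ts to the start of its bucket, as in the diff.
func alignTs(ts, size int64) int64 { return ts - ts%size }

func getBucket(buckets map[bucketKey]*bucket, btime int64, service string) *bucket {
	k := bucketKey{serviceName: service, btime: btime}
	b, ok := buckets[k]
	if !ok {
		b = &bucket{}
		buckets[k] = b
	}
	return b
}

func main() {
	buckets := make(map[bucketKey]*bucket)
	now := time.Now().UnixNano()
	btime := alignTs(now, bucketDuration.Nanoseconds())
	// Two services in the same 10s window land in two distinct buckets,
	// which is what lets flush() emit one StatsPayload per service.
	getBucket(buckets, btime, "checkout").points++
	getBucket(buckets, btime, "payments").points++
	fmt.Println(len(buckets)) // 2
}
```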
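Downstream of that, `flush()` now returns `map[string]StatsPayload`, built through an `addBucket` closure that lazily creates a payload per service and writes the struct back into the map (map values are copies in Go, so the write-back matters). A reduced sketch of that grouping, with the payload trimmed to the fields needed to show the shape:

```go
package main

import "fmt"

type StatsBucket struct{ Start uint64 }

type StatsPayload struct {
	Service string
	Stats   []StatsBucket
}

// groupByService mirrors the addBucket closure in the diff: buckets are
// appended to a payload keyed by service, creating the payload lazily.
func groupByService(flushed map[string][]StatsBucket) map[string]StatsPayload {
	payloads := make(map[string]StatsPayload)
	addBucket := func(service string, b StatsBucket) {
		p, ok := payloads[service]
		if !ok {
			p = StatsPayload{Service: service}
		}
		p.Stats = append(p.Stats, b)
		payloads[service] = p // map values are copies; write the struct back
	}
	for svc, buckets := range flushed {
		for _, b := range buckets {
			addBucket(svc, b)
		}
	}
	return payloads
}

func main() {
	out := groupByService(map[string][]StatsBucket{
		"checkout": {{Start: 0}, {Start: 10}},
		"payments": {{Start: 0}},
	})
	fmt.Println(len(out["checkout"].Stats), len(out["payments"].Stats)) // 2 1
}
```

sendToAgent then iterates this map, so flush-error accounting stays per payload rather than per flush.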
+ +package datastreams + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "net/http" + "net/url" + "runtime" + "strings" + + "github.com/DataDog/dd-trace-go/v2/internal" + + "github.com/tinylib/msgp/msgp" +) + +type httpTransport struct { + url string // the delivery URL for stats + client *http.Client // the HTTP client used in the POST + headers map[string]string // the Transport headers +} + +func newHTTPTransport(agentURL *url.URL, client *http.Client) *httpTransport { + // initialize the default EncoderPool with Encoder headers + defaultHeaders := map[string]string{ + "Datadog-Meta-Lang": "go", + "Datadog-Meta-Lang-Version": strings.TrimPrefix(runtime.Version(), "go"), + "Datadog-Meta-Lang-Interpreter": runtime.Compiler + "-" + runtime.GOARCH + "-" + runtime.GOOS, + "Content-Type": "application/msgpack", + "Content-Encoding": "gzip", + } + if cid := internal.ContainerID(); cid != "" { + defaultHeaders["Datadog-Container-ID"] = cid + } + if entityID := internal.ContainerID(); entityID != "" { + defaultHeaders["Datadog-Entity-ID"] = entityID + } + url := fmt.Sprintf("%s/v0.1/pipeline_stats", agentURL.String()) + return &httpTransport{ + url: url, + client: client, + headers: defaultHeaders, + } +} + +func (t *httpTransport) sendPipelineStats(p *StatsPayload) error { + var buf bytes.Buffer + gzipWriter, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed) + if err != nil { + return err + } + if err := msgp.Encode(gzipWriter, p); err != nil { + return err + } + err = gzipWriter.Close() + if err != nil { + return err + } + req, err := http.NewRequest("POST", t.url, &buf) + if err != nil { + return err + } + for header, value := range t.headers { + req.Header.Set(header, value) + } + resp, err := t.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + defer io.Copy(io.Discard, req.Body) + if code := resp.StatusCode; code >= 400 { + // error, check the body for context information and + // return a nice error. + txt := http.StatusText(code) + msg := make([]byte, 100) + n, _ := resp.Body.Read(msg) + if n > 0 { + return fmt.Errorf("%s (Status: %s)", msg[:n], txt) + } + return fmt.Errorf("%s", txt) + } + return nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/env.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env.go new file mode 100644 index 00000000..bcb45267 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env.go @@ -0,0 +1,165 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package internal + +import ( + "net" + "strconv" + "strings" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// BoolEnv returns the parsed boolean value of an environment variable, or +// def otherwise. +func BoolEnv(key string, def bool) bool { + vv, ok := BoolEnvNoDefault(key) + if !ok { + return def + } + return vv +} + +// BoolEnvNoDefault returns the parsed boolean value of an environment variable. The second returned bool signals if +// the value was set and was a correct boolean value. +func BoolEnvNoDefault(key string) (bool, bool) { + vv, ok := env.Lookup(key) + if !ok { + return false, false + } + v, err := strconv.ParseBool(vv) + if err != nil { + log.Warn("Non-boolean value for env var %s. 
Parse failed with error: %v", key, err.Error()) + return false, false + } + return v, true +} + +// IntEnv returns the parsed int value of an environment variable, or +// def otherwise. +func IntEnv(key string, def int) int { + vv, ok := env.Lookup(key) + if !ok { + return def + } + v, err := strconv.Atoi(vv) + if err != nil { + log.Warn("Non-integer value for env var %s, defaulting to %d. Parse failed with error: %v", key, def, err.Error()) + return def + } + return v +} + +// DurationEnv returns the parsed duration value of an environment variable, or +// def otherwise. +func DurationEnv(key string, def time.Duration) time.Duration { + vv, ok := env.Lookup(key) + if !ok { + return def + } + v, err := time.ParseDuration(vv) + if err != nil { + log.Warn("Non-duration value for env var %s, defaulting to %d. Parse failed with error: %v", key, def, err.Error()) + return def + } + return v +} + +// DurationEnvWithUnit returns the parsed duration value of an environment +// variable with the specified unit, or def otherwise. +func DurationEnvWithUnit(key string, unit string, def time.Duration) time.Duration { + vv, ok := env.Lookup(key) + if !ok { + return def + } + v, err := time.ParseDuration(vv + unit) + if err != nil { + log.Warn("Non-duration value for env var %s, defaulting to %d. Parse failed with error: %v", key, def, err.Error()) + return def + } + return v +} + +// IPEnv returns the valid IP value of an environment variable, or def otherwise. +func IPEnv(key string, def net.IP) net.IP { + vv, ok := env.Lookup(key) + if !ok { + return def + } + + ip := net.ParseIP(vv) + if ip == nil { + log.Warn("Non-IP value for env var %s, defaulting to %s", key, def.String()) + return def + } + + return ip +} + +// ForEachStringTag runs fn on every key val pair encountered in str. +// str may contain multiple key val pairs separated by either space +// or comma (but not a mixture of both), and each key val pair is separated by a delimiter. +func ForEachStringTag(str string, delimiter string, fn func(key string, val string)) { + sep := " " + if strings.Index(str, ",") > -1 { + // falling back to comma as separator + sep = "," + } + for _, tag := range strings.Split(str, sep) { + tag = strings.TrimSpace(tag) + if tag == "" { + continue + } + kv := strings.SplitN(tag, delimiter, 2) + key := strings.TrimSpace(kv[0]) + if key == "" { + continue + } + var val string + if len(kv) == 2 { + val = strings.TrimSpace(kv[1]) + } + fn(key, val) + } +} + +// ParseTagString returns tags parsed from string as map +func ParseTagString(str string) map[string]string { + res := make(map[string]string) + ForEachStringTag(str, DDTagsDelimiter, func(key, val string) { res[key] = val }) + return res +} + +// FloatEnv returns the parsed float64 value of an environment variable, +// or def otherwise. +func FloatEnv(key string, def float64) float64 { + env, ok := env.Lookup(key) + if !ok { + return def + } + v, err := strconv.ParseFloat(env, 64) + if err != nil { + log.Warn("Non-float value for env var %s, defaulting to %f. Parse failed with error: %v", key, def, err.Error()) + return def + } + return v +} + +// BoolVal returns the parsed boolean value of string val, or def if not parseable +func BoolVal(val string, def bool) bool { + v, err := strconv.ParseBool(val) + if err != nil { + return def + } + return v +} + +// ExternalEnvironment returns the value of the DD_EXTERNAL_ENV environment variable. 
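sendPipelineStats above is a straightforward gzip-then-POST: the msgpack-encoded payload is compressed at `gzip.BestSpeed` and shipped with msgpack/gzip content headers. A stdlib-only sketch of the request construction; the payload bytes and URL here are placeholders, not the tracer's real values:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"net/http"
)

// buildStatsRequest sketches what sendPipelineStats does before client.Do:
// gzip the already-msgpack-encoded body and attach the content headers.
func buildStatsRequest(url string, payload []byte) (*http.Request, error) {
	var buf bytes.Buffer
	zw, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
	if err != nil {
		return nil, err
	}
	if _, err := zw.Write(payload); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil { // must close before the buffer is read
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, url, &buf)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/msgpack")
	req.Header.Set("Content-Encoding", "gzip")
	return req, nil
}

func main() {
	// Placeholder payload byte; a real one comes from msgp.Encode.
	req, err := buildStatsRequest("http://localhost:8126/v0.1/pipeline_stats", []byte{0x87})
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL, req.Header.Get("Content-Encoding"))
}
```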
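ForEachStringTag picks its pair separator dynamically: space by default, comma as soon as one appears anywhere in the input. A stdlib-only mirror of that logic plus ParseTagString; the `":"` delimiter is an assumption, since the DDTagsDelimiter constant is defined outside this hunk:

```go
package main

import (
	"fmt"
	"strings"
)

// parseTags mirrors the vendored ForEachStringTag/ParseTagString pair:
// pairs separated by spaces, or by commas if any comma is present, and
// key/value split on a delimiter at most once.
func parseTags(str, delim string) map[string]string {
	sep := " "
	if strings.Contains(str, ",") {
		sep = "," // falling back to comma as separator, as in the diff
	}
	res := make(map[string]string)
	for _, tag := range strings.Split(str, sep) {
		tag = strings.TrimSpace(tag)
		if tag == "" {
			continue
		}
		kv := strings.SplitN(tag, delim, 2)
		key := strings.TrimSpace(kv[0])
		if key == "" {
			continue
		}
		var val string
		if len(kv) == 2 {
			val = strings.TrimSpace(kv[1])
		}
		res[key] = val
	}
	return res
}

func main() {
	fmt.Println(parseTags("env:prod team:core", ":"))  // space-separated
	fmt.Println(parseTags("env:prod, team:core", ":")) // comma wins
}
```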
+func ExternalEnvironment() string { + return env.Get("DD_EXTERNAL_ENV") +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/env.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/env.go new file mode 100644 index 00000000..7734187d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/env.go @@ -0,0 +1,90 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package env + +import ( + "os" + "strings" + "testing" + + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// Get is a wrapper around env.Get that validates the environment variable +// against a list of supported environment variables. +// +// If the environment variable has aliases, the function will also check the aliases +// and return the value of the first alias that is set. +// +// When a environment variable is not supported because it is not +// listed in the list of supported environment variables, the function will log an error +// and behave as if the environment variable was not set. +// +// In testing mode, the reader will automatically add the environment variable +// to the configuration file. +func Get(name string) string { + if !verifySupportedConfiguration(name) { + return "" + } + + if v := os.Getenv(name); v != "" { + return v + } + + for _, alias := range keyAliases[name] { + if v := os.Getenv(alias); v != "" { + return v + } + } + + return "" +} + +// Lookup is a wrapper around os.LookupEnv that validates the environment variable +// against a list of supported environment variables. +// +// If the environment variable has aliases, the function will also check the aliases. +// and return the value of the first alias that is set. +// +// When a environment variable is not supported because it is not +// listed in the list of supported environment variables, the function will log an error +// and behave as if the environment variable was not set. +// +// In testing mode, the reader will automatically add the environment variable +// to the configuration file. +func Lookup(name string) (string, bool) { + if !verifySupportedConfiguration(name) { + return "", false + } + + if v, ok := os.LookupEnv(name); ok { + return v, true + } + + for _, alias := range keyAliases[name] { + if v, ok := os.LookupEnv(alias); ok { + return v, true + } + } + + return "", false +} + +func verifySupportedConfiguration(name string) bool { + if strings.HasPrefix(name, "DD_") || strings.HasPrefix(name, "OTEL_") { + if _, ok := SupportedConfigurations[name]; !ok { + if testing.Testing() { + addSupportedConfigurationToFile(name) + } + + log.Error("config: usage of a unlisted environment variable: %s", name) + + return false + } + } + + return true +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.gen.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.gen.go new file mode 100644 index 00000000..7d64f8d7 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.gen.go @@ -0,0 +1,225 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
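env.Get and env.Lookup wrap os.Getenv/os.LookupEnv with two behaviors: DD_*/OTEL_* names must appear in the generated allow list, and aliases are consulted in declaration order when the primary name is unset. A reduced sketch of the lookup path; the prefix gating and the testing-mode file append are omitted here:

```go
package main

import (
	"fmt"
	"os"
)

var supported = map[string]struct{}{
	"DD_SERVICE": {},
	"DD_API_KEY": {},
}

var keyAliases = map[string][]string{
	"DD_API_KEY": {"DD-API-KEY"},
}

// lookup mirrors env.Lookup from the diff: unlisted names behave as unset
// (the vendored code also logs an error), and aliases are tried in order.
func lookup(name string) (string, bool) {
	if _, ok := supported[name]; !ok {
		return "", false
	}
	if v, ok := os.LookupEnv(name); ok {
		return v, true
	}
	for _, alias := range keyAliases[name] {
		if v, ok := os.LookupEnv(alias); ok {
			return v, true
		}
	}
	return "", false
}

func main() {
	os.Setenv("DD-API-KEY", "abc")
	v, ok := lookup("DD_API_KEY")
	fmt.Println(v, ok) // abc true — resolved through the alias
}
```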
+ +package env + +// Code generated by github.com/DataDog/dd-trace-go/v2/scripts/configinverter. DO NOT EDIT. + +// SupportedConfigurations is a map of supported configuration keys. +var SupportedConfigurations = map[string]struct{}{ + "DD_ACTION_EXECUTION_ID": {}, + "DD_AGENT_HOST": {}, + "DD_API_KEY": {}, + "DD_API_SECURITY_ENABLED": {}, + "DD_API_SECURITY_PROXY_SAMPLE_RATE": {}, + "DD_API_SECURITY_REQUEST_SAMPLE_RATE": {}, + "DD_API_SECURITY_SAMPLE_DELAY": {}, + "DD_APM_TRACING_ENABLED": {}, + "DD_APPSEC_BODY_PARSING_SIZE_LIMIT": {}, + "DD_APPSEC_ENABLED": {}, + "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_HTML": {}, + "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_JSON": {}, + "DD_APPSEC_MAX_STACK_TRACE_DEPTH": {}, + "DD_APPSEC_OBFUSCATION_PARAMETER_KEY_REGEXP": {}, + "DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP": {}, + "DD_APPSEC_RASP_ENABLED": {}, + "DD_APPSEC_RULES": {}, + "DD_APPSEC_SCA_ENABLED": {}, + "DD_APPSEC_STACK_TRACE_ENABLE": {}, + "DD_APPSEC_TRACE_RATE_LIMIT": {}, + "DD_APPSEC_WAF_TIMEOUT": {}, + "DD_APP_KEY": {}, + "DD_CIVISIBILITY_AGENTLESS_ENABLED": {}, + "DD_CIVISIBILITY_AGENTLESS_URL": {}, + "DD_CIVISIBILITY_AUTO_INSTRUMENTATION_PROVIDER": {}, + "DD_CIVISIBILITY_ENABLED": {}, + "DD_CIVISIBILITY_FLAKY_RETRY_COUNT": {}, + "DD_CIVISIBILITY_FLAKY_RETRY_ENABLED": {}, + "DD_CIVISIBILITY_IMPACTED_TESTS_DETECTION_ENABLED": {}, + "DD_CIVISIBILITY_INTERNAL_PARALLEL_EARLY_FLAKE_DETECTION_ENABLED": {}, + "DD_CIVISIBILITY_LOGS_ENABLED": {}, + "DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT": {}, + "DD_CUSTOM_TRACE_ID": {}, + "DD_DATA_STREAMS_ENABLED": {}, + "DD_DBM_PROPAGATION_MODE": {}, + "DD_DOGSTATSD_HOST": {}, + "DD_DOGSTATSD_PORT": {}, + "DD_DYNAMIC_INSTRUMENTATION_ENABLED": {}, + "DD_ENV": {}, + "DD_EXPERIMENTAL_PROPAGATE_PROCESS_TAGS_ENABLED": {}, + "DD_EXTERNAL_ENV": {}, + "DD_GIT_BRANCH": {}, + "DD_GIT_COMMIT_AUTHOR_DATE": {}, + "DD_GIT_COMMIT_AUTHOR_EMAIL": {}, + "DD_GIT_COMMIT_AUTHOR_NAME": {}, + "DD_GIT_COMMIT_COMMITTER_DATE": {}, + "DD_GIT_COMMIT_COMMITTER_EMAIL": {}, + "DD_GIT_COMMIT_COMMITTER_NAME": {}, + "DD_GIT_COMMIT_MESSAGE": {}, + "DD_GIT_COMMIT_SHA": {}, + "DD_GIT_PULL_REQUEST_BASE_BRANCH": {}, + "DD_GIT_PULL_REQUEST_BASE_BRANCH_SHA": {}, + "DD_GIT_REPOSITORY_URL": {}, + "DD_GIT_TAG": {}, + "DD_HOSTNAME": {}, + "DD_INSTRUMENTATION_INSTALL_ID": {}, + "DD_INSTRUMENTATION_INSTALL_TIME": {}, + "DD_INSTRUMENTATION_INSTALL_TYPE": {}, + "DD_INSTRUMENTATION_TELEMETRY_ENABLED": {}, + "DD_KEY": {}, + "DD_LLMOBS_AGENTLESS_ENABLED": {}, + "DD_LLMOBS_ENABLED": {}, + "DD_LLMOBS_ML_APP": {}, + "DD_LLMOBS_PROJECT_NAME": {}, + "DD_LOGGING_RATE": {}, + "DD_PIPELINE_EXECUTION_ID": {}, + "DD_PROFILING_AGENTLESS": {}, + "DD_PROFILING_CODE_HOTSPOTS_COLLECTION_ENABLED": {}, + "DD_PROFILING_DEBUG_COMPRESSION_SETTINGS": {}, + "DD_PROFILING_DELTA": {}, + "DD_PROFILING_ENABLED": {}, + "DD_PROFILING_ENDPOINT_COLLECTION_ENABLED": {}, + "DD_PROFILING_ENDPOINT_COUNT_ENABLED": {}, + "DD_PROFILING_EXECUTION_TRACE_ENABLED": {}, + "DD_PROFILING_EXECUTION_TRACE_LIMIT_BYTES": {}, + "DD_PROFILING_EXECUTION_TRACE_PERIOD": {}, + "DD_PROFILING_FLUSH_ON_EXIT": {}, + "DD_PROFILING_OUTPUT_DIR": {}, + "DD_PROFILING_UPLOAD_TIMEOUT": {}, + "DD_PROFILING_URL": {}, + "DD_PROFILING_WAIT_PROFILE": {}, + "DD_PROFILING_WAIT_PROFILE_MAX_GOROUTINES": {}, + "DD_RC_TUF_ROOT": {}, + "DD_REMOTE_CONFIGURATION_ENABLED": {}, + "DD_REMOTE_CONFIG_POLL_INTERVAL_SECONDS": {}, + "DD_REQUEST_MIRROR_HEALTHCHECK_ADDR": {}, + "DD_REQUEST_MIRROR_LISTEN_ADDR": {}, + "DD_RUNTIME_METRICS_ENABLED": {}, + "DD_RUNTIME_METRICS_V2_ENABLED": {}, + "DD_SERVICE": {}, + 
"DD_SERVICE_EXTENSION_HEALTHCHECK_PORT": {}, + "DD_SERVICE_EXTENSION_HOST": {}, + "DD_SERVICE_EXTENSION_OBSERVABILITY_MODE": {}, + "DD_SERVICE_EXTENSION_PORT": {}, + "DD_SERVICE_MAPPING": {}, + "DD_SITE": {}, + "DD_SPAN_SAMPLING_RULES": {}, + "DD_SPAN_SAMPLING_RULES_FILE": {}, + "DD_TAGS": {}, + "DD_TELEMETRY_DEBUG": {}, + "DD_TELEMETRY_DEPENDENCY_COLLECTION_ENABLED": {}, + "DD_TELEMETRY_HEARTBEAT_INTERVAL": {}, + "DD_TELEMETRY_LOG_COLLECTION_ENABLED": {}, + "DD_TELEMETRY_METRICS_ENABLED": {}, + "DD_TEST_AGENT_HOST": {}, + "DD_TEST_AGENT_PORT": {}, + "DD_TEST_MANAGEMENT_ATTEMPT_TO_FIX_RETRIES": {}, + "DD_TEST_MANAGEMENT_ENABLED": {}, + "DD_TEST_OPTIMIZATION_ENV_DATA_FILE": {}, + "DD_TEST_SESSION_NAME": {}, + "DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED": {}, + "DD_TRACE_128_BIT_TRACEID_LOGGING_ENABLED": {}, + "DD_TRACE_ABANDONED_SPAN_TIMEOUT": {}, + "DD_TRACE_AGENT_PORT": {}, + "DD_TRACE_AGENT_PROTOCOL_VERSION": {}, + "DD_TRACE_AGENT_URL": {}, + "DD_TRACE_ANALYTICS_ENABLED": {}, + "DD_TRACE_AWS_ANALYTICS_ENABLED": {}, + "DD_TRACE_BAGGAGE_TAG_KEYS": {}, + "DD_TRACE_BUNTDB_ANALYTICS_ENABLED": {}, + "DD_TRACE_CHI_ANALYTICS_ENABLED": {}, + "DD_TRACE_CLIENT_HOSTNAME_COMPAT": {}, + "DD_TRACE_CLIENT_IP_ENABLED": {}, + "DD_TRACE_CLIENT_IP_HEADER": {}, + "DD_TRACE_CONSUL_ANALYTICS_ENABLED": {}, + "DD_TRACE_DEBUG": {}, + "DD_TRACE_DEBUG_ABANDONED_SPANS": {}, + "DD_TRACE_DEBUG_SEELOG_WORKAROUND": {}, + "DD_TRACE_ECHO_ANALYTICS_ENABLED": {}, + "DD_TRACE_ELASTIC_ANALYTICS_ENABLED": {}, + "DD_TRACE_ENABLED": {}, + "DD_TRACE_FASTHTTP_ANALYTICS_ENABLED": {}, + "DD_TRACE_FEATURES": {}, + "DD_TRACE_FIBER_ANALYTICS_ENABLED": {}, + "DD_TRACE_GCP_PUBSUB_ANALYTICS_ENABLED": {}, + "DD_TRACE_GIN_ANALYTICS_ENABLED": {}, + "DD_TRACE_GIT_METADATA_ENABLED": {}, + "DD_TRACE_GOCQL_ANALYTICS_ENABLED": {}, + "DD_TRACE_GOCQL_COMPAT": {}, + "DD_TRACE_GOJI_ANALYTICS_ENABLED": {}, + "DD_TRACE_GOOGLE_API_ANALYTICS_ENABLED": {}, + "DD_TRACE_GOPG_ANALYTICS_ENABLED": {}, + "DD_TRACE_GQLGEN_ANALYTICS_ENABLED": {}, + "DD_TRACE_GRAPHQL_ANALYTICS_ENABLED": {}, + "DD_TRACE_GRAPHQL_ERROR_EXTENSIONS": {}, + "DD_TRACE_GRPC_ANALYTICS_ENABLED": {}, + "DD_TRACE_HEADER_TAGS": {}, + "DD_TRACE_HTTPROUTER_ANALYTICS_ENABLED": {}, + "DD_TRACE_HTTPTREEMUX_ANALYTICS_ENABLED": {}, + "DD_TRACE_HTTP_ANALYTICS_ENABLED": {}, + "DD_TRACE_HTTP_CLIENT_ERROR_STATUSES": {}, + "DD_TRACE_HTTP_CLIENT_RESOURCE_NAME_QUANTIZE": {}, + "DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING": {}, + "DD_TRACE_HTTP_HANDLER_RESOURCE_NAME_QUANTIZE": {}, + "DD_TRACE_HTTP_SERVER_ERROR_STATUSES": {}, + "DD_TRACE_HTTP_URL_QUERY_STRING_DISABLED": {}, + "DD_TRACE_INFERRED_PROXY_SERVICES_ENABLED": {}, + "DD_TRACE_KAFKA_ANALYTICS_ENABLED": {}, + "DD_TRACE_LEVELDB_ANALYTICS_ENABLED": {}, + "DD_TRACE_LOGRUS_ANALYTICS_ENABLED": {}, + "DD_TRACE_LOG_DIRECTORY": {}, + "DD_TRACE_MEMCACHE_ANALYTICS_ENABLED": {}, + "DD_TRACE_MGO_ANALYTICS_ENABLED": {}, + "DD_TRACE_MONGO_ANALYTICS_ENABLED": {}, + "DD_TRACE_MUX_ANALYTICS_ENABLED": {}, + "DD_TRACE_NEGRONI_ANALYTICS_ENABLED": {}, + "DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP": {}, + "DD_TRACE_PARTIAL_FLUSH_ENABLED": {}, + "DD_TRACE_PARTIAL_FLUSH_MIN_SPANS": {}, + "DD_TRACE_PEER_SERVICE_DEFAULTS_ENABLED": {}, + "DD_TRACE_PEER_SERVICE_MAPPING": {}, + "DD_TRACE_PROPAGATION_EXTRACT_FIRST": {}, + "DD_TRACE_PROPAGATION_STYLE": {}, + "DD_TRACE_PROPAGATION_STYLE_EXTRACT": {}, + "DD_TRACE_PROPAGATION_STYLE_INJECT": {}, + "DD_TRACE_RATE_LIMIT": {}, + "DD_TRACE_REDIGO_ANALYTICS_ENABLED": {}, + "DD_TRACE_REDIS_ANALYTICS_ENABLED": {}, + "DD_TRACE_REDIS_RAW_COMMAND": {}, 
+ "DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED": {}, + "DD_TRACE_REPORT_HOSTNAME": {}, + "DD_TRACE_RESTFUL_ANALYTICS_ENABLED": {}, + "DD_TRACE_SAMPLE_RATE": {}, + "DD_TRACE_SAMPLING_RULES": {}, + "DD_TRACE_SAMPLING_RULES_FILE": {}, + "DD_TRACE_SARAMA_ANALYTICS_ENABLED": {}, + "DD_TRACE_SOURCE_HOSTNAME": {}, + "DD_TRACE_SPAN_ATTRIBUTE_SCHEMA": {}, + "DD_TRACE_SQL_ANALYTICS_ENABLED": {}, + "DD_TRACE_SQL_COMMENT_INJECTION_MODE": {}, + "DD_TRACE_STARTUP_LOGS": {}, + "DD_TRACE_STATS_COMPUTATION_ENABLED": {}, + "DD_TRACE_TWIRP_ANALYTICS_ENABLED": {}, + "DD_TRACE_VALKEY_ANALYTICS_ENABLED": {}, + "DD_TRACE_VALKEY_RAW_COMMAND": {}, + "DD_TRACE_VAULT_ANALYTICS_ENABLED": {}, + "DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH": {}, + "DD_TRACE__ANALYTICS_ENABLED": {}, + "DD_VERSION": {}, + "OTEL_LOGS_EXPORTER": {}, + "OTEL_LOG_LEVEL": {}, + "OTEL_METRICS_EXPORTER": {}, + "OTEL_PROPAGATORS": {}, + "OTEL_RESOURCE_ATTRIBUTES": {}, + "OTEL_SERVICE_NAME": {}, + "OTEL_TRACES_EXPORTER": {}, + "OTEL_TRACES_SAMPLER": {}, + "OTEL_TRACES_SAMPLER_ARG": {}, +} + +// keyAliases maps aliases to supported configuration keys. +var keyAliases = map[string][]string{ + "DD_API_KEY": {"DD-API-KEY"}, +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.go new file mode 100644 index 00000000..3950db26 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.go @@ -0,0 +1,102 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package env + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// SupportedConfiguration represents the content of the supported_configurations.json file. +type SupportedConfiguration struct { + SupportedConfigurations map[string][]string `json:"supportedConfigurations"` + Aliases map[string][]string `json:"aliases"` +} + +var ( + configFilePath string + once sync.Once + mu sync.Mutex + skipLock bool +) + +// getConfigFilePath returns the path to the supported_configurations.json file +// in the same directory as this Go file. The path is calculated once and cached. +// +// This needs to be computed, if we use a relative path, the file will be read +// from current working directory of the running process, not the directory of +// this file. +func getConfigFilePath() string { + once.Do(func() { + _, filename, _, _ := runtime.Caller(0) + dir := filepath.Dir(filename) + configFilePath = filepath.Join(dir, "supported_configurations.json") + }) + return configFilePath +} + +// addSupportedConfigurationToFile adds a supported configuration to the json file. +// it is used only in testing mode. +// +// It reads the json file, adds the new configuration, and writes it back to the file. +// The JSON output will have sorted keys since Go's json.Marshal sorts map keys automatically. +// +// When called with DD_CONFIG_INVERSION_UNKNOWN nothing is done as it is a special value +// used in a unit test to verify the behavior of unknown env var. 
+func addSupportedConfigurationToFile(name string) { + mu.Lock() + defer mu.Unlock() + + filePath := getConfigFilePath() + + cfg, err := readSupportedConfigurations(filePath) + if err != nil { + log.Error("config: failed to read supported configurations: %s", err.Error()) + return + } + + if _, ok := cfg.SupportedConfigurations[name]; !ok { + cfg.SupportedConfigurations[name] = []string{"A"} + } + + if err := writeSupportedConfigurations(filePath, cfg); err != nil { + log.Error("config: failed to write supported configurations: %s", err.Error()) + } +} + +func readSupportedConfigurations(filePath string) (*SupportedConfiguration, error) { + // read the json file + jsonFile, err := os.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("failed to open supported_configurations.json: %w", err) + } + + var cfg SupportedConfiguration + if err := json.Unmarshal(jsonFile, &cfg); err != nil { + return nil, fmt.Errorf("failed to unmarshal SupportedConfiguration: %w", err) + } + return &cfg, nil +} + +func writeSupportedConfigurations(filePath string, cfg *SupportedConfiguration) error { + // write the json file - Go's json.MarshalIndent automatically sorts map keys + jsonFile, err := json.MarshalIndent(cfg, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal SupportedConfiguration: %w", err) + } + + if err := os.WriteFile(filePath, jsonFile, 0644); err != nil { + return fmt.Errorf("failed to write supported_configurations.json: %w", err) + } + + return nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.json b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.json new file mode 100644 index 00000000..d05b7494 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.json @@ -0,0 +1,633 @@ +{ + "supportedConfigurations": { + "DD_ACTION_EXECUTION_ID": [ + "A" + ], + "DD_AGENT_HOST": [ + "A" + ], + "DD_API_KEY": [ + "A" + ], + "DD_API_SECURITY_ENABLED": [ + "A" + ], + "DD_API_SECURITY_PROXY_SAMPLE_RATE": [ + "A" + ], + "DD_API_SECURITY_REQUEST_SAMPLE_RATE": [ + "A" + ], + "DD_API_SECURITY_SAMPLE_DELAY": [ + "A" + ], + "DD_APM_TRACING_ENABLED": [ + "A" + ], + "DD_APPSEC_BODY_PARSING_SIZE_LIMIT": [ + "A" + ], + "DD_APPSEC_ENABLED": [ + "A" + ], + "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_HTML": [ + "A" + ], + "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_JSON": [ + "A" + ], + "DD_APPSEC_MAX_STACK_TRACE_DEPTH": [ + "A" + ], + "DD_APPSEC_OBFUSCATION_PARAMETER_KEY_REGEXP": [ + "A" + ], + "DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP": [ + "A" + ], + "DD_APPSEC_RASP_ENABLED": [ + "A" + ], + "DD_APPSEC_RULES": [ + "A" + ], + "DD_APPSEC_SCA_ENABLED": [ + "A" + ], + "DD_APPSEC_STACK_TRACE_ENABLE": [ + "A" + ], + "DD_APPSEC_TRACE_RATE_LIMIT": [ + "A" + ], + "DD_APPSEC_WAF_TIMEOUT": [ + "A" + ], + "DD_APP_KEY": [ + "A" + ], + "DD_CIVISIBILITY_AGENTLESS_ENABLED": [ + "A" + ], + "DD_CIVISIBILITY_AGENTLESS_URL": [ + "A" + ], + "DD_CIVISIBILITY_AUTO_INSTRUMENTATION_PROVIDER": [ + "A" + ], + "DD_CIVISIBILITY_ENABLED": [ + "A" + ], + "DD_CIVISIBILITY_FLAKY_RETRY_COUNT": [ + "A" + ], + "DD_CIVISIBILITY_FLAKY_RETRY_ENABLED": [ + "A" + ], + "DD_CIVISIBILITY_IMPACTED_TESTS_DETECTION_ENABLED": [ + "A" + ], + "DD_CIVISIBILITY_INTERNAL_PARALLEL_EARLY_FLAKE_DETECTION_ENABLED": [ + "A" + ], + "DD_CIVISIBILITY_LOGS_ENABLED": [ + "A" + ], + "DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT": [ + "A" + ], + "DD_CUSTOM_TRACE_ID": [ + "A" + ], + "DD_DATA_STREAMS_ENABLED": [ + "A" + ], + "DD_DBM_PROPAGATION_MODE": [ + "A" + 
], + "DD_DOGSTATSD_HOST": [ + "A" + ], + "DD_DOGSTATSD_PORT": [ + "A" + ], + "DD_DYNAMIC_INSTRUMENTATION_ENABLED": [ + "A" + ], + "DD_ENV": [ + "A" + ], + "DD_EXPERIMENTAL_PROPAGATE_PROCESS_TAGS_ENABLED": [ + "A" + ], + "DD_EXTERNAL_ENV": [ + "A" + ], + "DD_GIT_BRANCH": [ + "A" + ], + "DD_GIT_COMMIT_AUTHOR_DATE": [ + "A" + ], + "DD_GIT_COMMIT_AUTHOR_EMAIL": [ + "A" + ], + "DD_GIT_COMMIT_AUTHOR_NAME": [ + "A" + ], + "DD_GIT_COMMIT_COMMITTER_DATE": [ + "A" + ], + "DD_GIT_COMMIT_COMMITTER_EMAIL": [ + "A" + ], + "DD_GIT_COMMIT_COMMITTER_NAME": [ + "A" + ], + "DD_GIT_COMMIT_MESSAGE": [ + "A" + ], + "DD_GIT_COMMIT_SHA": [ + "A" + ], + "DD_GIT_PULL_REQUEST_BASE_BRANCH": [ + "A" + ], + "DD_GIT_PULL_REQUEST_BASE_BRANCH_SHA": [ + "A" + ], + "DD_GIT_REPOSITORY_URL": [ + "A" + ], + "DD_GIT_TAG": [ + "A" + ], + "DD_HOSTNAME": [ + "A" + ], + "DD_INSTRUMENTATION_INSTALL_ID": [ + "A" + ], + "DD_INSTRUMENTATION_INSTALL_TIME": [ + "A" + ], + "DD_INSTRUMENTATION_INSTALL_TYPE": [ + "A" + ], + "DD_INSTRUMENTATION_TELEMETRY_ENABLED": [ + "A" + ], + "DD_KEY": [ + "A" + ], + "DD_LLMOBS_AGENTLESS_ENABLED": [ + "A" + ], + "DD_LLMOBS_ENABLED": [ + "A" + ], + "DD_LLMOBS_ML_APP": [ + "A" + ], + "DD_LLMOBS_PROJECT_NAME": [ + "A" + ], + "DD_LOGGING_RATE": [ + "A" + ], + "DD_PIPELINE_EXECUTION_ID": [ + "A" + ], + "DD_PROFILING_AGENTLESS": [ + "A" + ], + "DD_PROFILING_CODE_HOTSPOTS_COLLECTION_ENABLED": [ + "A" + ], + "DD_PROFILING_DEBUG_COMPRESSION_SETTINGS": [ + "A" + ], + "DD_PROFILING_DELTA": [ + "A" + ], + "DD_PROFILING_ENABLED": [ + "A" + ], + "DD_PROFILING_ENDPOINT_COLLECTION_ENABLED": [ + "A" + ], + "DD_PROFILING_ENDPOINT_COUNT_ENABLED": [ + "A" + ], + "DD_PROFILING_EXECUTION_TRACE_ENABLED": [ + "A" + ], + "DD_PROFILING_EXECUTION_TRACE_LIMIT_BYTES": [ + "A" + ], + "DD_PROFILING_EXECUTION_TRACE_PERIOD": [ + "A" + ], + "DD_PROFILING_FLUSH_ON_EXIT": [ + "A" + ], + "DD_PROFILING_OUTPUT_DIR": [ + "A" + ], + "DD_PROFILING_UPLOAD_TIMEOUT": [ + "A" + ], + "DD_PROFILING_URL": [ + "A" + ], + "DD_PROFILING_WAIT_PROFILE": [ + "A" + ], + "DD_PROFILING_WAIT_PROFILE_MAX_GOROUTINES": [ + "A" + ], + "DD_RC_TUF_ROOT": [ + "A" + ], + "DD_REMOTE_CONFIGURATION_ENABLED": [ + "A" + ], + "DD_REMOTE_CONFIG_POLL_INTERVAL_SECONDS": [ + "A" + ], + "DD_REQUEST_MIRROR_HEALTHCHECK_ADDR": [ + "A" + ], + "DD_REQUEST_MIRROR_LISTEN_ADDR": [ + "A" + ], + "DD_RUNTIME_METRICS_ENABLED": [ + "A" + ], + "DD_RUNTIME_METRICS_V2_ENABLED": [ + "A" + ], + "DD_SERVICE": [ + "A" + ], + "DD_SERVICE_EXTENSION_HEALTHCHECK_PORT": [ + "A" + ], + "DD_SERVICE_EXTENSION_HOST": [ + "A" + ], + "DD_SERVICE_EXTENSION_OBSERVABILITY_MODE": [ + "A" + ], + "DD_SERVICE_EXTENSION_PORT": [ + "A" + ], + "DD_SERVICE_MAPPING": [ + "A" + ], + "DD_SITE": [ + "A" + ], + "DD_SPAN_SAMPLING_RULES": [ + "A" + ], + "DD_SPAN_SAMPLING_RULES_FILE": [ + "A" + ], + "DD_TAGS": [ + "A" + ], + "DD_TELEMETRY_DEBUG": [ + "A" + ], + "DD_TELEMETRY_DEPENDENCY_COLLECTION_ENABLED": [ + "A" + ], + "DD_TELEMETRY_HEARTBEAT_INTERVAL": [ + "A" + ], + "DD_TELEMETRY_LOG_COLLECTION_ENABLED": [ + "A" + ], + "DD_TELEMETRY_METRICS_ENABLED": [ + "A" + ], + "DD_TEST_AGENT_HOST": [ + "A" + ], + "DD_TEST_AGENT_PORT": [ + "A" + ], + "DD_TEST_MANAGEMENT_ATTEMPT_TO_FIX_RETRIES": [ + "A" + ], + "DD_TEST_MANAGEMENT_ENABLED": [ + "A" + ], + "DD_TEST_OPTIMIZATION_ENV_DATA_FILE": [ + "A" + ], + "DD_TEST_SESSION_NAME": [ + "A" + ], + "DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED": [ + "A" + ], + "DD_TRACE_128_BIT_TRACEID_LOGGING_ENABLED": [ + "A" + ], + "DD_TRACE_ABANDONED_SPAN_TIMEOUT": [ + "A" + ], + "DD_TRACE_AGENT_PORT": [ + 
"A" + ], + "DD_TRACE_AGENT_PROTOCOL_VERSION": [ + "A" + ], + "DD_TRACE_AGENT_URL": [ + "A" + ], + "DD_TRACE_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_AWS_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_BAGGAGE_TAG_KEYS": [ + "A" + ], + "DD_TRACE_BUNTDB_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_CHI_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_CLIENT_HOSTNAME_COMPAT": [ + "A" + ], + "DD_TRACE_CLIENT_IP_ENABLED": [ + "A" + ], + "DD_TRACE_CLIENT_IP_HEADER": [ + "A" + ], + "DD_TRACE_CONSUL_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_DEBUG": [ + "A" + ], + "DD_TRACE_DEBUG_ABANDONED_SPANS": [ + "A" + ], + "DD_TRACE_DEBUG_SEELOG_WORKAROUND": [ + "A" + ], + "DD_TRACE_ECHO_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_ELASTIC_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_ENABLED": [ + "A" + ], + "DD_TRACE_FASTHTTP_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_FEATURES": [ + "A" + ], + "DD_TRACE_FIBER_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_GCP_PUBSUB_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_GIN_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_GIT_METADATA_ENABLED": [ + "A" + ], + "DD_TRACE_GOCQL_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_GOCQL_COMPAT": [ + "A" + ], + "DD_TRACE_GOJI_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_GOOGLE_API_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_GOPG_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_GQLGEN_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_GRAPHQL_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_GRAPHQL_ERROR_EXTENSIONS": [ + "A" + ], + "DD_TRACE_GRPC_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_HEADER_TAGS": [ + "A" + ], + "DD_TRACE_HTTPROUTER_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_HTTPTREEMUX_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_HTTP_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_HTTP_CLIENT_ERROR_STATUSES": [ + "A" + ], + "DD_TRACE_HTTP_CLIENT_RESOURCE_NAME_QUANTIZE": [ + "A" + ], + "DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING": [ + "A" + ], + "DD_TRACE_HTTP_HANDLER_RESOURCE_NAME_QUANTIZE": [ + "A" + ], + "DD_TRACE_HTTP_SERVER_ERROR_STATUSES": [ + "A" + ], + "DD_TRACE_HTTP_URL_QUERY_STRING_DISABLED": [ + "A" + ], + "DD_TRACE_INFERRED_PROXY_SERVICES_ENABLED": [ + "A" + ], + "DD_TRACE_KAFKA_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_LEVELDB_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_LOGRUS_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_LOG_DIRECTORY": [ + "A" + ], + "DD_TRACE_MEMCACHE_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_MGO_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_MONGO_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_MUX_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_NEGRONI_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP": [ + "A" + ], + "DD_TRACE_PARTIAL_FLUSH_ENABLED": [ + "A" + ], + "DD_TRACE_PARTIAL_FLUSH_MIN_SPANS": [ + "A" + ], + "DD_TRACE_PEER_SERVICE_DEFAULTS_ENABLED": [ + "A" + ], + "DD_TRACE_PEER_SERVICE_MAPPING": [ + "A" + ], + "DD_TRACE_PROPAGATION_EXTRACT_FIRST": [ + "A" + ], + "DD_TRACE_PROPAGATION_STYLE": [ + "A" + ], + "DD_TRACE_PROPAGATION_STYLE_EXTRACT": [ + "A" + ], + "DD_TRACE_PROPAGATION_STYLE_INJECT": [ + "A" + ], + "DD_TRACE_RATE_LIMIT": [ + "A" + ], + "DD_TRACE_REDIGO_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_REDIS_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_REDIS_RAW_COMMAND": [ + "A" + ], + "DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED": [ + "A" + ], + "DD_TRACE_REPORT_HOSTNAME": [ + "A" + ], + "DD_TRACE_RESTFUL_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_SAMPLE_RATE": [ + "A" + ], + "DD_TRACE_SAMPLING_RULES": [ + "A" + ], + "DD_TRACE_SAMPLING_RULES_FILE": [ + "A" + ], + 
"DD_TRACE_SARAMA_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_SOURCE_HOSTNAME": [ + "A" + ], + "DD_TRACE_SPAN_ATTRIBUTE_SCHEMA": [ + "A" + ], + "DD_TRACE_SQL_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_SQL_COMMENT_INJECTION_MODE": [ + "A" + ], + "DD_TRACE_STARTUP_LOGS": [ + "A" + ], + "DD_TRACE_STATS_COMPUTATION_ENABLED": [ + "A" + ], + "DD_TRACE_TWIRP_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_VALKEY_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_VALKEY_RAW_COMMAND": [ + "A" + ], + "DD_TRACE_VAULT_ANALYTICS_ENABLED": [ + "A" + ], + "DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH": [ + "A" + ], + "DD_TRACE__ANALYTICS_ENABLED": [ + "A" + ], + "DD_VERSION": [ + "A" + ], + "OTEL_LOGS_EXPORTER": [ + "A" + ], + "OTEL_LOG_LEVEL": [ + "A" + ], + "OTEL_METRICS_EXPORTER": [ + "A" + ], + "OTEL_PROPAGATORS": [ + "A" + ], + "OTEL_RESOURCE_ATTRIBUTES": [ + "A" + ], + "OTEL_SERVICE_NAME": [ + "A" + ], + "OTEL_TRACES_EXPORTER": [ + "A" + ], + "OTEL_TRACES_SAMPLER": [ + "A" + ], + "OTEL_TRACES_SAMPLER_ARG": [ + "A" + ] + }, + "aliases": { + "DD_API_KEY": [ + "DD-API-KEY" + ] + } +} \ No newline at end of file diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadata.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/gitmetadata.go similarity index 94% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadata.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/gitmetadata.go index ce0ea38e..f8c39663 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/gitmetadata.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/gitmetadata.go @@ -7,11 +7,11 @@ package internal import ( "net/url" - "os" "runtime/debug" "sync" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/log" ) const ( @@ -59,14 +59,14 @@ func updateAllTags(tags map[string]string, newtags map[string]string) { // Get git metadata from environment variables func getTagsFromEnv() map[string]string { return map[string]string{ - TagRepositoryURL: removeCredentials(os.Getenv(EnvGitRepositoryURL)), - TagCommitSha: os.Getenv(EnvGitCommitSha), + TagRepositoryURL: removeCredentials(env.Get(EnvGitRepositoryURL)), + TagCommitSha: env.Get(EnvGitCommitSha), } } // Get git metadata from DD_TAGS func getTagsFromDDTags() map[string]string { - etags := ParseTagString(os.Getenv(EnvDDTags)) + etags := ParseTagString(env.Get(EnvDDTags)) return map[string]string{ TagRepositoryURL: removeCredentials(etags[TagRepositoryURL]), diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/globalconfig/globalconfig.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/globalconfig/globalconfig.go new file mode 100644 index 00000000..24baef7f --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/globalconfig/globalconfig.go @@ -0,0 +1,148 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package globalconfig stores configuration which applies globally to both the tracer +// and integrations. 
+package globalconfig + +import ( + "math" + "sync" + + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/env" + + "github.com/google/uuid" +) + +var cfg = &config{ + analyticsRate: math.NaN(), + runtimeID: uuid.New().String(), + headersAsTags: internal.NewLockMap(map[string]string{}), +} + +type config struct { + mu sync.RWMutex + analyticsRate float64 + serviceName string + runtimeID string + headersAsTags *internal.LockMap + dogstatsdAddr string + statsTags []string +} + +// AnalyticsRate returns the sampling rate at which events should be marked. It uses +// synchronizing mechanisms, meaning that for optimal performance it's best to read it +// once and store it. +func AnalyticsRate() float64 { + cfg.mu.RLock() + defer cfg.mu.RUnlock() + return cfg.analyticsRate +} + +// SetAnalyticsRate sets the given event sampling rate globally. +func SetAnalyticsRate(rate float64) { + cfg.mu.Lock() + defer cfg.mu.Unlock() + cfg.analyticsRate = rate +} + +// ServiceName returns the default service name used by non-client integrations such as servers and frameworks. +func ServiceName() string { + cfg.mu.RLock() + defer cfg.mu.RUnlock() + return cfg.serviceName +} + +// SetServiceName sets the global service name set for this application. +func SetServiceName(name string) { + cfg.mu.Lock() + defer cfg.mu.Unlock() + cfg.serviceName = name +} + +// DogstatsdAddr returns the destination for tracer and contrib statsd clients +func DogstatsdAddr() string { + cfg.mu.RLock() + defer cfg.mu.RUnlock() + return cfg.dogstatsdAddr +} + +// SetDogstatsdAddr sets the destination for statsd clients to be used by tracer and contrib packages +func SetDogstatsdAddr(addr string) { + cfg.mu.Lock() + defer cfg.mu.Unlock() + cfg.dogstatsdAddr = addr +} + +// StatsTags returns a list of tags that apply to statsd payloads for both tracer and contribs +func StatsTags() []string { + cfg.mu.RLock() + defer cfg.mu.RUnlock() + // Copy the slice before returning it, so that callers cannot pollute the underlying array + tags := make([]string, len(cfg.statsTags)) + copy(tags, cfg.statsTags) + return tags +} + +// SetStatsTags configures the list of tags that should be applied to contribs' statsd.Client as global tags +// It should only be called by the tracer package +func SetStatsTags(tags []string) { + cfg.mu.Lock() + defer cfg.mu.Unlock() + // Copy the slice before setting it, so that any changes to the slice provided to SetStatsTags does not pollute the underlying array of statsTags + statsTags := make([]string, len(tags)) + copy(statsTags, tags) + cfg.statsTags = statsTags +} + +// RuntimeID returns this process's unique runtime id. +func RuntimeID() string { + cfg.mu.RLock() + defer cfg.mu.RUnlock() + return cfg.runtimeID +} + +// HeaderTagMap returns the mappings of headers to their tag values +func HeaderTagMap() *internal.LockMap { + return cfg.headersAsTags +} + +// HeaderTag returns the configured tag for a given header. 
+// This function exists for testing purposes, for performance you may want to use `HeaderTagMap` +func HeaderTag(header string) string { + return cfg.headersAsTags.Get(header) +} + +// SetHeaderTag adds config for header `from` with tag value `to` +func SetHeaderTag(from, to string) { + cfg.headersAsTags.Set(from, to) +} + +// HeaderTagsLen returns the length of globalconfig's headersAsTags map, 0 for empty map +func HeaderTagsLen() int { + return cfg.headersAsTags.Len() +} + +// ClearHeaderTags assigns headersAsTags to a new, empty map +// It is invoked when WithHeaderTags is called, in order to overwrite the config +func ClearHeaderTags() { + cfg.headersAsTags.Clear() +} + +// InstrumentationInstallID returns the install ID as described in DD_INSTRUMENTATION_INSTALL_ID +func InstrumentationInstallID() string { + return env.Get("DD_INSTRUMENTATION_INSTALL_ID") +} + +// InstrumentationInstallType returns the install type as described in DD_INSTRUMENTATION_INSTALL_TYPE +func InstrumentationInstallType() string { + return env.Get("DD_INSTRUMENTATION_INSTALL_TYPE") +} + +// InstrumentationInstallTime returns the install time as described in DD_INSTRUMENTATION_INSTALL_TIME +func InstrumentationInstallTime() string { + return env.Get("DD_INSTRUMENTATION_INSTALL_TIME") +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/azure/azure.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/azure/azure.go new file mode 100644 index 00000000..953608e6 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/azure/azure.go @@ -0,0 +1,63 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package azure + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/hostname/cachedfetch" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/httputils" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/validate" +) + +// declare these as vars not const to ease testing +var ( + metadataURL = "http://169.254.169.254" + timeout = 300 * time.Millisecond + + // CloudProviderName contains the inventory name of for Azure + CloudProviderName = "Azure" +) + +func getResponse(ctx context.Context, url string) (string, error) { + return httputils.Get(ctx, url, map[string]string{"Metadata": "true"}, timeout) +} + +// GetHostname returns hostname based on Azure instance metadata. 
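globalconfig above guards every field with one RWMutex and, for statsTags, copies the slice on both read and write so callers can never alias the internal backing array. The pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

type config struct {
	mu        sync.RWMutex
	statsTags []string
}

// Both accessors copy the slice, as in globalconfig: handing out the
// internal backing array would let callers mutate shared state outside
// the lock.
func (c *config) StatsTags() []string {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make([]string, len(c.statsTags))
	copy(out, c.statsTags)
	return out
}

func (c *config) SetStatsTags(tags []string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.statsTags = append([]string(nil), tags...)
}

func main() {
	var c config
	in := []string{"env:prod"}
	c.SetStatsTags(in)
	in[0] = "env:oops" // does not leak into the config
	fmt.Println(c.StatsTags()[0]) // env:prod
}
```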
+func GetHostname(ctx context.Context) (string, error) { + metadataJSON, err := instanceMetaFetcher.Fetch(ctx) + if err != nil { + return "", err + } + + var metadata struct { + VMID string + } + if err := json.Unmarshal([]byte(metadataJSON), &metadata); err != nil { + return "", fmt.Errorf("failed to parse Azure instance metadata: %s", err.Error()) + } + + if err := validate.ValidHostname(metadata.VMID); err != nil { + return "", err + } + + return metadata.VMID, nil +} + +var instanceMetaFetcher = cachedfetch.Fetcher{ + Name: "Azure Instance Metadata", + Attempt: func(ctx context.Context) (string, error) { + metadataJSON, err := getResponse(ctx, + metadataURL+"/metadata/instance/compute?api-version=2017-08-01") + if err != nil { + return "", fmt.Errorf("failed to get Azure instance metadata: %s", err.Error()) + } + return metadataJSON, nil + }, +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch/fetcher.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/cachedfetch/fetcher.go similarity index 98% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch/fetcher.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/cachedfetch/fetcher.go index 17d1a383..2cff43ca 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch/fetcher.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/cachedfetch/fetcher.go @@ -12,7 +12,7 @@ import ( "context" "sync" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/log" ) // Fetcher supports fetching a value, such as from a cloud service API. An diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/ec2/ec2.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/ec2/ec2.go new file mode 100644 index 00000000..a1087cab --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/ec2/ec2.go @@ -0,0 +1,72 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
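Each cloud provider wraps its metadata call in a cachedfetch.Fetcher. Judging from its use here, a Fetcher runs Attempt on every Fetch and can fall back to the last successful value, smoothing transient metadata-endpoint failures; the sketch below assumes those semantics and drops the locking the real implementation presumably carries:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// fetcher is a minimal sketch of the cachedfetch.Fetcher pattern used by
// the Azure/EC2/ECS providers (assumed semantics, not the vendored code).
type fetcher struct {
	Name      string
	Attempt   func(ctx context.Context) (string, error)
	lastValue string
}

func (f *fetcher) Fetch(ctx context.Context) (string, error) {
	v, err := f.Attempt(ctx)
	if err != nil {
		if f.lastValue != "" {
			return f.lastValue, nil // serve the stale value over an outage
		}
		return "", err
	}
	f.lastValue = v
	return v, nil
}

func main() {
	calls := 0
	f := fetcher{
		Name: "demo",
		Attempt: func(ctx context.Context) (string, error) {
			calls++
			if calls > 1 {
				return "", errors.New("metadata endpoint down")
			}
			return "vm-123", nil
		},
	}
	v1, _ := f.Fetch(context.Background())
	v2, _ := f.Fetch(context.Background()) // attempt fails, cache answers
	fmt.Println(v1, v2)                    // vm-123 vm-123
}
```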
+ +package ec2 + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/hostname/cachedfetch" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/httputils" +) + +// declare these as vars not const to ease testing +var ( + metadataURL = "http://169.254.169.254/latest/meta-data" + defaultPrefixes = []string{"ip-", "domu", "ec2amaz-"} + + MaxHostnameSize = 255 +) + +var instanceIDFetcher = cachedfetch.Fetcher{ + Name: "EC2 InstanceID", + Attempt: func(ctx context.Context) (string, error) { + return getMetadataItemWithMaxLength(ctx, + "/instance-id", + MaxHostnameSize, + ) + }, +} + +// GetInstanceID fetches the instance id for current host from the EC2 metadata API +func GetInstanceID(ctx context.Context) (string, error) { + return instanceIDFetcher.Fetch(ctx) +} + +func getMetadataItemWithMaxLength(ctx context.Context, endpoint string, maxLength int) (string, error) { + result, err := getMetadataItem(ctx, endpoint) + if err != nil { + return result, err + } + if len(result) > maxLength { + return "", fmt.Errorf("%v gave a response with length > to %v", endpoint, maxLength) + } + return result, err +} + +func getMetadataItem(ctx context.Context, endpoint string) (string, error) { + return doHTTPRequest(ctx, metadataURL+endpoint) +} + +func doHTTPRequest(ctx context.Context, url string) (string, error) { + headers := map[string]string{} + // Note: This assumes IMDS v1. IMDS v2 won't work in a containerized app and requires an API Token + // Users who have disabled IMDS v1 in favor of v2 will get a fallback hostname from a different provider (likely OS). + return httputils.Get(ctx, url, headers, 300*time.Millisecond) +} + +// IsDefaultHostname checks if a hostname is an EC2 default +func IsDefaultHostname(hostname string) bool { + hostname = strings.ToLower(hostname) + isDefault := false + + for _, val := range defaultPrefixes { + isDefault = isDefault || strings.HasPrefix(hostname, val) + } + return isDefault +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/ecs/aws.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/ecs/aws.go new file mode 100644 index 00000000..839a27c0 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/ecs/aws.go @@ -0,0 +1,54 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023 Datadog, Inc. 
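The EC2 provider that follows only swaps in the instance ID when the current hostname looks EC2-generated, which IsDefaultHostname detects by prefix. In isolation:

```go
package main

import (
	"fmt"
	"strings"
)

var defaultPrefixes = []string{"ip-", "domu", "ec2amaz-"}

// isDefaultHostname mirrors ec2.IsDefaultHostname: EC2-generated names
// start with one of a few known prefixes, which tells the resolver to
// prefer the instance ID instead.
func isDefaultHostname(hostname string) bool {
	hostname = strings.ToLower(hostname)
	for _, p := range defaultPrefixes {
		if strings.HasPrefix(hostname, p) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isDefaultHostname("IP-10-0-1-12"))  // true
	fmt.Println(isDefaultHostname("build-agent-3")) // false
}
```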
+ +package ecs + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/cachedfetch" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/httputils" +) + +// declare these as vars not const to ease testing +var ( + metadataURL = env.Get("ECS_CONTAINER_METADATA_URI_V4") + timeout = 300 * time.Millisecond +) + +var taskFetcher = cachedfetch.Fetcher{ + Name: "ECS LaunchType", + Attempt: func(ctx context.Context) (string, error) { + taskJSON, err := getResponse(ctx, metadataURL+"/task") + if err != nil { + return "", fmt.Errorf("failed to get ECS task metadata: %s", err.Error()) + } + return taskJSON, nil + }, +} + +func getResponse(ctx context.Context, url string) (string, error) { + return httputils.Get(ctx, url, map[string]string{}, timeout) +} + +// GetLaunchType gets the launch-type based on the ECS Task metadata endpoint +func GetLaunchType(ctx context.Context) (string, error) { + taskJSON, err := taskFetcher.Fetch(ctx) + if err != nil { + return "", err + } + + var metadata struct { + LaunchType string + } + if err := json.Unmarshal([]byte(taskJSON), &metadata); err != nil { + return "", fmt.Errorf("failed to parse ecs task metadata: %s", err.Error()) + } + return metadata.LaunchType, nil +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/fqdn_nix.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/fqdn_nix.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/fqdn_nix.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/fqdn_nix.go diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/fqdn_windows.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/fqdn_windows.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/fqdn_windows.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/fqdn_windows.go diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/gce/gce.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/gce/gce.go similarity index 92% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/gce/gce.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/gce/gce.go index e6965d55..53036c19 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/gce/gce.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/gce/gce.go @@ -11,8 +11,8 @@ import ( "strings" "time" - "gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/cachedfetch" - "gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/httputils" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/cachedfetch" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/httputils" ) // declare these as vars not const to ease testing @@ -26,7 +26,7 @@ var hostnameFetcher = cachedfetch.Fetcher{ hostname, err := getResponseWithMaxLength(ctx, metadataURL+"/instance/hostname", 255) if err != nil { - return "", fmt.Errorf("unable to retrieve hostname from GCE: %s", err) + return "", fmt.Errorf("unable to retrieve hostname from GCE: %s", err.Error()) } return hostname, nil }, @@ -39,7 +39,7 @@ var projectIDFetcher = cachedfetch.Fetcher{ metadataURL+"/project/project-id", 255) if err != nil { - return "", fmt.Errorf("unable to retrieve project ID from GCE: %s", err) + return "", fmt.Errorf("unable to retrieve project ID from GCE: %s", err.Error()) } return projectID, err }, @@ -76,7 
+76,7 @@ func getInstanceAlias(ctx context.Context, hostname string) (string, error) { // of the Compute Engine metadata server. // See https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#gke_mds if hostname == "" { - return "", fmt.Errorf("unable to retrieve instance name and hostname from GCE: %s", err) + return "", fmt.Errorf("unable to retrieve instance name and hostname from GCE: %s", err.Error()) } instanceName = strings.SplitN(hostname, ".", 2)[0] } @@ -108,7 +108,7 @@ func getResponseWithMaxLength(ctx context.Context, endpoint string, maxLength in func getResponse(ctx context.Context, url string) (string, error) { res, err := httputils.Get(ctx, url, map[string]string{"Metadata-Flavor": "Google"}, 1000*time.Millisecond) if err != nil { - return "", fmt.Errorf("GCE metadata API error: %s", err) + return "", fmt.Errorf("GCE metadata API error: %s", err.Error()) } // Some cloud platforms will respond with an empty body, causing the agent to assume a faulty hostname diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/httputils/helpers.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/httputils/helpers.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/httputils/helpers.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/httputils/helpers.go diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/providers.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/providers.go similarity index 91% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/providers.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/providers.go index 85c685df..9504160f 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/providers.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/providers.go @@ -13,12 +13,13 @@ import ( "sync/atomic" "time" - "gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/azure" - "gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ec2" - "gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/ecs" - "gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/gce" - "gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/validate" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/azure" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/ec2" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/ecs" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/gce" + "github.com/DataDog/dd-trace-go/v2/internal/hostname/validate" + "github.com/DataDog/dd-trace-go/v2/internal/log" ) // For testing purposes @@ -150,7 +151,7 @@ func updateHostname(now time.Time) { for _, p := range providerCatalog { detectedHostname, err := p.pf(ctx, hostname) if err != nil { - log.Debug("Unable to get hostname from provider %s: %v", p.name, err) + log.Debug("Unable to get hostname from provider %q: %v", p.name, err.Error()) continue } hostname = detectedHostname @@ -171,7 +172,7 @@ func updateHostname(now time.Time) { } func fromConfig(_ context.Context, _ string) (string, error) { - hn := os.Getenv("DD_HOSTNAME") + hn := env.Get("DD_HOSTNAME") err := validate.ValidHostname(hn) if err != nil { return "", err @@ -184,7 +185,7 @@ func fromFargate(ctx context.Context, _ string) (string, error) { } func fargate(ctx context.Context) (string, error) { - if _, ok := os.LookupEnv("ECS_CONTAINER_METADATA_URI_V4"); !ok { + if _, ok 
:= env.Lookup("ECS_CONTAINER_METADATA_URI_V4"); !ok { return "", fmt.Errorf("not running in fargate") } launchType, err := ecs.GetLaunchType(ctx) @@ -210,7 +211,7 @@ func fromFQDN(_ context.Context, _ string) (string, error) { //TODO: test this on windows fqdn, err := getSystemFQDN() if err != nil { - return "", fmt.Errorf("unable to get FQDN from system: %s", err) + return "", fmt.Errorf("unable to get FQDN from system: %s", err.Error()) } return fqdn, nil } @@ -233,11 +234,11 @@ func fromEC2(ctx context.Context, currentHostname string) (string, error) { // If the current hostname is a default one we try to get the instance id instanceID, err := ec2.GetInstanceID(ctx) if err != nil { - return "", fmt.Errorf("unable to determine hostname from EC2: %s", err) + return "", fmt.Errorf("unable to determine hostname from EC2: %s", err.Error()) } err = validate.ValidHostname(instanceID) if err != nil { - return "", fmt.Errorf("EC2 instance id is not a valid hostname: %s", err) + return "", fmt.Errorf("EC2 instance id is not a valid hostname: %s", err.Error()) } return instanceID, nil } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/validate/validate.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/validate/validate.go similarity index 97% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/validate/validate.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/validate/validate.go index fa97b1c9..67527a08 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/hostname/validate/validate.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/hostname/validate/validate.go @@ -13,7 +13,7 @@ import ( "regexp" "strings" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/log" ) const maxLength = 255 diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/inmemoryfile.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/inmemoryfile.go new file mode 100644 index 00000000..e1bad3bf --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/inmemoryfile.go @@ -0,0 +1,12 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023 Datadog, Inc. + +//go:build !linux + +package internal + +func CreateMemfd(_ string, _ []byte) (int, error) { + return 0, nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/inmemoryfilelinux.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/inmemoryfilelinux.go new file mode 100644 index 00000000..b7719c83 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/inmemoryfilelinux.go @@ -0,0 +1,36 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023 Datadog, Inc. 
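// Illustration: a stripped-down sketch of the provider-chain pattern that
// updateHostname in providers.go (above) implements — try providers in order,
// log and fall through on error, and keep the latest successful candidate.
// The provider names and bodies here are hypothetical.
package main

import (
	"context"
	"errors"
	"fmt"
)

type provider struct {
	name string
	pf   func(ctx context.Context, current string) (string, error)
}

func resolveHostname(ctx context.Context, catalog []provider) string {
	hostname := ""
	for _, p := range catalog {
		h, err := p.pf(ctx, hostname)
		if err != nil {
			// mirror the library's behavior: log the failure and move on
			fmt.Printf("unable to get hostname from provider %q: %v\n", p.name, err)
			continue
		}
		hostname = h
	}
	return hostname
}

func main() {
	catalog := []provider{
		{"config", func(context.Context, string) (string, error) { return "", errors.New("DD_HOSTNAME not set") }},
		{"os", func(context.Context, string) (string, error) { return "ip-10-0-0-1", nil }},
	}
	fmt.Println(resolveHostname(context.Background(), catalog)) // ip-10-0-0-1
}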
+ +//go:build linux + +package internal + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +func CreateMemfd(name string, data []byte) (int, error) { + fd, err := unix.MemfdCreate(name, unix.MFD_CLOEXEC|unix.MFD_ALLOW_SEALING) + if err != nil { + return 0, fmt.Errorf("failed to create memfd '%s': %v", name, err) + } + + bytesWritten, err := unix.Write(fd, data) + if err != nil { + return 0, fmt.Errorf("failed to write data to memfd (fd: %d): %v", fd, err) + } + if bytesWritten != len(data) { + return 0, fmt.Errorf("data mismatch in memfd (fd: %d): expected to write %d bytes, but wrote %d bytes", fd, len(data), bytesWritten) + } + + _, err = unix.FcntlInt(uintptr(fd), unix.F_ADD_SEALS, unix.F_SEAL_SHRINK|unix.F_SEAL_GROW|unix.F_SEAL_WRITE|unix.F_SEAL_SEAL) + if err != nil { + return 0, fmt.Errorf("failed to seal memfd (fd: %d): %v", fd, err) + } + + return fd, nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/config/config.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/config/config.go new file mode 100644 index 00000000..7a58475e --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/config/config.go @@ -0,0 +1,90 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package config + +import ( + "context" + "net" + "net/http" + "net/url" + "time" +) + +type TracerConfig struct { + DDTags map[string]any + Env string + Service string + Version string + AgentURL *url.URL + APIKey string + APPKey string + HTTPClient *http.Client + Site string +} + +type AgentFeatures struct { + EVPProxyV2 bool +} + +type Config struct { + Enabled bool + MLApp string + AgentlessEnabled *bool + ResolvedAgentlessEnabled bool + ProjectName string + TracerConfig TracerConfig + AgentFeatures AgentFeatures +} + +// We copy the transport to avoid using the default one, as it might be +// augmented with tracing and we don't want these calls to be recorded. +// See https://golang.org/pkg/net/http/#DefaultTransport . +// Note: We don't set a global Timeout on the client; instead, we manage +// timeouts per-request using context.WithTimeout for better control. 
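// Illustration: the note above prefers per-request context deadlines over a
// global http.Client Timeout. A minimal sketch of that pattern; the URL is a
// placeholder and the helper name is hypothetical.
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func getWithDeadline(client *http.Client, url string, timeout time.Duration) (int, error) {
	// The deadline lives on the request's context, so each call can pick its
	// own budget while sharing one client and its connection pool.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return 0, err
	}
	res, err := client.Do(req)
	if err != nil {
		return 0, err
	}
	defer res.Body.Close()
	return res.StatusCode, nil
}

func main() {
	client := &http.Client{} // intentionally no Timeout field
	status, err := getWithDeadline(client, "http://localhost:8126/info", 2*time.Second)
	fmt.Println(status, err)
}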
+func newHTTPClient() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, + } +} + +func (c *Config) DefaultHTTPClient() *http.Client { + var cl *http.Client + if c.ResolvedAgentlessEnabled || c.TracerConfig.AgentURL.Scheme != "unix" { + cl = newHTTPClient() + } else { + dialer := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + } + cl = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return dialer.DialContext(ctx, "unix", (&net.UnixAddr{ + Name: c.TracerConfig.AgentURL.Path, + Net: "unix", + }).String()) + }, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, + } + } + return cl +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/context.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/context.go new file mode 100644 index 00000000..79ae2dd9 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/context.go @@ -0,0 +1,50 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package llmobs + +import "context" + +type ( + ctxKeyActiveLLMSpan struct{} + ctxKeyPropagatedLLMSpan struct{} +) + +// PropagatedLLMSpan represents LLMObs span context that can be propagated across process boundaries. +type PropagatedLLMSpan struct { + // MLApp is the ML application name. + MLApp string + // TraceID is the LLMObs trace ID. + TraceID string + // SpanID is the span ID. + SpanID string +} + +// PropagatedLLMSpanFromContext retrieves a PropagatedLLMSpan from the context. +// Returns the span and true if found, nil and false otherwise. +func PropagatedLLMSpanFromContext(ctx context.Context) (*PropagatedLLMSpan, bool) { + if val, ok := ctx.Value(ctxKeyPropagatedLLMSpan{}).(*PropagatedLLMSpan); ok { + return val, true + } + return nil, false +} + +// ContextWithPropagatedLLMSpan returns a new context with the given PropagatedLLMSpan attached. +func ContextWithPropagatedLLMSpan(ctx context.Context, span *PropagatedLLMSpan) context.Context { + return context.WithValue(ctx, ctxKeyPropagatedLLMSpan{}, span) +} + +// ActiveLLMSpanFromContext retrieves the active LLMObs span from the context. +// Returns the span and true if found, nil and false otherwise. 
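// Illustration: the context round-trip offered by the helpers above. Since
// this package sits under internal/ it is not importable by user code; the
// snippet only shows how the tracer can thread propagated span identity
// through a context. All field values are placeholders.
package main

import (
	"context"
	"fmt"

	"github.com/DataDog/dd-trace-go/v2/internal/llmobs"
)

func main() {
	ctx := llmobs.ContextWithPropagatedLLMSpan(context.Background(), &llmobs.PropagatedLLMSpan{
		MLApp:   "my-ml-app",
		TraceID: "00000000000000000000000000000001",
		SpanID:  "12345",
	})
	if span, ok := llmobs.PropagatedLLMSpanFromContext(ctx); ok {
		fmt.Println(span.MLApp, span.TraceID, span.SpanID)
	}
}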
+func ActiveLLMSpanFromContext(ctx context.Context) (*Span, bool) { + if span, ok := ctx.Value(ctxKeyActiveLLMSpan{}).(*Span); ok { + return span, true + } + return nil, false +} + +func contextWithActiveLLMSpan(ctx context.Context, span *Span) context.Context { + return context.WithValue(ctx, ctxKeyActiveLLMSpan{}, span) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/llmobs.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/llmobs.go new file mode 100644 index 00000000..781b68cc --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/llmobs.go @@ -0,0 +1,828 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package llmobs + +import ( + "context" + "crypto/rand" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "math" + "math/big" + "slices" + "strings" + "sync" + "time" + "unicode" + + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/internal/llmobs/config" + "github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/version" +) + +var ( + mu sync.Mutex + activeLLMObs *LLMObs +) + +var ( + errLLMObsNotEnabled = errors.New("LLMObs is not enabled. Ensure the tracer has been started with the option tracer.WithLLMObsEnabled(true) or set DD_LLMOBS_ENABLED=true") + errAgentlessRequiresAPIKey = errors.New("LLMOBs agentless mode requires a valid API key - set the DD_API_KEY env variable to configure one") + errMLAppRequired = errors.New("ML App is required for sending LLM Observability data") +) + +const ( + baggageKeyExperimentID = "_ml_obs.experiment_id" +) + +const ( + defaultParentID = "undefined" +) + +// SpanKind represents the type of an LLMObs span. +type SpanKind string + +const ( + // SpanKindExperiment represents an experiment span for testing and evaluation. + SpanKindExperiment SpanKind = "experiment" + // SpanKindWorkflow represents a workflow span that orchestrates multiple operations. + SpanKindWorkflow SpanKind = "workflow" + // SpanKindLLM represents a span for Large Language Model operations. + SpanKindLLM SpanKind = "llm" + // SpanKindEmbedding represents a span for embedding generation operations. + SpanKindEmbedding SpanKind = "embedding" + // SpanKindAgent represents a span for AI agent operations. + SpanKindAgent SpanKind = "agent" + // SpanKindRetrieval represents a span for document retrieval operations. + SpanKindRetrieval SpanKind = "retrieval" + // SpanKindTask represents a span for general task operations. + SpanKindTask SpanKind = "task" + // SpanKindTool represents a span for tool usage operations. 
+ SpanKindTool SpanKind = "tool" +) + +const ( + defaultFlushInterval = 2 * time.Second +) + +const ( + sizeLimitEVPEvent = 5_000_000 // 5MB + collectionErrorDroppedIO = "dropped_io" + droppedValueText = "[This value has been dropped because this span's size exceeds the 5MB size limit.]" +) + +// See: https://docs.datadoghq.com/getting_started/site/#access-the-datadog-site +var ddSitesNeedingAppSubdomain = []string{"datadoghq.com", "datadoghq.eu", "ddog-gov.com"} + +type llmobsContext struct { + // apply to all spans + metadata map[string]any + metrics map[string]float64 + tags map[string]string + + // agent specific + agentManifest string + + // llm specific + modelName string + modelProvider string + prompt *Prompt + toolDefinitions []ToolDefinition + + // input + inputDocuments []EmbeddedDocument + inputMessages []LLMMessage + inputText string + + // output + outputDocuments []RetrievedDocument + outputMessages []LLMMessage + outputText string + + // experiment specific + experimentInput any + experimentExpectedOutput any + experimentOutput any +} + +// LLMObs represents the main LLMObs instance that handles span collection and transport. +type LLMObs struct { + // Config contains the LLMObs configuration. + Config *config.Config + // Transport handles sending data to the Datadog backend. + Transport *transport.Transport + // Tracer is the underlying APM tracer. + Tracer Tracer + + // channels used by producers + spanEventsCh chan *transport.LLMObsSpanEvent + evalMetricsCh chan *transport.LLMObsMetric + + // runtime buffers, payloads are accumulated here and flushed periodically + bufSpanEvents []*transport.LLMObsSpanEvent + bufEvalMetrics []*transport.LLMObsMetric + + // lifecycle + mu sync.Mutex + running bool + wg sync.WaitGroup + stopCh chan struct{} // signal stop + flushNowCh chan struct{} + flushInterval time.Duration +} + +func newLLMObs(cfg *config.Config, tracer Tracer) (*LLMObs, error) { + if cfg.AgentlessEnabled != nil { + cfg.ResolvedAgentlessEnabled = *cfg.AgentlessEnabled + } else { + // if agentlessEnabled is not set and evp_proxy is supported in the agent, default to use the agent + cfg.ResolvedAgentlessEnabled = !cfg.AgentFeatures.EVPProxyV2 + if cfg.ResolvedAgentlessEnabled { + log.Debug("llmobs: DD_LLMOBS_AGENTLESS_ENABLED not set, defaulting to true since agent mode is not supported") + } else { + log.Debug("llmobs: DD_LLMOBS_AGENTLESS_ENABLED not set, defaulting to false since agent mode is supported") + } + } + + if cfg.ResolvedAgentlessEnabled && !isAPIKeyValid(cfg.TracerConfig.APIKey) { + return nil, errAgentlessRequiresAPIKey + } + if cfg.MLApp == "" { + return nil, errMLAppRequired + } + if cfg.TracerConfig.HTTPClient == nil { + cfg.TracerConfig.HTTPClient = cfg.DefaultHTTPClient() + } + return &LLMObs{ + Config: cfg, + Transport: transport.New(cfg), + Tracer: tracer, + spanEventsCh: make(chan *transport.LLMObsSpanEvent), + evalMetricsCh: make(chan *transport.LLMObsMetric), + stopCh: make(chan struct{}), + flushNowCh: make(chan struct{}, 1), + flushInterval: defaultFlushInterval, + }, nil +} + +// Start starts the global LLMObs instance with the given configuration and tracer. +// Any previously running instance is stopped and replaced; an error is returned if the configuration is invalid.
+func Start(cfg config.Config, tracer Tracer) error { + mu.Lock() + defer mu.Unlock() + + if activeLLMObs != nil { + activeLLMObs.Stop() + } + if !cfg.Enabled { + return nil + } + l, err := newLLMObs(&cfg, tracer) + if err != nil { + return err + } + activeLLMObs = l + activeLLMObs.Run() + return nil +} + +// Stop stops the active LLMObs instance and cleans up resources. +func Stop() { + mu.Lock() + defer mu.Unlock() + + if activeLLMObs != nil { + activeLLMObs.Stop() + activeLLMObs = nil + } +} + +// ActiveLLMObs returns the current active LLMObs instance, or an error if LLMObs is not enabled or started. +func ActiveLLMObs() (*LLMObs, error) { + if activeLLMObs == nil || !activeLLMObs.Config.Enabled { + return nil, errLLMObsNotEnabled + } + return activeLLMObs, nil +} + +// Flush forces a flush of all buffered LLMObs data to the transport. +func Flush() { + if activeLLMObs != nil { + activeLLMObs.Flush() + } +} + +// Run starts the worker loop that processes span events and metrics. +func (l *LLMObs) Run() { + l.mu.Lock() + if l.running { + l.mu.Unlock() + return + } + l.running = true + l.mu.Unlock() + + l.wg.Add(1) + go func() { + // this goroutine should be the only one writing to the internal buffers + defer l.wg.Done() + + ticker := time.NewTicker(l.flushInterval) + defer ticker.Stop() + + for { + select { + case ev := <-l.spanEventsCh: + l.bufSpanEvents = append(l.bufSpanEvents, ev) + + case evalMetric := <-l.evalMetricsCh: + l.bufEvalMetrics = append(l.bufEvalMetrics, evalMetric) + + case <-ticker.C: + params := l.clearBuffersNonLocked() + l.wg.Add(1) + go func() { + defer l.wg.Done() + l.batchSend(params) + }() + + case <-l.flushNowCh: + log.Debug("llmobs: on-demand flush signal") + params := l.clearBuffersNonLocked() + l.wg.Add(1) + go func() { + defer l.wg.Done() + l.batchSend(params) + }() + + case <-l.stopCh: + log.Debug("llmobs: stop signal") + l.drainChannels() + params := l.clearBuffersNonLocked() + l.batchSend(params) + return + } + } + }() +} + +// clearBuffersNonLocked clears the internal buffers and returns the corresponding batchSendParams to send to the backend. +// It is meant to be called only from the main Run worker goroutine. +func (l *LLMObs) clearBuffersNonLocked() batchSendParams { + params := batchSendParams{ + spanEvents: l.bufSpanEvents, + evalMetrics: l.bufEvalMetrics, + } + l.bufSpanEvents = nil + l.bufEvalMetrics = nil + return params +} + +// Flush forces an immediate flush of anything currently buffered. +// It does not wait for new items to arrive. +func (l *LLMObs) Flush() { + // non-blocking edge trigger so multiple calls coalesce + select { + case l.flushNowCh <- struct{}{}: + default: + } +} + +// Stop requests shutdown, drains what’s already in the channels, flushes, and waits. +func (l *LLMObs) Stop() { + l.mu.Lock() + if !l.running { + l.mu.Unlock() + return + } + l.running = false + l.mu.Unlock() + + // Stop the sender/flush loop + select { + case <-l.stopCh: + default: + close(l.stopCh) + } + + // Wait for the main worker to exit (it will do a final flush) + l.wg.Wait() +} + +// drainChannels pulls everything currently buffered in the channels into our in-memory buffers. 
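// Illustration: a self-contained, hypothetical reduction of the Run loop
// above. One goroutine owns the buffer; producers, a flush ticker, a
// coalescing on-demand flush channel, and a stop channel all feed a single
// select, which is what makes the unguarded buffer writes safe.
package main

import (
	"fmt"
	"time"
)

type worker struct {
	itemsCh    chan string
	flushNowCh chan struct{}
	stopCh     chan struct{}
	buf        []string
}

func (w *worker) run() {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case it := <-w.itemsCh:
			w.buf = append(w.buf, it) // only this goroutine touches buf
		case <-ticker.C:
			w.flush("tick")
		case <-w.flushNowCh:
			w.flush("on-demand")
		case <-w.stopCh:
			w.flush("final")
			return
		}
	}
}

func (w *worker) flush(reason string) {
	if len(w.buf) == 0 {
		return
	}
	fmt.Printf("flush(%s): %v\n", reason, w.buf)
	w.buf = nil
}

func main() {
	w := &worker{itemsCh: make(chan string), flushNowCh: make(chan struct{}, 1), stopCh: make(chan struct{})}
	go w.run()
	w.itemsCh <- "span-event"
	// Non-blocking, coalescing trigger — the same edge-trigger trick Flush uses.
	select {
	case w.flushNowCh <- struct{}{}:
	default:
	}
	time.Sleep(200 * time.Millisecond)
	close(w.stopCh)
	time.Sleep(50 * time.Millisecond)
}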
+func (l *LLMObs) drainChannels() { + for { + progress := false + select { + case ev := <-l.spanEventsCh: + l.mu.Lock() + l.bufSpanEvents = append(l.bufSpanEvents, ev) + l.mu.Unlock() + progress = true + default: + } + + select { + case evalMetric := <-l.evalMetricsCh: + l.mu.Lock() + l.bufEvalMetrics = append(l.bufEvalMetrics, evalMetric) + l.mu.Unlock() + progress = true + default: + } + + if !progress { + return + } + } +} + +type batchSendParams struct { + spanEvents []*transport.LLMObsSpanEvent + evalMetrics []*transport.LLMObsMetric +} + +// batchSend sends the buffered payloads to the backend. +func (l *LLMObs) batchSend(params batchSendParams) { + if len(params.spanEvents) == 0 && len(params.evalMetrics) == 0 { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + var wg sync.WaitGroup + + if len(params.spanEvents) > 0 { + wg.Add(1) + events := params.spanEvents + go func() { + defer wg.Done() + log.Debug("llmobs: sending %d LLMObs Span Events", len(events)) + if log.DebugEnabled() { + for _, ev := range events { + if b, err := json.Marshal(ev); err == nil { + log.Debug("llmobs: LLMObs Span Event: %s", b) + } + } + } + if err := l.Transport.PushSpanEvents(ctx, events); err != nil { + log.Error("llmobs: PushSpanEvents failed: %v", err.Error()) + } else { + log.Debug("llmobs: PushSpanEvents success") + } + }() + } + if len(params.evalMetrics) > 0 { + wg.Add(1) + metrics := params.evalMetrics + go func() { + defer wg.Done() + log.Debug("llmobs: sending %d LLMObs Span Eval Metrics", len(metrics)) + if log.DebugEnabled() { + for _, eval := range metrics { + if b, err := json.Marshal(eval); err == nil { + log.Debug("llmobs: LLMObs Span Eval Metric: %s", b) + } + } + } + if err := l.Transport.PushEvalMetrics(ctx, metrics); err != nil { + log.Error("llmobs: PushEvalMetrics failed: %v", err.Error()) + } else { + log.Debug("llmobs: PushEvalMetrics success") + } + }() + } + wg.Wait() +} + +// submitLLMObsSpan generates and submits an LLMObs span event to the LLMObs intake. 
+func (l *LLMObs) submitLLMObsSpan(span *Span) { + event := l.llmobsSpanEvent(span) + l.spanEventsCh <- event +} + +func (l *LLMObs) llmobsSpanEvent(span *Span) *transport.LLMObsSpanEvent { + meta := make(map[string]any) + + spanKind := span.spanKind + meta["span.kind"] = string(spanKind) + + if (spanKind == SpanKindLLM || spanKind == SpanKindEmbedding) && (span.llmCtx.modelName != "" || span.llmCtx.modelProvider != "") { + modelName := span.llmCtx.modelName + if modelName == "" { + modelName = "custom" + } + modelProvider := strings.ToLower(span.llmCtx.modelProvider) + if modelProvider == "" { + modelProvider = "custom" + } + meta["model_name"] = modelName + meta["model_provider"] = modelProvider + } + + metadata := span.llmCtx.metadata + if metadata == nil { + metadata = make(map[string]any) + } + if spanKind == SpanKindAgent && span.llmCtx.agentManifest != "" { + metadata["agent_manifest"] = span.llmCtx.agentManifest + } + if len(metadata) > 0 { + meta["metadata"] = metadata + } + + input := make(map[string]any) + output := make(map[string]any) + + if spanKind == SpanKindLLM && len(span.llmCtx.inputMessages) > 0 { + input["messages"] = span.llmCtx.inputMessages + } else if txt := span.llmCtx.inputText; len(txt) > 0 { + input["value"] = txt + } + + if spanKind == SpanKindLLM && len(span.llmCtx.outputMessages) > 0 { + output["messages"] = span.llmCtx.outputMessages + } else if txt := span.llmCtx.outputText; len(txt) > 0 { + output["value"] = txt + } + + if spanKind == SpanKindExperiment { + if expectedOut := span.llmCtx.experimentExpectedOutput; expectedOut != nil { + meta["expected_output"] = expectedOut + } + if expInput := span.llmCtx.experimentInput; expInput != nil { + meta["input"] = expInput + } + if out := span.llmCtx.experimentOutput; out != nil { + meta["output"] = out + } + } + + if spanKind == SpanKindEmbedding { + if inputDocs := span.llmCtx.inputDocuments; len(inputDocs) > 0 { + input["documents"] = inputDocs + } + } + if spanKind == SpanKindRetrieval { + if outputDocs := span.llmCtx.outputDocuments; len(outputDocs) > 0 { + output["documents"] = outputDocs + } + } + if inputPrompt := span.llmCtx.prompt; inputPrompt != nil { + if spanKind != SpanKindLLM { + log.Warn("llmobs: dropping prompt on non-LLM span kind, annotating prompts is only supported for LLM span kinds") + } else { + input["prompt"] = inputPrompt + } + } else if spanKind == SpanKindLLM { + if span.parent != nil && span.parent.llmCtx.prompt != nil { + input["prompt"] = span.parent.llmCtx.prompt + } + } + + if toolDefinitions := span.llmCtx.toolDefinitions; len(toolDefinitions) > 0 { + meta["tool_definitions"] = toolDefinitions + } + + spanStatus := "ok" + var errMsg *transport.ErrorMessage + if span.error != nil { + spanStatus = "error" + errMsg = transport.NewErrorMessage(span.error) + meta["error.message"] = errMsg.Message + meta["error.stack"] = errMsg.Stack + meta["error.type"] = errMsg.Type + } + + if len(input) > 0 { + meta["input"] = input + } + if len(output) > 0 { + meta["output"] = output + } + + spanID := span.apm.SpanID() + parentID := defaultParentID + if span.parent != nil { + parentID = span.parent.apm.SpanID() + } + if span.llmTraceID == "" { + log.Warn("llmobs: span has no trace ID") + span.llmTraceID = newLLMObsTraceID() + } + + tags := make(map[string]string) + for k, v := range l.Config.TracerConfig.DDTags { + tags[k] = fmt.Sprintf("%v", v) + } + tags["version"] = l.Config.TracerConfig.Version + tags["env"] = l.Config.TracerConfig.Env + tags["service"] = l.Config.TracerConfig.Service +
tags["source"] = "integration" + tags["ml_app"] = span.mlApp + tags["ddtrace.version"] = version.Tag + tags["language"] = "go" + + errTag := "0" + if span.error != nil { + errTag = "1" + } + tags["error"] = errTag + + if errMsg != nil { + tags["error_type"] = errMsg.Type + } + if span.integration != "" { + tags["integration"] = span.integration + } + + for k, v := range span.llmCtx.tags { + tags[k] = v + } + tagsSlice := make([]string, 0, len(tags)) + for k, v := range tags { + tagsSlice = append(tagsSlice, fmt.Sprintf("%s:%s", k, v)) + } + + ev := &transport.LLMObsSpanEvent{ + SpanID: spanID, + TraceID: span.llmTraceID, + ParentID: parentID, + SessionID: span.propagatedSessionID(), + Tags: tagsSlice, + Name: span.name, + StartNS: span.startTime.UnixNano(), + Duration: span.finishTime.Sub(span.startTime).Nanoseconds(), + Status: spanStatus, + StatusMessage: "", + Meta: meta, + Metrics: span.llmCtx.metrics, + CollectionErrors: nil, + SpanLinks: span.spanLinks, + Scope: span.scope, + } + if b, err := json.Marshal(ev); err == nil { + if len(b) > sizeLimitEVPEvent { + log.Warn( + "llmobs: dropping llmobs span event input/output because its size (%s) exceeds the event size limit (5MB)", + readableBytes(len(b)), + ) + dropSpanEventIO(ev) + } + } + return ev +} + +func dropSpanEventIO(ev *transport.LLMObsSpanEvent) { + if ev == nil { + return + } + droppedIO := false + if _, ok := ev.Meta["input"]; ok { + ev.Meta["input"] = map[string]any{"value": droppedValueText} + droppedIO = true + } + if _, ok := ev.Meta["output"]; ok { + ev.Meta["output"] = map[string]any{"value": droppedValueText} + droppedIO = true + } + if droppedIO { + ev.CollectionErrors = []string{collectionErrorDroppedIO} + } else { + log.Debug("llmobs: attempted to drop span event IO but it was not present") + } +} + +// StartSpan starts a new LLMObs span with the given kind, name, and configuration. +// Returns the created span and a context containing the span. 
+func (l *LLMObs) StartSpan(ctx context.Context, kind SpanKind, name string, cfg StartSpanConfig) (*Span, context.Context) { + spanName := name + if spanName == "" { + spanName = string(kind) + } + + if cfg.StartTime.IsZero() { + cfg.StartTime = time.Now() + } + + startCfg := StartAPMSpanConfig{ + SpanType: ext.SpanTypeLLM, + StartTime: cfg.StartTime, + } + apmSpan, ctx := l.Tracer.StartSpan(ctx, spanName, startCfg) + span := &Span{ + name: spanName, + apm: apmSpan, + startTime: cfg.StartTime, + } + if !l.Config.Enabled { + log.Warn("llmobs: LLMObs span was started without enabling LLMObs") + return span, ctx + } + + if parent, ok := ActiveLLMSpanFromContext(ctx); ok { + log.Debug("llmobs: found active llm span in context: (trace_id: %q, span_id: %q, ml_app: %q)", + parent.TraceID(), parent.SpanID(), parent.MLApp()) + span.parent = parent + span.llmTraceID = parent.llmTraceID + } else if propagated, ok := PropagatedLLMSpanFromContext(ctx); ok { + log.Debug("llmobs: found propagated llm span in context: (trace_id: %q, span_id: %q, ml_app: %q)", + propagated.TraceID, propagated.SpanID, propagated.MLApp) + span.propagated = propagated + span.llmTraceID = propagated.TraceID + } else { + span.llmTraceID = newLLMObsTraceID() + } + + span.mlApp = cfg.MLApp + span.spanKind = kind + span.sessionID = cfg.SessionID + + span.llmCtx = llmobsContext{ + modelName: cfg.ModelName, + modelProvider: cfg.ModelProvider, + } + + if span.sessionID == "" { + span.sessionID = span.propagatedSessionID() + } + if span.mlApp == "" { + span.mlApp = span.propagatedMLApp() + if span.mlApp == "" { + // We should ensure there's always an ML App to fall back to during startup, so in theory this should never happen. + log.Warn("llmobs: ML App is required for sending LLM Observability data.") + } + } + log.Debug("llmobs: starting LLMObs span: %s, span_kind: %s, ml_app: %s", spanName, kind, span.mlApp) + return span, contextWithActiveLLMSpan(ctx, span) +} + +// StartExperimentSpan starts a new experiment span with the given name, experiment ID, and configuration. +// Returns the created span and a context containing the span. +func (l *LLMObs) StartExperimentSpan(ctx context.Context, name string, experimentID string, cfg StartSpanConfig) (*Span, context.Context) { + span, ctx := l.StartSpan(ctx, SpanKindExperiment, name, cfg) + + if experimentID != "" { + span.apm.SetBaggageItem(baggageKeyExperimentID, experimentID) + span.scope = "experiments" + } + return span, ctx +} + +// SubmitEvaluation submits an evaluation metric for a span. +// The span can be identified either by span/trace IDs or by tag key-value pairs. 
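// Illustration: an assumed call sequence for the lifecycle defined above
// (StartSpan here, Span.Finish in span.go). The package is internal, so this
// is not a public API; the model fields are placeholders.
package main

import (
	"context"
	"errors"

	"github.com/DataDog/dd-trace-go/v2/internal/llmobs"
)

func main() {
	l, err := llmobs.ActiveLLMObs() // assumes Start already ran with a valid config
	if err != nil {
		return
	}
	span, ctx := l.StartSpan(context.Background(), llmobs.SpanKindLLM, "chat-completion", llmobs.StartSpanConfig{
		ModelName:     "gpt-4o",
		ModelProvider: "openai",
	})
	_ = ctx // spans started from ctx become children of span
	span.Finish(llmobs.FinishSpanConfig{Error: errors.New("upstream timeout")})
}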
+func (l *LLMObs) SubmitEvaluation(cfg EvaluationConfig) error { + // Validate exactly one join method is provided + hasSpanJoin := cfg.SpanID != "" && cfg.TraceID != "" + hasTagJoin := cfg.TagKey != "" && cfg.TagValue != "" + + if hasSpanJoin && hasTagJoin { + return errors.New("provide either span/trace IDs or tag key/value, not both") + } + if !hasSpanJoin && !hasTagJoin { + return errors.New("must provide either span/trace IDs or tag key/value for joining") + } + if cfg.Label == "" { + return errors.New("label is required for evaluation metrics") + } + numValues := 0 + if cfg.CategoricalValue != nil { + numValues++ + } + if cfg.ScoreValue != nil { + numValues++ + } + if cfg.BooleanValue != nil { + numValues++ + } + if numValues != 1 { + return errors.New("exactly one metric value (categorical, score, or boolean) must be provided") + } + + mlApp := cfg.MLApp + if mlApp == "" { + mlApp = l.Config.MLApp + } + + timestampMS := cfg.TimestampMS + if timestampMS == 0 { + timestampMS = time.Now().UnixMilli() + } + + // Build the appropriate join condition + var joinOn transport.EvaluationJoinOn + if hasSpanJoin { + joinOn.Span = &transport.EvaluationSpanJoin{ + SpanID: cfg.SpanID, + TraceID: cfg.TraceID, + } + } else { + joinOn.Tag = &transport.EvaluationTagJoin{ + Key: cfg.TagKey, + Value: cfg.TagValue, + } + } + + metric := &transport.LLMObsMetric{ + JoinOn: joinOn, + Label: cfg.Label, + MLApp: mlApp, + TimestampMS: timestampMS, + Tags: cfg.Tags, + } + + if cfg.CategoricalValue != nil { + metric.CategoricalValue = cfg.CategoricalValue + metric.MetricType = "categorical" + } else if cfg.ScoreValue != nil { + metric.ScoreValue = cfg.ScoreValue + metric.MetricType = "score" + } else if cfg.BooleanValue != nil { + metric.BooleanValue = cfg.BooleanValue + metric.MetricType = "boolean" + } else { + return errors.New("a metric value (categorical, score, or boolean) is required for evaluation metrics") + } + + l.evalMetricsCh <- metric + return nil +} + +// PublicResourceBaseURL returns the base URL to access a resource (experiments, projects, etc.) +func PublicResourceBaseURL() string { + site := "datadoghq.com" + if activeLLMObs != nil && activeLLMObs.Config.TracerConfig.Site != "" { + site = activeLLMObs.Config.TracerConfig.Site + } + + baseURL := "https://" + if slices.Contains(ddSitesNeedingAppSubdomain, site) { + baseURL += "app." 
+ } + baseURL += site + return baseURL +} + +func newLLMObsTraceID() string { + var b [16]byte + + // High 32 bits: Unix seconds + secs := uint32(time.Now().Unix()) + binary.BigEndian.PutUint32(b[0:4], secs) + + // Middle 32 bits: zero + // (already zeroed by array initialization) + + // Low 64 bits: random + if _, err := rand.Read(b[8:16]); err != nil { + panic(err) + } + + // Turn into a big.Int + x := new(big.Int).SetBytes(b[:]) + + // 32-character hex string (16 bytes) + return fmt.Sprintf("%032x", x) +} + +// isAPIKeyValid reports whether the given string is a structurally valid API key +func isAPIKeyValid(key string) bool { + if len(key) != 32 { + return false + } + for _, c := range key { + if c > unicode.MaxASCII || (!unicode.IsLower(c) && !unicode.IsNumber(c)) { + return false + } + } + return true +} + +func readableBytes(s int) string { + const base = 1000 + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} + + if s < 10 { + return fmt.Sprintf("%dB", s) + } + e := math.Floor(logn(float64(s), base)) + suffix := sizes[int(e)] + val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 + f := "%.0f%s" + if val < 10 { + f = "%.1f%s" + } + return fmt.Sprintf(f, val, suffix) +} + +func logn(n, b float64) float64 { + return math.Log(n) / math.Log(b) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/span.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/span.go new file mode 100644 index 00000000..220b9e5b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/span.go @@ -0,0 +1,506 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package llmobs + +import ( + "encoding/json" + "sync" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +const ( + // TagKeySessionID is the tag key used to set the session ID for LLMObs spans. + TagKeySessionID = "session_id" +) + +// StartSpanConfig contains configuration options for starting an LLMObs span. +type StartSpanConfig struct { + // SessionID sets the session ID for the span. + SessionID string + // ModelName sets the model name for LLM and embedding spans. + ModelName string + // ModelProvider sets the model provider for LLM and embedding spans. + ModelProvider string + // MLApp sets the ML application name for the span. + MLApp string + // StartTime sets a custom start time for the span. If zero, uses current time. + StartTime time.Time +} + +// FinishSpanConfig contains configuration options for finishing an LLMObs span. +type FinishSpanConfig struct { + // FinishTime sets a custom finish time for the span. If zero, uses current time. + FinishTime time.Time + // Error sets an error on the span when finishing. + Error error +} + +// EvaluationConfig contains configuration for submitting evaluation metrics. +type EvaluationConfig struct { + // Method 1: Direct span/trace ID join + // SpanID is the span ID to evaluate. + SpanID string + // TraceID is the trace ID to evaluate. + TraceID string + + // Method 2: Tag-based join + // TagKey is the tag key to search for spans. + TagKey string + // TagValue is the tag value to match for spans. + TagValue string + + // Required fields + // Label is the name of the evaluation metric. + Label string + + // Value fields (exactly one must be provided) + // CategoricalValue is the categorical value of the evaluation metric.
+ CategoricalValue *string + // ScoreValue is the score value of the evaluation metric. + ScoreValue *float64 + // BooleanValue is the boolean value of the evaluation metric. + BooleanValue *bool + + // Optional fields + // Tags are optional string key-value pairs to tag the evaluation metric. + Tags []string + // MLApp is the ML application name. If empty, uses the global config. + MLApp string + // TimestampMS is the timestamp in milliseconds. If zero, uses current time. + TimestampMS int64 +} + +// Prompt represents a prompt template used with LLM spans. +type Prompt struct { + // Template is the prompt template string. + Template string `json:"template,omitempty"` + // ID is the unique identifier for the prompt. + ID string `json:"id,omitempty"` + // Version is the version of the prompt. + Version string `json:"version,omitempty"` + // Variables contains the variables used in the prompt template. + Variables map[string]string `json:"variables,omitempty"` + // RAGContextVariables specifies which variables contain RAG context. + RAGContextVariables []string `json:"rag_context_variables,omitempty"` + // RAGQueryVariables specifies which variables contain RAG queries. + RAGQueryVariables []string `json:"rag_query_variables,omitempty"` +} + +// ToolDefinition represents a tool definition for LLM spans. +type ToolDefinition struct { + // Name is the name of the tool. + Name string `json:"name"` + // Description is the description of what the tool does. + Description string `json:"description,omitempty"` + // Schema is the JSON schema defining the tool's parameters. + Schema json.RawMessage `json:"schema,omitempty"` +} + +// ToolCall represents a call to a tool within an LLM message. +type ToolCall struct { + // Name is the name of the tool being called. + Name string `json:"name"` + // Arguments are the JSON-encoded arguments passed to the tool. + Arguments json.RawMessage `json:"arguments"` + // ToolID is the unique identifier for this tool call. + ToolID string `json:"tool_id,omitempty"` + // Type is the type of the tool call. + Type string `json:"type,omitempty"` +} + +// ToolResult represents the result of a tool call within an LLM message. +type ToolResult struct { + // Result is the result returned by the tool. + Result any `json:"result"` + // Name is the name of the tool that was called. + Name string `json:"name,omitempty"` + // ToolID is the unique identifier for the tool call this result corresponds to. + ToolID string `json:"tool_id,omitempty"` + // Type is the type of the tool result. + Type string `json:"type,omitempty"` +} + +// LLMMessage represents a message in an LLM conversation. +type LLMMessage struct { + // Role is the role of the message sender (e.g., "user", "assistant", "system"). + Role string `json:"role"` + // Content is the text content of the message. + Content string `json:"content"` + // ToolCalls are the tool calls made in this message. + ToolCalls []ToolCall `json:"tool_calls,omitempty"` + // ToolResults are the results of tool calls in this message. + ToolResults []ToolResult `json:"tool_results,omitempty"` +} + +// EmbeddedDocument represents a document used for embedding operations. +type EmbeddedDocument struct { + // Text is the text content of the document. + Text string `json:"text"` +} + +// RetrievedDocument represents a document retrieved from a search operation. +type RetrievedDocument struct { + // Text is the text content of the retrieved document. + Text string `json:"text"` + // Name is the name or title of the document. 
+ Name string `json:"name,omitempty"` + // Score is the relevance score of the document (typically 0.0-1.0). + Score float64 `json:"score,omitempty"` + // ID is the unique identifier of the document. + ID string `json:"id,omitempty"` +} + +// SpanAnnotations contains data to annotate an LLMObs span with. +type SpanAnnotations struct { + // InputText is the text input for the span. + InputText string + // InputMessages are the input messages for LLM spans. + InputMessages []LLMMessage + // InputEmbeddedDocs are the input documents for embedding spans. + InputEmbeddedDocs []EmbeddedDocument + + // OutputText is the text output for the span. + OutputText string + // OutputMessages are the output messages for LLM spans. + OutputMessages []LLMMessage + // OutputRetrievedDocs are the output documents for retrieval spans. + OutputRetrievedDocs []RetrievedDocument + + // ExperimentInput is the input data for experiment spans. + ExperimentInput any + // ExperimentOutput is the output data for experiment spans. + ExperimentOutput any + // ExperimentExpectedOutput is the expected output for experiment spans. + ExperimentExpectedOutput any + + // Prompt is the prompt information for LLM spans. + Prompt *Prompt + // ToolDefinitions are the tool definitions for LLM spans. + ToolDefinitions []ToolDefinition + + // AgentManifest is the agent manifest for agent spans. + AgentManifest string + + // Metadata contains arbitrary metadata key-value pairs. + Metadata map[string]any + // Metrics contains numeric metrics key-value pairs. + Metrics map[string]float64 + // Tags contains string tags key-value pairs. + Tags map[string]string +} + +// Span represents an LLMObs span with its associated metadata and context. +type Span struct { + mu sync.RWMutex + + apm APMSpan + parent *Span + propagated *PropagatedLLMSpan + + llmCtx llmobsContext + + llmTraceID string + name string + mlApp string + spanKind SpanKind + sessionID string + + integration string + scope string + error error + finished bool + + startTime time.Time + finishTime time.Time + + spanLinks []SpanLink +} + +func (s *Span) Name() string { + return s.name +} + +// SpanID returns the span ID of the underlying APM span. +func (s *Span) SpanID() string { + return s.apm.SpanID() +} + +func (s *Span) Kind() string { + return string(s.spanKind) +} + +// APMTraceID returns the trace ID of the underlying APM span. +func (s *Span) APMTraceID() string { + return s.apm.TraceID() +} + +// TraceID returns the LLMObs trace ID for this span. +func (s *Span) TraceID() string { + return s.llmTraceID +} + +// MLApp returns the ML application name for this span. +func (s *Span) MLApp() string { + return s.mlApp +} + +// AddLink adds a span link to this span. +func (s *Span) AddLink(link SpanLink) { + s.mu.Lock() + defer s.mu.Unlock() + + s.apm.AddLink(link) + s.spanLinks = append(s.spanLinks, link) +} + +// StartTime returns the start time of this span. +func (s *Span) StartTime() time.Time { + return s.startTime +} + +// FinishTime returns the finish time of this span. +func (s *Span) FinishTime() time.Time { + return s.finishTime +} + +// Finish finishes the span with the provided configuration. 
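// Illustration: a hypothetical use of the SpanAnnotations type above with
// Span.Annotate (defined just below): attach input/output messages, token
// metrics, and a session_id tag to the active LLM span. The metric key names
// are placeholders, not a documented schema.
package main

import (
	"context"

	"github.com/DataDog/dd-trace-go/v2/internal/llmobs"
)

func annotateActive(ctx context.Context) {
	span, ok := llmobs.ActiveLLMSpanFromContext(ctx)
	if !ok {
		return
	}
	span.Annotate(llmobs.SpanAnnotations{
		InputMessages:  []llmobs.LLMMessage{{Role: "user", Content: "What is the capital of France?"}},
		OutputMessages: []llmobs.LLMMessage{{Role: "assistant", Content: "Paris."}},
		Metrics:        map[string]float64{"input_tokens": 12, "output_tokens": 3},
		Tags:           map[string]string{llmobs.TagKeySessionID: "abc-123"},
	})
}

func main() {
	annotateActive(context.Background()) // no-op without an active span
}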
+func (s *Span) Finish(cfg FinishSpanConfig) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.finished { + log.Debug("llmobs: attempted to finish an already finished span") + return + } + + if cfg.FinishTime.IsZero() { + cfg.FinishTime = time.Now() + } + s.finishTime = cfg.FinishTime + apmFinishCfg := FinishAPMSpanConfig{ + FinishTime: cfg.FinishTime, + } + if cfg.Error != nil { + s.error = cfg.Error + apmFinishCfg.Error = cfg.Error + } + + s.apm.Finish(apmFinishCfg) + l, err := ActiveLLMObs() + if err != nil { + return + } + l.submitLLMObsSpan(s) + s.finished = true + + //TODO: telemetry.record_span_created(span) +} + +// Annotate adds annotations to the span using the provided SpanAnnotations. +func (s *Span) Annotate(a SpanAnnotations) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.finished { + log.Warn("llmobs: cannot annotate a finished span") + return + } + + s.llmCtx.metadata = updateMapKeys(s.llmCtx.metadata, a.Metadata) + s.llmCtx.metrics = updateMapKeys(s.llmCtx.metrics, a.Metrics) + + if len(a.Tags) > 0 { + s.llmCtx.tags = updateMapKeys(s.llmCtx.tags, a.Tags) + if sessionID, ok := a.Tags[TagKeySessionID]; ok { + s.sessionID = sessionID + } + } + + if a.Prompt != nil { + if s.spanKind != SpanKindLLM { + log.Warn("llmobs: input prompt can only be annotated on llm spans, ignoring") + } else { + if a.Prompt.RAGContextVariables == nil { + a.Prompt.RAGContextVariables = []string{"context"} + } + if a.Prompt.RAGQueryVariables == nil { + a.Prompt.RAGQueryVariables = []string{"question"} + } + s.llmCtx.prompt = a.Prompt + } + } + + if len(a.ToolDefinitions) > 0 { + if s.spanKind != SpanKindLLM { + log.Warn("llmobs: tool definitions can only be annotated on llm spans, ignoring") + } else { + s.llmCtx.toolDefinitions = a.ToolDefinitions + } + } + + if a.AgentManifest != "" { + if s.spanKind != SpanKindAgent { + log.Warn("llmobs: agent manifest can only be annotated on agent spans, ignoring") + } else { + s.llmCtx.agentManifest = a.AgentManifest + } + } + + s.annotateIO(a) +} + +func (s *Span) annotateIO(a SpanAnnotations) { + if a.OutputRetrievedDocs != nil && s.spanKind != SpanKindRetrieval { + log.Warn("llmobs: retrieve docs can only be used to annotate outputs for retrieval spans, ignoring") + } + if a.InputEmbeddedDocs != nil && s.spanKind != SpanKindEmbedding { + log.Warn("llmobs: embedding docs can only be used to annotate inputs for embedding spans, ignoring") + } + switch s.spanKind { + case SpanKindLLM: + s.annotateIOLLM(a) + case SpanKindEmbedding: + s.annotateIOEmbedding(a) + case SpanKindRetrieval: + s.annotateIORetrieval(a) + case SpanKindExperiment: + s.annotateIOExperiment(a) + default: + s.annotateIOText(a) + } +} + +func (s *Span) annotateIOLLM(a SpanAnnotations) { + if a.InputMessages != nil { + s.llmCtx.inputMessages = a.InputMessages + } else if a.InputText != "" { + s.llmCtx.inputMessages = []LLMMessage{{Content: a.InputText}} + } + if a.OutputMessages != nil { + s.llmCtx.outputMessages = a.OutputMessages + } else if a.OutputText != "" { + s.llmCtx.outputMessages = []LLMMessage{{Content: a.OutputText}} + } +} + +func (s *Span) annotateIOEmbedding(a SpanAnnotations) { + if a.InputText != "" || a.InputMessages != nil { + log.Warn("llmobs: embedding spans can only be annotated with input embedded docs, ignoring other inputs") + } + if a.OutputMessages != nil || a.OutputRetrievedDocs != nil { + log.Warn("llmobs: embedding spans can only be annotated with output text, ignoring other outputs") + } + if a.InputEmbeddedDocs != nil { + s.llmCtx.inputDocuments = 
a.InputEmbeddedDocs + } + if a.OutputText != "" { + s.llmCtx.outputText = a.OutputText + } +} + +func (s *Span) annotateIORetrieval(a SpanAnnotations) { + if a.InputMessages != nil || a.InputEmbeddedDocs != nil { + log.Warn("llmobs: retrieval spans can only be annotated with input text, ignoring other inputs") + } + if a.OutputText != "" || a.OutputMessages != nil { + log.Warn("llmobs: retrieval spans can only be annotated with output retrieved docs, ignoring other outputs") + } + if a.InputText != "" { + s.llmCtx.inputText = a.InputText + } + if a.OutputRetrievedDocs != nil { + s.llmCtx.outputDocuments = a.OutputRetrievedDocs + } +} + +func (s *Span) annotateIOExperiment(a SpanAnnotations) { + if a.ExperimentInput != nil { + s.llmCtx.experimentInput = a.ExperimentInput + } + if a.ExperimentOutput != nil { + s.llmCtx.experimentOutput = a.ExperimentOutput + } + if a.ExperimentExpectedOutput != nil { + s.llmCtx.experimentExpectedOutput = a.ExperimentExpectedOutput + } +} + +func (s *Span) annotateIOText(a SpanAnnotations) { + if a.InputMessages != nil || a.InputEmbeddedDocs != nil { + log.Warn("llmobs: %s spans can only be annotated with input text, ignoring other inputs", s.spanKind) + } + if a.OutputMessages != nil || a.OutputRetrievedDocs != nil { + log.Warn("llmobs: %s spans can only be annotated with output text, ignoring other outputs", s.spanKind) + } + if a.InputText != "" { + s.llmCtx.inputText = a.InputText + } + if a.OutputText != "" { + s.llmCtx.outputText = a.OutputText + } +} + +// propagatedSessionID returns the session ID for a given span, by checking the span's nearest LLMObs span ancestor. +func (s *Span) propagatedSessionID() string { + curSpan := s + usingParent := false + + for curSpan != nil { + if curSpan.sessionID != "" { + if usingParent { + log.Debug("llmobs: using session_id from parent span: %s", curSpan.sessionID) + } + return curSpan.sessionID + } + curSpan = curSpan.parent + usingParent = true + } + return "" +} + +// propagatedMLApp returns the ML App name for a given span, by checking the span's nearest LLMObs span ancestor. +// It defaults to the global config LLMObs ML App name. +func (s *Span) propagatedMLApp() string { + curSpan := s + usingParent := false + + for curSpan != nil { + if curSpan.mlApp != "" { + if usingParent { + log.Debug("llmobs: using ml_app from parent span: %s", curSpan.mlApp) + } + return curSpan.mlApp + } + curSpan = curSpan.parent + usingParent = true + } + + if s.propagated != nil && s.propagated.MLApp != "" { + log.Debug("llmobs: using ml_app from propagated span: %s", s.propagated.MLApp) + return s.propagated.MLApp + } + if activeLLMObs != nil { + log.Debug("llmobs: using ml_app from global config: %s", activeLLMObs.Config.MLApp) + return activeLLMObs.Config.MLApp + } + return "" +} + +// updateMapKeys adds key/values from updates into src, overriding existing keys. +func updateMapKeys[K comparable, V any](src map[K]V, updates map[K]V) map[K]V { + if len(updates) == 0 { + return src + } + if src == nil { + src = make(map[K]V, len(updates)) + } + for k, v := range updates { + src[k] = v + } + return src +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/tracer.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/tracer.go new file mode 100644 index 00000000..2cdb6bda --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/tracer.go @@ -0,0 +1,52 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package llmobs + +import ( + "context" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport" +) + +// Tracer represents the interface for the underlying APM tracer. +type Tracer interface { + // StartSpan starts a new APM span with the given name and configuration. + StartSpan(ctx context.Context, name string, cfg StartAPMSpanConfig) (APMSpan, context.Context) +} + +// StartAPMSpanConfig contains configuration options for starting an APM span. +type StartAPMSpanConfig struct { + // SpanType is the type of the APM span. + SpanType string + // StartTime is the start time for the span. + StartTime time.Time +} + +// FinishAPMSpanConfig contains configuration options for finishing an APM span. +type FinishAPMSpanConfig struct { + // FinishTime is the finish time for the span. + FinishTime time.Time + // Error is an error to set on the span when finishing. + Error error +} + +// APMSpan represents the interface for an APM span. +type APMSpan interface { + // Finish finishes the span with the given configuration. + Finish(cfg FinishAPMSpanConfig) + // AddLink adds a span link to this span. + AddLink(link SpanLink) + // SpanID returns the span ID. + SpanID() string + // TraceID returns the trace ID. + TraceID() string + // SetBaggageItem sets a baggage item on the span. + SetBaggageItem(key string, value string) +} + +// SpanLink represents a link between spans, aliased from the transport package. +type SpanLink = transport.SpanLink diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/dne.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/dne.go new file mode 100644 index 00000000..b545ee77 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/dne.go @@ -0,0 +1,562 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
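// Illustration: the Tracer and APMSpan interfaces above are the seam between
// LLMObs and the APM tracer, so a no-op stub like this hypothetical one is
// enough to exercise LLMObs logic in isolation (for example, in tests inside
// this module).
package main

import (
	"context"

	"github.com/DataDog/dd-trace-go/v2/internal/llmobs"
)

type noopSpan struct{}

func (noopSpan) Finish(llmobs.FinishAPMSpanConfig) {}
func (noopSpan) AddLink(llmobs.SpanLink)           {}
func (noopSpan) SpanID() string                    { return "1" }
func (noopSpan) TraceID() string                   { return "1" }
func (noopSpan) SetBaggageItem(key, value string)  {}

type noopTracer struct{}

func (noopTracer) StartSpan(ctx context.Context, name string, cfg llmobs.StartAPMSpanConfig) (llmobs.APMSpan, context.Context) {
	return noopSpan{}, ctx
}

func main() {
	var _ llmobs.Tracer = noopTracer{} // compile-time check of the seam
	var _ llmobs.APMSpan = noopSpan{}
}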
+ +package transport + +import ( + "bytes" + "context" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +const ( + resourceTypeDatasets = "datasets" + resourceTypeExperiments = "experiments" + resourceTypeProjects = "projects" +) + +// ---------- Resources ---------- + +type DatasetView struct { + ID string + Name string `json:"name"` + Description string `json:"description"` + Metadata map[string]any `json:"metadata"` + CurrentVersion int `json:"current_version"` +} + +type DatasetCreate struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata map[string]any `json:"metadata,omitempty"` +} + +type DatasetRecordView struct { + ID string + Input any `json:"input"` + ExpectedOutput any `json:"expected_output"` + Metadata any `json:"metadata"` + Version int `json:"version"` +} + +type ProjectView struct { + ID string + Name string `json:"name"` +} + +type ExperimentView struct { + ID string + ProjectID string `json:"project_id"` + DatasetID string `json:"dataset_id"` + Name string `json:"name"` + Description string `json:"description"` + Metadata map[string]any `json:"metadata"` + Config map[string]any `json:"config"` + DatasetVersion int `json:"dataset_version"` + EnsureUnique bool `json:"ensure_unique"` +} + +type DatasetRecordCreate struct { + Input any `json:"input,omitempty"` + ExpectedOutput any `json:"expected_output,omitempty"` + Metadata any `json:"metadata,omitempty"` +} + +type DatasetRecordUpdate struct { + ID string `json:"id"` + Input any `json:"input,omitempty"` + ExpectedOutput *any `json:"expected_output,omitempty"` + Metadata any `json:"metadata,omitempty"` +} + +type ErrorMessage struct { + Message string `json:"message,omitempty"` + Type string `json:"type,omitempty"` + Stack string `json:"stack,omitempty"` +} + +// ---------- Requests ---------- + +type Request[T any] struct { + Data RequestData[T] `json:"data"` +} + +type RequestData[T any] struct { + Type string `json:"type"` + Attributes T `json:"attributes"` +} + +type RequestAttributesDatasetCreateRecords struct { + Records []DatasetRecordCreate `json:"records,omitempty"` +} + +type RequestAttributesDatasetDelete struct { + DatasetIDs []string `json:"dataset_ids,omitempty"` +} + +type RequestAttributesDatasetBatchUpdate struct { + InsertRecords []DatasetRecordCreate `json:"insert_records,omitempty"` + UpdateRecords []DatasetRecordUpdate `json:"update_records,omitempty"` + DeleteRecords []string `json:"delete_records,omitempty"` + Deduplicate *bool `json:"deduplicate,omitempty"` +} + +type RequestAttributesProjectCreate struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` +} + +type RequestAttributesExperimentCreate struct { + ProjectID string `json:"project_id,omitempty"` + DatasetID string `json:"dataset_id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata map[string]any `json:"metadata,omitempty"` + Config map[string]any `json:"config,omitempty"` + DatasetVersion int `json:"dataset_version,omitempty"` + EnsureUnique bool `json:"ensure_unique,omitempty"` +} + +type RequestAttributesExperimentPushEvents struct { + Scope string `json:"scope,omitempty"` + Metrics []ExperimentEvalMetricEvent `json:"metrics,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +type ExperimentEvalMetricEvent struct { + MetricSource string `json:"metric_source,omitempty"` + SpanID string 
`json:"span_id,omitempty"` + TraceID string `json:"trace_id,omitempty"` + TimestampMS int64 `json:"timestamp_ms,omitempty"` + MetricType string `json:"metric_type,omitempty"` + Label string `json:"label,omitempty"` + CategoricalValue *string `json:"categorical_value,omitempty"` + ScoreValue *float64 `json:"score_value,omitempty"` + BooleanValue *bool `json:"boolean_value,omitempty"` + Error *ErrorMessage `json:"error,omitempty"` + Tags []string `json:"tags,omitempty"` + ExperimentID string `json:"experiment_id,omitempty"` +} + +type ( + CreateDatasetRequest = Request[DatasetCreate] + DeleteDatasetRequest = Request[RequestAttributesDatasetDelete] + CreateDatasetRecordsRequest = Request[RequestAttributesDatasetCreateRecords] + BatchUpdateDatasetRequest = Request[RequestAttributesDatasetBatchUpdate] + + CreateProjectRequest = Request[RequestAttributesProjectCreate] + + CreateExperimentRequest = Request[RequestAttributesExperimentCreate] + PushExperimentEventsRequest = Request[RequestAttributesExperimentPushEvents] +) + +// ---------- Responses ---------- + +type Response[T any] struct { + Data ResponseData[T] `json:"data"` +} + +type ResponseMeta struct { + After string `json:"after,omitempty"` // Cursor for next page +} + +type ResponseList[T any] struct { + Data []ResponseData[T] `json:"data"` + Meta ResponseMeta `json:"meta,omitempty"` +} + +type ResponseData[T any] struct { + ID string `json:"id"` + Type string `json:"type"` + Attributes T `json:"attributes"` +} + +type ( + GetDatasetResponse = ResponseList[DatasetView] + CreateDatasetResponse = Response[DatasetView] + UpdateDatasetResponse = Response[DatasetView] + + GetDatasetRecordsResponse = ResponseList[DatasetRecordView] + CreateDatasetRecordsResponse = ResponseList[DatasetRecordView] + UpdateDatasetRecordsResponse = ResponseList[DatasetRecordView] + BatchUpdateDatasetResponse = ResponseList[DatasetRecordView] + + CreateProjectResponse = Response[ProjectView] + + CreateExperimentResponse = Response[ExperimentView] +) + +func (c *Transport) GetDatasetByName(ctx context.Context, name, projectID string) (*DatasetView, error) { + q := url.Values{} + q.Set("filter[name]", name) + datasetPath := fmt.Sprintf("%s/%s/datasets?%s", endpointPrefixDNE, url.PathEscape(projectID), q.Encode()) + method := http.MethodGet + + status, b, err := c.jsonRequest(ctx, method, datasetPath, subdomainDNE, nil, defaultTimeout) + if err != nil || status != http.StatusOK { + return nil, fmt.Errorf("get dataset by name %q failed: %v", name, err) + } + + var datasetResp GetDatasetResponse + if err := json.Unmarshal(b, &datasetResp); err != nil { + return nil, fmt.Errorf("decode datasets list: %w", err) + } + if len(datasetResp.Data) == 0 { + return nil, ErrDatasetNotFound + } + ds := datasetResp.Data[0].Attributes + ds.ID = datasetResp.Data[0].ID + return &ds, nil +} + +func (c *Transport) CreateDataset(ctx context.Context, name, description, projectID string) (*DatasetView, error) { + _, err := c.GetDatasetByName(ctx, name, projectID) + if err == nil { + return nil, errors.New("dataset already exists") + } + if !errors.Is(err, ErrDatasetNotFound) { + return nil, err + } + + path := fmt.Sprintf("%s/%s/datasets", endpointPrefixDNE, url.PathEscape(projectID)) + method := http.MethodPost + body := CreateDatasetRequest{ + Data: RequestData[DatasetCreate]{ + Type: resourceTypeDatasets, + Attributes: DatasetCreate{ + Name: name, + Description: description, + }, + }, + } + status, b, err := c.jsonRequest(ctx, method, path, subdomainDNE, body, defaultTimeout) + if err != 
nil { + return nil, fmt.Errorf("create dataset %q failed: %v", name, err) + } + log.Debug("llmobs/internal/transport.DatasetGetOrCreate: create dataset success (status code: %d)", status) + + var resp CreateDatasetResponse + if err := json.Unmarshal(b, &resp); err != nil { + return nil, fmt.Errorf("decode create dataset response: %w", err) + } + id := resp.Data.ID + dataset := resp.Data.Attributes + dataset.ID = id + return &dataset, nil +} + +func (c *Transport) DeleteDataset(ctx context.Context, datasetIDs ...string) error { + path := endpointPrefixDNE + "/datasets/delete" + method := http.MethodPost + body := DeleteDatasetRequest{ + Data: RequestData[RequestAttributesDatasetDelete]{ + Type: resourceTypeDatasets, + Attributes: RequestAttributesDatasetDelete{ + DatasetIDs: datasetIDs, + }, + }, + } + + status, _, err := c.jsonRequest(ctx, method, path, subdomainDNE, body, defaultTimeout) + if err != nil || status != http.StatusOK { + return fmt.Errorf("delete dataset %v failed: %v", datasetIDs, err) + } + return nil +} + +func (c *Transport) BatchUpdateDataset( + ctx context.Context, + datasetID string, + insert []DatasetRecordCreate, + update []DatasetRecordUpdate, + delete []string, +) (int, []string, error) { + path := fmt.Sprintf("%s/datasets/%s/batch_update", endpointPrefixDNE, url.PathEscape(datasetID)) + method := http.MethodPost + body := BatchUpdateDatasetRequest{ + Data: RequestData[RequestAttributesDatasetBatchUpdate]{ + Type: resourceTypeDatasets, + Attributes: RequestAttributesDatasetBatchUpdate{ + InsertRecords: insert, + UpdateRecords: update, + DeleteRecords: delete, + Deduplicate: AnyPtr(false), + }, + }, + } + + status, b, err := c.jsonRequest(ctx, method, path, subdomainDNE, body, defaultTimeout) + if err != nil || status != http.StatusOK { + return -1, nil, fmt.Errorf("batch_update for dataset %q failed: %v", datasetID, err) + } + + var resp BatchUpdateDatasetResponse + if err := json.Unmarshal(b, &resp); err != nil { + return -1, nil, fmt.Errorf("decode batch_update response: %w", err) + } + + // FIXME: we don't get version numbers in responses to deletion requests + // TODO(rarguelloF): the backend could return a better response here... + var ( + newDatasetVersion = -1 + newRecordIDs []string + ) + if len(resp.Data) > 0 { + if resp.Data[0].Attributes.Version > 0 { + newDatasetVersion = resp.Data[0].Attributes.Version + } + } + if len(resp.Data) == len(insert)+len(update) { + // new records are at the end of the slice + for _, rec := range resp.Data[len(update):] { + newRecordIDs = append(newRecordIDs, rec.ID) + } + } else { + log.Warn("llmobs/internal/transport: BatchUpdateDataset: expected %d records in response, got %d", len(insert)+len(update), len(resp.Data)) + } + return newDatasetVersion, newRecordIDs, nil +} + +// GetDatasetRecordsPage fetches a single page of records for the given dataset. +// Returns the records, the cursor for the next page (empty string if no more pages), and any error. 
+func (c *Transport) GetDatasetRecordsPage(ctx context.Context, datasetID, cursor string) ([]DatasetRecordView, string, error) { + method := http.MethodGet + recordsPath := fmt.Sprintf("%s/datasets/%s/records", endpointPrefixDNE, url.PathEscape(datasetID)) + + if cursor != "" { + recordsPath = fmt.Sprintf("%s?page[cursor]=%s", recordsPath, url.QueryEscape(cursor)) + } + + status, b, err := c.jsonRequest(ctx, method, recordsPath, subdomainDNE, nil, getDatasetRecordsTimeout) + if err != nil || status != http.StatusOK { + return nil, "", fmt.Errorf("get dataset records page failed: %v (datasetID=%q, status=%d)", err, datasetID, status) + } + + var recordsResp GetDatasetRecordsResponse + if err := json.Unmarshal(b, &recordsResp); err != nil { + return nil, "", fmt.Errorf("decode dataset records: %w", err) + } + + records := make([]DatasetRecordView, 0, len(recordsResp.Data)) + for _, r := range recordsResp.Data { + rec := r.Attributes + rec.ID = r.ID + records = append(records, rec) + } + + return records, recordsResp.Meta.After, nil +} + +// GetDatasetWithRecords fetches the given Dataset and all its records from DataDog. +// This eagerly fetches all pages of records. +func (c *Transport) GetDatasetWithRecords(ctx context.Context, name, projectID string) (*DatasetView, []DatasetRecordView, error) { + // 1) Fetch dataset by name + ds, err := c.GetDatasetByName(ctx, name, projectID) + if err != nil { + return nil, nil, err + } + + // 2) Fetch all records with pagination support + var allRecords []DatasetRecordView + nextCursor := "" + pageNum := 0 + + for { + log.Debug("llmobs/transport: fetching dataset records page %d", pageNum) + + records, cursor, err := c.GetDatasetRecordsPage(ctx, ds.ID, nextCursor) + if err != nil { + return nil, nil, fmt.Errorf("get dataset records failed on page %d: %w", pageNum, err) + } + + allRecords = append(allRecords, records...) 
+ + nextCursor = cursor + if nextCursor == "" { + break + } + pageNum++ + } + + log.Debug("llmobs/transport: fetched %d records across %d pages for dataset %q", len(allRecords), pageNum+1, name) + return ds, allRecords, nil +} + +func (c *Transport) GetOrCreateProject(ctx context.Context, name string) (*ProjectView, error) { + path := endpointPrefixDNE + "/projects" + method := http.MethodPost + + body := CreateProjectRequest{ + Data: RequestData[RequestAttributesProjectCreate]{ + Type: resourceTypeProjects, + Attributes: RequestAttributesProjectCreate{ + Name: name, + Description: "", + }, + }, + } + status, b, err := c.jsonRequest(ctx, method, path, subdomainDNE, body, defaultTimeout) + if err != nil || status != http.StatusOK { + return nil, fmt.Errorf("create project %q failed: %v", name, err) + } + + var resp CreateProjectResponse + if err := json.Unmarshal(b, &resp); err != nil { + return nil, fmt.Errorf("decode project response: %w", err) + } + + project := resp.Data.Attributes + project.ID = resp.Data.ID + return &project, nil +} + +func (c *Transport) CreateExperiment( + ctx context.Context, + name, datasetID, projectID string, + datasetVersion int, + expConfig map[string]any, + tags []string, + description string, +) (*ExperimentView, error) { + path := endpointPrefixDNE + "/experiments" + method := http.MethodPost + + if expConfig == nil { + expConfig = map[string]interface{}{} + } + meta := map[string]interface{}{"tags": tags} + body := CreateExperimentRequest{ + Data: RequestData[RequestAttributesExperimentCreate]{ + Type: resourceTypeExperiments, + Attributes: RequestAttributesExperimentCreate{ + ProjectID: projectID, + DatasetID: datasetID, + Name: name, + Description: description, + Metadata: meta, + Config: expConfig, + DatasetVersion: datasetVersion, + EnsureUnique: true, + }, + }, + } + + status, b, err := c.jsonRequest(ctx, method, path, subdomainDNE, body, defaultTimeout) + if err != nil || status != http.StatusOK { + return nil, fmt.Errorf("create experiment %q failed: %v", name, err) + } + + var resp CreateExperimentResponse + if err := json.Unmarshal(b, &resp); err != nil { + return nil, fmt.Errorf("decode experiment response: %w", err) + } + exp := resp.Data.Attributes + exp.ID = resp.Data.ID + + return &exp, nil +} + +func (c *Transport) PushExperimentEvents( + ctx context.Context, + experimentID string, + metrics []ExperimentEvalMetricEvent, + tags []string, +) error { + path := fmt.Sprintf("%s/experiments/%s/events", endpointPrefixDNE, url.PathEscape(experimentID)) + method := http.MethodPost + + body := PushExperimentEventsRequest{ + Data: RequestData[RequestAttributesExperimentPushEvents]{ + Type: resourceTypeExperiments, + Attributes: RequestAttributesExperimentPushEvents{ + Scope: resourceTypeExperiments, + Metrics: metrics, + Tags: tags, + }, + }, + } + + status, b, err := c.jsonRequest(ctx, method, path, subdomainDNE, body, defaultTimeout) + if err != nil { + return fmt.Errorf("post experiment eval metrics failed: %v", err) + } + if status != http.StatusOK && status != http.StatusAccepted { + return fmt.Errorf("unexpected status %d: %s", status, string(b)) + } + return nil +} + +// BulkUploadDataset uploads dataset records via CSV file upload. +// This is more efficient for large datasets (>5MB of changes). 
+func (c *Transport) BulkUploadDataset(ctx context.Context, datasetID string, records []DatasetRecordView) error { + // Create CSV in memory + var csvBuf bytes.Buffer + csvWriter := csv.NewWriter(&csvBuf) + + // Write header + if err := csvWriter.Write([]string{"input", "expected_output", "metadata"}); err != nil { + return fmt.Errorf("failed to write CSV header: %w", err) + } + + // Write records + for _, rec := range records { + inputJSON, err := json.Marshal(rec.Input) + if err != nil { + return fmt.Errorf("failed to marshal input: %w", err) + } + outputJSON, err := json.Marshal(rec.ExpectedOutput) + if err != nil { + return fmt.Errorf("failed to marshal expected_output: %w", err) + } + metadataJSON, err := json.Marshal(rec.Metadata) + if err != nil { + return fmt.Errorf("failed to marshal metadata: %w", err) + } + + if err := csvWriter.Write([]string{ + string(inputJSON), + string(outputJSON), + string(metadataJSON), + }); err != nil { + return fmt.Errorf("failed to write CSV record: %w", err) + } + } + csvWriter.Flush() + if err := csvWriter.Error(); err != nil { + return fmt.Errorf("CSV writer error: %w", err) + } + + // Create multipart body + boundary := "----------boundary------" + crlf := "\r\n" + filename := "dataset_upload.csv" + + var body bytes.Buffer + body.WriteString("--" + boundary + crlf) + body.WriteString(fmt.Sprintf(`Content-Disposition: form-data; name="file"; filename="%s"`, filename) + crlf) + body.WriteString("Content-Type: text/csv" + crlf) + body.WriteString(crlf) + body.Write(csvBuf.Bytes()) + body.WriteString(crlf) + body.WriteString("--" + boundary + "--" + crlf) + + path := fmt.Sprintf("%s/datasets/%s/records/upload", endpointPrefixDNE, url.PathEscape(datasetID)) + contentType := fmt.Sprintf("multipart/form-data; boundary=%s", boundary) + + status, respBody, err := c.request(ctx, http.MethodPost, path, subdomainDNE, bytes.NewReader(body.Bytes()), contentType, bulkUploadTimeout) + if err != nil || status != http.StatusOK { + return fmt.Errorf("bulk upload failed: %w", err) + } + + log.Debug("llmobs/transport: successfully bulk uploaded %d records to dataset %q: %s", len(records), datasetID, string(respBody)) + return nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/eval_metric.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/eval_metric.go new file mode 100644 index 00000000..6e1bda46 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/eval_metric.go @@ -0,0 +1,91 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +import ( + "context" + "fmt" + "net/http" +) + +// EvaluationJoinOn represents how to join evaluation metrics to spans. +// Exactly one of Span or Tag should be provided. +type EvaluationJoinOn struct { + // Span contains span and trace IDs for direct span joining. + Span *EvaluationSpanJoin `json:"span,omitempty"` + // Tag contains tag key-value for tag-based joining. + Tag *EvaluationTagJoin `json:"tag,omitempty"` +} + +// EvaluationSpanJoin represents joining by span and trace ID. +type EvaluationSpanJoin struct { + // SpanID is the span ID to join on. + SpanID string `json:"span_id"` + // TraceID is the trace ID to join on. + TraceID string `json:"trace_id"` +} + +// EvaluationTagJoin represents joining by tag key-value pairs. 
+type EvaluationTagJoin struct { + // Key is the tag key to search for. + Key string `json:"key"` + // Value is the tag value to match. + Value string `json:"value"` +} + +// LLMObsMetric represents an evaluation metric for LLMObs spans. +type LLMObsMetric struct { + JoinOn EvaluationJoinOn `json:"join_on"` + MetricType string `json:"metric_type,omitempty"` + Label string `json:"label,omitempty"` + CategoricalValue *string `json:"categorical_value,omitempty"` + ScoreValue *float64 `json:"score_value,omitempty"` + BooleanValue *bool `json:"boolean_value,omitempty"` + MLApp string `json:"ml_app,omitempty"` + TimestampMS int64 `json:"timestamp_ms,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +type PushMetricsRequest struct { + Data PushMetricsRequestData `json:"data"` +} + +type PushMetricsRequestData struct { + Type string `json:"type"` + Attributes PushMetricsRequestDataAttributes `json:"attributes"` +} + +type PushMetricsRequestDataAttributes struct { + Metrics []*LLMObsMetric `json:"metrics"` +} + +func (c *Transport) PushEvalMetrics( + ctx context.Context, + metrics []*LLMObsMetric, +) error { + if len(metrics) == 0 { + return nil + } + path := endpointEvalMetric + method := http.MethodPost + body := &PushMetricsRequest{ + Data: PushMetricsRequestData{ + Type: "evaluation_metric", + Attributes: PushMetricsRequestDataAttributes{ + Metrics: metrics, + }, + }, + } + + status, b, err := c.jsonRequest(ctx, method, path, subdomainEvalMetric, body, defaultTimeout) + if err != nil { + return fmt.Errorf("post llmobs eval metrics failed: %v", err) + } + if status != http.StatusOK && status != http.StatusAccepted { + return fmt.Errorf("unexpected status %d: %s", status, string(b)) + } + return nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/span.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/span.go new file mode 100644 index 00000000..f30c319d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/span.go @@ -0,0 +1,82 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
+ +package transport + +import ( + "context" + "fmt" + "net/http" + + "github.com/DataDog/dd-trace-go/v2/internal/version" +) + +type SpanLink struct { + TraceID uint64 `json:"trace_id"` + TraceIDHigh uint64 `json:"trace_id_high,omitempty"` + SpanID uint64 `json:"span_id"` + Attributes map[string]string `json:"attributes,omitempty"` + Tracestate string `json:"tracestate,omitempty"` + Flags uint32 `json:"flags,omitempty"` +} + +type LLMObsSpanEvent struct { + SpanID string `json:"span_id,omitempty"` + TraceID string `json:"trace_id,omitempty"` + ParentID string `json:"parent_id,omitempty"` + SessionID string `json:"session_id,omitempty"` + Tags []string `json:"tags,omitempty"` + Name string `json:"name,omitempty"` + StartNS int64 `json:"start_ns,omitempty"` + Duration int64 `json:"duration,omitempty"` + Status string `json:"status,omitempty"` + StatusMessage string `json:"status_message,omitempty"` + Meta map[string]any `json:"meta,omitempty"` + Metrics map[string]float64 `json:"metrics,omitempty"` + CollectionErrors []string `json:"collection_errors,omitempty"` + SpanLinks []SpanLink `json:"span_links,omitempty"` + Scope string `json:"-"` +} + +type PushSpanEventsRequest struct { + Stage string `json:"_dd.stage,omitempty"` + TracerVersion string `json:"_dd.tracer_version,omitempty"` + Scope string `json:"_dd.scope,omitempty"` + EventType string `json:"event_type,omitempty"` + Spans []*LLMObsSpanEvent `json:"spans,omitempty"` +} + +func (c *Transport) PushSpanEvents( + ctx context.Context, + events []*LLMObsSpanEvent, +) error { + if len(events) == 0 { + return nil + } + path := endpointLLMSpan + method := http.MethodPost + body := make([]*PushSpanEventsRequest, 0, len(events)) + for _, ev := range events { + req := &PushSpanEventsRequest{ + Stage: "raw", + TracerVersion: version.Tag, + EventType: "span", + Spans: []*LLMObsSpanEvent{ev}, + } + if ev.Scope != "" { + req.Scope = ev.Scope + } + body = append(body, req) + } + + status, b, err := c.jsonRequest(ctx, method, path, subdomainLLMSpan, body, defaultTimeout) + if err != nil { + return fmt.Errorf("post llmobs spans failed: %w", err) + } + if status != http.StatusOK && status != http.StatusAccepted { + return fmt.Errorf("unexpected status %d: %s", status, string(b)) + } + return nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/transport.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/transport.go new file mode 100644 index 00000000..39f9a840 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/transport.go @@ -0,0 +1,305 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
+ +package transport + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/errortrace" + "github.com/cenkalti/backoff/v5" + + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/llmobs/config" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +const ( + headerEVPSubdomain = "X-Datadog-EVP-Subdomain" + headerRateLimitReset = "x-ratelimit-reset" +) + +const ( + endpointEvalMetric = "/api/intake/llm-obs/v2/eval-metric" + endpointLLMSpan = "/api/v2/llmobs" + + endpointPrefixEVPProxy = "/evp_proxy/v2" + endpointPrefixDNE = "/api/unstable/llm-obs/v1" + + subdomainLLMSpan = "llmobs-intake" + subdomainEvalMetric = "api" + subdomainDNE = "api" +) + +const ( + defaultSite = "datadoghq.com" + defaultMaxRetries uint = 3 + + defaultTimeout time.Duration = 5 * time.Second + bulkUploadTimeout time.Duration = 60 * time.Second + getDatasetRecordsTimeout time.Duration = 20 * time.Second +) + +var ( + ErrDatasetNotFound = errors.New("dataset not found") +) + +func defaultBackoffStrategy() *backoff.ExponentialBackOff { + return &backoff.ExponentialBackOff{ + InitialInterval: 100 * time.Millisecond, + RandomizationFactor: 0.5, + Multiplier: 1.5, + MaxInterval: 1 * time.Second, + } +} + +type Transport struct { + httpClient *http.Client + defaultHeaders map[string]string + site string + agentURL *url.URL + agentless bool + appKey string +} + +// New builds a new Transport for LLM Observability endpoints. +func New(cfg *config.Config) *Transport { + site := defaultSite + if cfg.TracerConfig.Site != "" { + site = cfg.TracerConfig.Site + } + + defaultHeaders := make(map[string]string) + if cfg.ResolvedAgentlessEnabled { + defaultHeaders["DD-API-KEY"] = cfg.TracerConfig.APIKey + } + + // Clone the HTTP client and remove its global timeout + // We manage timeouts per-request using context.WithTimeout + httpClient := cfg.TracerConfig.HTTPClient + if httpClient != nil && httpClient.Timeout > 0 { + clientCopy := *httpClient + clientCopy.Timeout = 0 + httpClient = &clientCopy + } + + return &Transport{ + httpClient: httpClient, + defaultHeaders: defaultHeaders, + site: site, + agentURL: cfg.TracerConfig.AgentURL, + agentless: cfg.ResolvedAgentlessEnabled, + appKey: cfg.TracerConfig.APPKey, + } +} + +// AnyPtr returns a pointer to the given value. This is used to create payloads that require pointers instead of values. +func AnyPtr[T any](v T) *T { + return &v +} + +// NewErrorMessage returns the payload representation of an error. 
+func NewErrorMessage(err error) *ErrorMessage { + if err == nil { + return nil + } + return &ErrorMessage{ + Message: err.Error(), + Type: errType(err), + Stack: errStackTrace(err), + } +} + +func errType(err error) string { + var originalErr error + var wErr *errortrace.TracerError + if !errors.As(err, &wErr) { + originalErr = err + } else { + originalErr = wErr.Unwrap() + } + return reflect.TypeOf(originalErr).String() +} + +func errStackTrace(err error) string { + var wErr *errortrace.TracerError + if !errors.As(err, &wErr) { + return "" + } + return wErr.Format() +} + +func (c *Transport) baseURL(subdomain string) string { + if c.agentless { + return fmt.Sprintf("https://%s.%s", subdomain, c.site) + } + u := "" + if c.agentURL.Scheme == "unix" { + u = internal.UnixDataSocketURL(c.agentURL.Path).String() + } else { + u = c.agentURL.String() + } + u += endpointPrefixEVPProxy + return u +} + +func (c *Transport) jsonRequest(ctx context.Context, method, path, subdomain string, body any, timeout time.Duration) (int, []byte, error) { + var jsonBody io.Reader + if body != nil { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + enc.SetEscapeHTML(false) + if err := enc.Encode(body); err != nil { + return 0, nil, fmt.Errorf("encode body: %w", err) + } + jsonBody = bytes.NewReader(buf.Bytes()) + } + return c.request(ctx, method, path, subdomain, jsonBody, "application/json", timeout) +} + +func (c *Transport) request(ctx context.Context, method, path, subdomain string, body io.Reader, contentType string, timeout time.Duration) (int, []byte, error) { + if timeout == 0 { + timeout = defaultTimeout + } + urlStr := c.baseURL(subdomain) + path + backoffStrat := defaultBackoffStrategy() + + doRequest := func() (resp *http.Response, err error) { + log.Debug("llmobs: sending request (method: %s | url: %s)", method, urlStr) + defer func() { + if err != nil { + log.Debug("llmobs: request failed: %s", err.Error()) + } + }() + + // Reset body reader if it's seekable (for retries) + if body != nil { + if seeker, ok := body.(io.Seeker); ok { + if _, err := seeker.Seek(0, io.SeekStart); err != nil { + return nil, fmt.Errorf("failed to reset body reader: %w", err) + } + } + } + + req, err := http.NewRequestWithContext(ctx, method, urlStr, body) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", contentType) + for key, val := range c.defaultHeaders { + req.Header.Set(key, val) + } + if !c.agentless { + req.Header.Set(headerEVPSubdomain, subdomain) + } + + // Set headers for datasets and experiments endpoints + if strings.HasPrefix(path, endpointPrefixDNE) { + if c.agentless && c.appKey != "" { + // In agentless mode, set the app key header if available + req.Header.Set("DD-APPLICATION-KEY", c.appKey) + } else if !c.agentless { + // In agent mode, always set the NeedsAppKey header (app key is ignored) + req.Header.Set("X-Datadog-NeedsAppKey", "true") + } + } + timeoutCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + req = req.WithContext(timeoutCtx) + + resp, err = c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer func() { + if err != nil && resp != nil { + _ = resp.Body.Close() + } + }() + + code := resp.StatusCode + if code >= 200 && code <= 299 { + return resp, nil + } + if isRetriableStatus(code) { + return nil, fmt.Errorf("request failed with transient http status code: %d", code) + } + if code == http.StatusTooManyRequests { + wait := parseRetryAfter(resp.Header) + log.Debug("llmobs: status code 429, waiting %s before retry...", 
wait.String()) + drainAndClose(resp.Body) + return nil, backoff.RetryAfter(int(wait.Seconds())) + } + drainAndClose(resp.Body) + return nil, backoff.Permanent(fmt.Errorf("request failed with http status code: %d", resp.StatusCode)) + } + + resp, err := backoff.Retry(ctx, doRequest, backoff.WithBackOff(backoffStrat), backoff.WithMaxTries(defaultMaxRetries)) + if err != nil { + return 0, nil, err + } + defer resp.Body.Close() + + b, err := io.ReadAll(resp.Body) + if err != nil { + return resp.StatusCode, nil, err + } + log.Debug("llmobs: got success response: %s", string(b)) + + return resp.StatusCode, b, nil +} + +func drainAndClose(b io.ReadCloser) { + if b == nil { + return + } + io.Copy(io.Discard, io.LimitReader(b, 1<<20)) // drain up to 1MB to reuse conn + _ = b.Close() +} + +func parseRetryAfter(h http.Header) time.Duration { + rateLimitReset := h.Get(headerRateLimitReset) + waitSeconds := int64(1) + if rateLimitReset != "" { + if resetTime, err := strconv.ParseInt(rateLimitReset, 10, 64); err == nil { + seconds := int64(0) + if resetTime > time.Now().Unix() { + // Assume it's a Unix timestamp + seconds = int64(time.Until(time.Unix(resetTime, 0)).Seconds()) + } else { + // Assume it's a duration in seconds + seconds = resetTime + } + if seconds > 0 { + waitSeconds = seconds + } + } + } + return time.Duration(waitSeconds) * time.Second +} + +func isRetriableStatus(code int) bool { + switch code { + case http.StatusRequestTimeout, + http.StatusTooEarly: + return true + } + if code >= 500 && code <= 599 { + return true + } + return false +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/log/log.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/log/log.go new file mode 100644 index 00000000..6c1f5499 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/log/log.go @@ -0,0 +1,364 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package log provides logging utilities for the tracer. +package log + +import ( + "fmt" + "log" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo" + "github.com/DataDog/dd-trace-go/v2/internal/version" +) + +// Level specifies the logging level that the log package prints at. +type Level int + +func (l Level) String() string { + switch l { + case LevelDebug: + return "DEBUG" + case LevelInfo: + return "INFO" + case LevelWarn: + return "WARN" + case LevelError: + return "ERROR" + default: + return "UNKNOWN" + } +} + +const ( + // LevelDebug represents debug level messages. + LevelDebug Level = iota + // LevelInfo represents informational messages. + LevelInfo + // LevelWarn represents warning messages. + LevelWarn + // LevelError represents error messages. + LevelError +) + +var prefixMsg = fmt.Sprintf("Datadog Tracer %s", version.Tag) + +// Logger implementations are able to log given messages that the tracer might +// output. This interface is duplicated here to avoid a cyclic dependency +// between this package and ddtrace +type Logger interface { + // Log prints the given message. 
+ Log(msg string) +} + +// File name for writing tracer logs, if DD_TRACE_LOG_DIRECTORY has been configured +const LoggerFile = "ddtrace.log" + +// ManagedFile functions like a *os.File but is safe for concurrent use +type ManagedFile struct { + mu sync.RWMutex + file *os.File + closed bool +} + +// Close closes the ManagedFile's *os.File in a concurrent-safe manner, ensuring the file is closed only once +func (m *ManagedFile) Close() error { + m.mu.Lock() + defer m.mu.Unlock() + if m.file == nil || m.closed { + return nil + } + err := m.file.Close() + if err != nil { + return err + } + m.closed = true + return nil +} + +func (m *ManagedFile) Name() string { + m.mu.RLock() + defer m.mu.RUnlock() + if m.file == nil { + return "" + } + return m.file.Name() +} + +var ( + mu sync.RWMutex // guards below fields + levelThreshold = LevelWarn + logger Logger = &defaultLogger{l: log.New(os.Stderr, "", log.LstdFlags)} +) + +// UseLogger sets l as the active logger and returns a function to restore the +// previous logger. The return value is mostly useful when testing. +func UseLogger(l Logger) (undo func()) { + Flush() + mu.Lock() + defer mu.Unlock() + old := logger + logger = l + return func() { + mu.Lock() + defer mu.Unlock() + logger = old + } +} + +// OpenFileAtPath creates a new file at the specified dirPath and configures the logger to write to this file. The dirPath must already exist on the underlying os. +// It returns the file that was created, or nil and an error if the file creation was unsuccessful. +// The caller of OpenFileAtPath is responsible for calling Close() on the ManagedFile +func OpenFileAtPath(dirPath string) (*ManagedFile, error) { + path, err := os.Stat(dirPath) + if err != nil || !path.IsDir() { + return nil, fmt.Errorf("file path %v invalid or does not exist on the underlying os; using default logger to stderr", dirPath) + } + filepath := dirPath + "/" + LoggerFile + f, err := os.OpenFile(filepath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + return nil, fmt.Errorf("using default logger to stderr due to error creating or opening log file: %s", err.Error()) + } + UseLogger(&defaultLogger{l: log.New(f, "", log.LstdFlags)}) + return &ManagedFile{ + file: f, + }, nil +} + +// SetLevel sets the given lvl as log threshold for logging. +func SetLevel(lvl Level) { + mu.Lock() + defer mu.Unlock() + levelThreshold = lvl +} + +func DefaultLevel() Level { + mu.RLock() + defer mu.RUnlock() + return levelThreshold +} + +// GetLevel returns the currrent log level. +func GetLevel() Level { + mu.Lock() + defer mu.Unlock() + return levelThreshold +} + +// DebugEnabled returns true if debug log messages are enabled. This can be used in extremely +// hot code paths to avoid allocating the ...interface{} argument. +func DebugEnabled() bool { + mu.RLock() + lvl := levelThreshold + mu.RUnlock() + return lvl == LevelDebug +} + +// Debug prints the given message if the level is LevelDebug. +func Debug(fmt string, a ...interface{}) { + if !DebugEnabled() { + return + } + printMsg(LevelDebug, fmt, a...) +} + +// Warn prints a warning message. +func Warn(fmt string, a ...interface{}) { + printMsg(LevelWarn, fmt, a...) +} + +// Info prints an informational message. +func Info(fmt string, a ...interface{}) { + printMsg(LevelInfo, fmt, a...) 
+} + +var ( + errmu sync.RWMutex // guards below fields + erragg = map[string]*errorReport{} // aggregated errors + errrate = time.Minute // the rate at which errors are reported + erron bool // true if errors are being aggregated +) + +func init() { + // This cannot use env.Get because it would cause a cyclic import + if v := os.Getenv("DD_LOGGING_RATE"); v != "" { + setLoggingRate(v) + } + + // This is required because we really want to be able to log errors from dyngo + // but the log package depend on too much packages that we want to instrument. + // So we need to do this to avoid dependency cycles. + dyngo.LogError = Error +} + +func setLoggingRate(v string) { + if sec, err := strconv.ParseInt(v, 10, 64); err != nil { + Warn("Invalid value for DD_LOGGING_RATE: %s", err.Error()) + } else { + if sec < 0 { + Warn("Invalid value for DD_LOGGING_RATE: negative value") + } else { + // DD_LOGGING_RATE = 0 allows to log errors immediately. + errrate = time.Duration(sec) * time.Second + } + } +} + +type errorReport struct { + first time.Time // time when first error occurred + err error + count uint64 +} + +// Error reports an error. Errors get aggregated and logged periodically. The +// default is once per minute or once every DD_LOGGING_RATE number of seconds. +func Error(format string, a ...interface{}) { + key := format // format should 99.9% of the time be constant + if reachedLimit(key) { + // avoid too much lock contention on spammy errors + return + } + errmu.Lock() + defer errmu.Unlock() + report, ok := erragg[key] + if !ok { + erragg[key] = &errorReport{ + err: fmt.Errorf(format, a...), + first: time.Now(), + } + report = erragg[key] + } + report.count++ + if errrate == 0 { + flushLocked() + return + } + if !erron { + erron = true + time.AfterFunc(errrate, Flush) + } +} + +// defaultErrorLimit specifies the maximum number of errors gathered in a report. +const defaultErrorLimit = 200 + +// reachedLimit reports whether the maximum count has been reached for this key. +func reachedLimit(key string) bool { + errmu.RLock() + e, ok := erragg[key] + confirm := ok && e.count > defaultErrorLimit + errmu.RUnlock() + return confirm +} + +// Flush flushes and resets all aggregated errors to the logger. 
+func Flush() { + errmu.Lock() + defer errmu.Unlock() + flushLocked() +} + +func flushLocked() { + for _, report := range erragg { + var extra string + if report.count > defaultErrorLimit { + extra = fmt.Sprintf(", %d+ additional messages skipped (first occurrence: %s)", defaultErrorLimit, report.first.Format(time.RFC822)) + } else if report.count > 1 { + extra = fmt.Sprintf(", %d additional messages skipped (first occurrence: %s)", report.count-1, report.first.Format(time.RFC822)) + } else { + extra = fmt.Sprintf(" (occurred: %s)", report.first.Format(time.RFC822)) + } + printMsg(LevelError, "%v%s", report.err, extra) + } + for k := range erragg { + // compiler-optimized map-clearing post go1.11 (golang/go#20138) + delete(erragg, k) + } + erron = false +} + +func printMsg(lvl Level, format string, a ...interface{}) { + var b strings.Builder + b.Grow(len(prefixMsg) + 1 + len(lvl.String()) + 2 + len(format)) + b.WriteString(prefixMsg) + b.WriteString(" ") + b.WriteString(lvl.String()) + b.WriteString(": ") + b.WriteString(fmt.Sprintf(format, a...)) + mu.RLock() + if ll, ok := logger.(interface { + LogL(lvl Level, msg string) + }); !ok { + logger.Log(b.String()) + } else { + ll.LogL(lvl, b.String()) + } + mu.RUnlock() +} + +type defaultLogger struct{ l *log.Logger } + +var _ Logger = &defaultLogger{} + +func (p *defaultLogger) Log(msg string) { p.l.Print(msg) } + +// DiscardLogger discards every call to Log(). +type DiscardLogger struct{} + +var _ Logger = &DiscardLogger{} + +// Log implements Logger. +func (d DiscardLogger) Log(_ string) {} + +// RecordLogger records every call to Log() and makes it available via Logs(). +type RecordLogger struct { + m sync.Mutex + logs []string + ignore []string // a log is ignored if it contains a string in ignored +} + +var _ Logger = &RecordLogger{} + +// Ignore adds substrings to the ignore field of RecordLogger, allowing +// the RecordLogger to ignore attempts to log strings with certain substrings. +func (r *RecordLogger) Ignore(substrings ...string) { + r.m.Lock() + defer r.m.Unlock() + r.ignore = append(r.ignore, substrings...) +} + +// Log implements Logger. +func (r *RecordLogger) Log(msg string) { + r.m.Lock() + defer r.m.Unlock() + for _, ignored := range r.ignore { + if strings.Contains(msg, ignored) { + return + } + } + r.logs = append(r.logs, msg) +} + +// Logs returns the ordered list of logs recorded by the logger. +func (r *RecordLogger) Logs() []string { + r.m.Lock() + defer r.m.Unlock() + copied := make([]string, len(r.logs)) + copy(copied, r.logs) + return copied +} + +// Reset resets the logger's internal logs +func (r *RecordLogger) Reset() { + r.m.Lock() + defer r.m.Unlock() + r.logs = r.logs[:0] + r.ignore = r.ignore[:0] +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/meta_internal_types.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/meta_internal_types.go new file mode 100644 index 00000000..4bd9fb80 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/meta_internal_types.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package internal + +// MetaStructValue is a custom type wrapper used to send metadata to the agent via the `meta_struct` field +// instead of the `meta` inside a span. 
+type MetaStructValue struct { + Value any // TODO: further constraining Value's type, especially if it becomes public +} + +// TraceSourceTagValue is a custom type wrapper used to create the trace source (_dd.p.ts) tag that will +// be propagated to downstream distributed traces via the `X-Datadog-Tags` HTTP header for example. +// It is represented as a 2 character hexadecimal string +type TraceSourceTagValue struct { + Value TraceSource +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/namingschema/namingschema.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/namingschema/namingschema.go new file mode 100644 index 00000000..d8ab25f9 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/namingschema/namingschema.go @@ -0,0 +1,98 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023 Datadog, Inc. + +// Package namingschema allows to use the naming schema from the integrations to set different +// service and span/operation names based on the value of the DD_TRACE_SPAN_ATTRIBUTE_SCHEMA environment variable. +package namingschema + +import ( + "strings" + "sync/atomic" + + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// Version represents the available naming schema versions. +type Version int + +const ( + // SchemaV0 represents naming schema v0. + SchemaV0 Version = iota + // SchemaV1 represents naming schema v1. + SchemaV1 +) + +type Config struct { + NamingSchemaVersion Version + RemoveIntegrationServiceNames bool + DDService string +} + +var ( + activeNamingSchema atomic.Int32 + removeIntegrationServiceNames atomic.Bool +) + +func LoadFromEnv() { + schemaVersionStr := env.Get("DD_TRACE_SPAN_ATTRIBUTE_SCHEMA") + if v, ok := parseVersionStr(schemaVersionStr); ok { + setVersion(v) + } else { + setVersion(SchemaV0) + log.Warn("DD_TRACE_SPAN_ATTRIBUTE_SCHEMA=%s is not a valid value, setting to default of v%d", schemaVersionStr, v) + } + // Allow DD_TRACE_SPAN_ATTRIBUTE_SCHEMA=v0 users to disable default integration (contrib AKA v0) service names. + // These default service names are always disabled for v1 onwards. + SetRemoveIntegrationServiceNames(internal.BoolEnv("DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED", false)) +} + +// ReloadConfig is used to reload the configuration in tests. +func ReloadConfig() { + LoadFromEnv() + globalconfig.SetServiceName(env.Get("DD_SERVICE")) +} + +// GetConfig returns the naming schema config. +func GetConfig() Config { + return Config{ + NamingSchemaVersion: GetVersion(), + RemoveIntegrationServiceNames: getRemoveIntegrationServiceNames(), + DDService: globalconfig.ServiceName(), + } +} + +// GetVersion returns the global naming schema version used for this application. +func GetVersion() Version { + return Version(activeNamingSchema.Load()) +} + +// setVersion sets the global naming schema version used for this application. +func setVersion(v Version) { + activeNamingSchema.Store(int32(v)) +} + +// parseVersionStr attempts to parse the version string. 
+func parseVersionStr(v string) (Version, bool) { + switch strings.ToLower(v) { + case "", "v0": + return SchemaV0, true + case "v1": + return SchemaV1, true + default: + return SchemaV0, false + } +} + +func getRemoveIntegrationServiceNames() bool { + return removeIntegrationServiceNames.Load() +} + +// SetRemoveIntegrationServiceNames sets the value of the RemoveIntegrationServiceNames setting for this application. +func SetRemoveIntegrationServiceNames(v bool) { + removeIntegrationServiceNames.Store(v) +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer/normalizer.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/normalizer/normalizer.go similarity index 94% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer/normalizer.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/normalizer/normalizer.go index eab11896..1d0984ad 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer/normalizer.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/normalizer/normalizer.go @@ -11,8 +11,8 @@ import ( "regexp" "strings" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "github.com/DataDog/dd-trace-go/v2/ddtrace/ext" + "github.com/DataDog/dd-trace-go/v2/internal/log" ) // headerTagRegexp is used to replace all invalid characters in the config. Only alphanumerics, whitespaces and dashes allowed. @@ -43,7 +43,7 @@ func HeaderTagSlice(headers []string) map[string]string { header, tag := HeaderTag(h) // If `header` or `tag` is just the empty string, we don't want to set it. if len(header) == 0 || len(tag) == 0 { - log.Debug("Header-tag input is in unsupported format; dropping input value %v", h) + log.Debug("Header-tag input is in unsupported format; dropping input value %s", h) continue } headerTagsMap[header] = tag diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion/context.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/context.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion/context.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/context.go diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion/context_stack.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/context_stack.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion/context_stack.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/context_stack.go diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion/gls.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/gls.go similarity index 90% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion/gls.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/gls.go index d117759c..c36a807e 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion/gls.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/gls.go @@ -22,10 +22,10 @@ var ( // Accessors set by orchestrion in the runtime package. If orchestrion is not enabled, these will be nil as per the default values. 
//revive:disable:var-naming -//go:linkname __dd_orchestrion_gls_get __dd_orchestrion_gls_get +//go:linkname __dd_orchestrion_gls_get __dd_orchestrion_gls_get.V2 var __dd_orchestrion_gls_get func() any -//go:linkname __dd_orchestrion_gls_set __dd_orchestrion_gls_set +//go:linkname __dd_orchestrion_gls_set __dd_orchestrion_gls_set.V2 var __dd_orchestrion_gls_set func(any) //revive:enable:var-naming diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/gls.orchestrion.yml b/vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/gls.orchestrion.yml new file mode 100644 index 00000000..5afc5284 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/gls.orchestrion.yml @@ -0,0 +1,48 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the Apache License Version 2.0. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2023-present Datadog, Inc. +--- +# yaml-language-server: $schema=https://datadoghq.dev/orchestrion/schema.json +meta: + name: github.com/DataDog/dd-trace-go/v2/internal/orchestrion + description: Operations that interact with Go's runtime system. + caveats: |- + This configuration introduces a way to access the Goroutine Local Storage (GLS), which is not + meant to be used directly by end-users. This is intended to be used only by tracer internals to + enable trace context forwarding in places where a {{}} + value is not available. + +aspects: + - id: __dd_gls_v2 + join-point: + struct-definition: runtime.g + advice: + - add-struct-field: + name: __dd_gls_v2 + type: any + - add-blank-import: unsafe # Needed for go:linkname + - inject-declarations: + # Reference: https://github.com/golang/go/blob/6d89b38ed86e0bfa0ddaba08dc4071e6bb300eea/src/runtime/HACKING.md?plain=1#L44-L54 + template: |- + //go:linkname __dd_orchestrion_gls_get __dd_orchestrion_gls_get.V2 + var __dd_orchestrion_gls_get = func() any { + return getg().m.curg.__dd_gls_v2 + } + + //go:linkname __dd_orchestrion_gls_set __dd_orchestrion_gls_set.V2 + var __dd_orchestrion_gls_set = func(val any) { + getg().m.curg.__dd_gls_v2 = val + } + - id: goexit1 + join-point: + all-of: + - import-path: runtime + - function-body: + function: + # This is the function that finishes the execution of a goroutine. + # See: https://github.com/golang/go/blob/f38d42f2c4c6ad0d7cbdad5e1417cac3be2a5dcb/src/runtime/proc.go#L4264 + - name: goexit1 + advice: + - prepend-statements: + template: getg().__dd_gls_v2 = nil diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/orchestrion.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/orchestrion.go new file mode 100644 index 00000000..334a6db1 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/orchestrion/orchestrion.go @@ -0,0 +1,22 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package orchestrion + +// Orchestrion will change this at build-time +// +//orchestrion:enabled +var enabled = false + +// The version of the orchestrion binary used to build the current binray, or +// blank if the current binary was not built using orchestrion. +// +//orchestrion:version +const Version = "" + +// Enabled returns whether the current build was compiled with orchestrion or not. 
+func Enabled() bool { + return enabled +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/osinfo/osinfo.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/osinfo/osinfo.go new file mode 100644 index 00000000..32ddb763 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/osinfo/osinfo.go @@ -0,0 +1,54 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package osinfo + +import ( + "runtime" +) + +// Modified in init functions to provide OS-specific information +var ( + osName = runtime.GOOS + osVersion = "unknown" + arch = runtime.GOARCH + kernelName = "unknown" + kernelRelease = "unknown" + kernelVersion = "unknown" +) + +// OSName returns the name of the operating system, including the distribution +// for Linux when possible. +func OSName() string { + // call out to OS-specific implementation + return osName +} + +// OSVersion returns the operating system release, e.g. major/minor version +// number and build ID. +func OSVersion() string { + // call out to OS-specific implementation + return osVersion +} + +// Architecture returns the architecture of the operating system. +func Architecture() string { + return arch +} + +// KernelName returns the name of the kernel. +func KernelName() string { + return kernelName +} + +// KernelRelease returns the release of the kernel. +func KernelRelease() string { + return kernelRelease +} + +// KernelVersion returns the version of the kernel. +func KernelVersion() string { + return kernelVersion +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/osinfo/osinfo_unix.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/osinfo/osinfo_unix.go new file mode 100644 index 00000000..36d96da3 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/osinfo/osinfo_unix.go @@ -0,0 +1,70 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
+ +//go:build unix + +package osinfo + +import ( + "bufio" + "bytes" + "os" + "os/exec" + "runtime" + "strings" + + "golang.org/x/sys/unix" +) + +func init() { + // Change the default values for backwards compatibility on scenarios + if runtime.GOOS == "linux" { + osName = "Linux (Unknown Distribution)" + kernelName = "Linux" + } + + if runtime.GOOS == "darwin" { + kernelName = "Darwin" + out, err := exec.Command("sw_vers", "-productVersion").Output() + if err != nil { + return + } + + osVersion = string(bytes.Trim(out, "\n")) + } + + var uts unix.Utsname + if err := unix.Uname(&uts); err == nil { + kernelName = string(bytes.TrimRight(uts.Sysname[:], "\x00")) + kernelVersion = string(bytes.TrimRight(uts.Version[:], "\x00")) + kernelRelease = strings.SplitN(strings.TrimRight(string(uts.Release[:]), "\x00"), "-", 2)[0] + + // Backwards compatibility on how data is reported for freebsd + if runtime.GOOS == "freebsd" { + osVersion = kernelRelease + } + } + + f, err := os.Open("/etc/os-release") + if err != nil { + return + } + + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + parts := strings.SplitN(scanner.Text(), "=", 2) + switch parts[0] { + case "NAME": + osName = strings.Trim(parts[1], "\"") + case "VERSION": + osVersion = strings.Trim(parts[1], "\"") + case "VERSION_ID": + if osVersion == "" { // Fallback to VERSION_ID if VERSION is not set + osVersion = strings.Trim(parts[1], "\"") + } + } + } +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_windows.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/osinfo/osinfo_windows.go similarity index 89% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_windows.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/osinfo/osinfo_windows.go index 659bd9ce..27552846 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo/osinfo_windows.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/osinfo/osinfo_windows.go @@ -3,24 +3,21 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016 Datadog, Inc. +//go:build windows + package osinfo import ( "fmt" - "runtime" "strings" "golang.org/x/sys/windows/registry" ) -func osName() string { - return runtime.GOOS -} - -func osVersion() string { +func init() { k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) if err != nil { - return "unknown" + return } defer k.Close() @@ -50,5 +47,6 @@ func osVersion() string { } else { version.WriteString(" Unknown Build") } - return version.String() + + osVersion = version.String() } diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/processtags/processtags.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/processtags/processtags.go new file mode 100644 index 00000000..a820d975 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/processtags/processtags.go @@ -0,0 +1,155 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
+ +package processtags + +import ( + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize" + + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +const envProcessTagsEnabled = "DD_EXPERIMENTAL_PROPAGATE_PROCESS_TAGS_ENABLED" + +const ( + tagEntrypointName = "entrypoint.name" + tagEntrypointBasedir = "entrypoint.basedir" + tagEntrypointWorkdir = "entrypoint.workdir" + tagEntrypointType = "entrypoint.type" +) + +const ( + entrypointTypeExecutable = "executable" +) + +var ( + enabled bool + pTags *ProcessTags +) + +func init() { + Reload() +} + +type ProcessTags struct { + mu sync.RWMutex + tags map[string]string + str string + slice []string +} + +// String returns the string representation of the process tags. +func (p *ProcessTags) String() string { + if p == nil { + return "" + } + p.mu.RLock() + defer p.mu.RUnlock() + return p.str +} + +// Slice returns the string slice representation of the process tags. +func (p *ProcessTags) Slice() []string { + if p == nil { + return nil + } + p.mu.RLock() + defer p.mu.RUnlock() + return p.slice +} + +func (p *ProcessTags) merge(newTags map[string]string) { + if len(newTags) == 0 { + return + } + pTags.mu.Lock() + defer pTags.mu.Unlock() + + if p.tags == nil { + p.tags = make(map[string]string) + } + for k, v := range newTags { + p.tags[k] = v + } + + // loop over the sorted map keys so the resulting string and slice versions are created consistently. + keys := make([]string, 0, len(p.tags)) + for k := range p.tags { + keys = append(keys, k) + } + sort.Strings(keys) + + tagsSlice := make([]string, 0, len(p.tags)) + var b strings.Builder + first := true + for _, k := range keys { + val := p.tags[k] + if !first { + b.WriteByte(',') + } + first = false + keyVal := normalize.NormalizeTag(k + ":" + val) + b.WriteString(keyVal) + tagsSlice = append(tagsSlice, keyVal) + } + p.slice = tagsSlice + p.str = b.String() +} + +// Reload initializes the configuration and process tags collection. This is useful for tests. +func Reload() { + enabled = internal.BoolEnv(envProcessTagsEnabled, false) + if !enabled { + return + } + pTags = &ProcessTags{} + tags := collect() + if len(tags) > 0 { + Add(tags) + } +} + +func collect() map[string]string { + tags := make(map[string]string) + execPath, err := os.Executable() + if err != nil { + log.Debug("failed to get binary path: %s", err.Error()) + } else { + baseDirName := filepath.Base(filepath.Dir(execPath)) + tags[tagEntrypointName] = filepath.Base(execPath) + tags[tagEntrypointBasedir] = baseDirName + tags[tagEntrypointType] = entrypointTypeExecutable + } + wd, err := os.Getwd() + if err != nil { + log.Debug("failed to get working directory: %s", err.Error()) + } else { + tags[tagEntrypointWorkdir] = filepath.Base(wd) + } + return tags +} + +// GlobalTags returns the global process tags. +func GlobalTags() *ProcessTags { + if !enabled { + return nil + } + return pTags +} + +// Add merges the given tags into the global processTags map. 
+func Add(tags map[string]string) { + if !enabled { + return + } + pTags.merge(tags) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/config.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/config.go new file mode 100644 index 00000000..d4d57d95 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/config.go @@ -0,0 +1,68 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023 Datadog, Inc. + +package remoteconfig + +import ( + "net/http" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/version" +) + +const ( + envPollIntervalSec = "DD_REMOTE_CONFIG_POLL_INTERVAL_SECONDS" +) + +// ClientConfig contains the required values to configure a remoteconfig client +type ClientConfig struct { + // The address at which the agent is listening for remoteconfig update requests on + AgentURL string + // The semantic version of the user's application + AppVersion string + // The env this tracer is running in + Env string + // The time interval between two client polls to the agent for updates + PollInterval time.Duration + // The tracer's runtime id + RuntimeID string + // The name of the user's application + ServiceName string + // The semantic version of the tracer + TracerVersion string + // The base TUF root metadata file + TUFRoot string + // HTTP is the HTTP client used to receive config updates + HTTP *http.Client +} + +// DefaultClientConfig returns the default remote config client configuration +func DefaultClientConfig() ClientConfig { + return ClientConfig{ + Env: env.Get("DD_ENV"), + HTTP: &http.Client{Timeout: 10 * time.Second}, + PollInterval: pollIntervalFromEnv(), + RuntimeID: globalconfig.RuntimeID(), + ServiceName: globalconfig.ServiceName(), + TracerVersion: version.Tag, + TUFRoot: env.Get("DD_RC_TUF_ROOT"), + } +} + +func pollIntervalFromEnv() time.Duration { + interval := internal.FloatEnv(envPollIntervalSec, 5.0) + if interval < 0 { + log.Debug("Remote config: cannot use a negative poll interval: %s = %f. Defaulting to 5s.", envPollIntervalSec, interval) + interval = 5.0 + } else if interval == 0 { + log.Debug("Remote config: poll interval set to 0. Polling will be continuous.") + return time.Nanosecond + } + return time.Duration(interval * float64(time.Second)) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/path.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/path.go new file mode 100644 index 00000000..db09f95f --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/path.go @@ -0,0 +1,96 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022 Datadog, Inc. + +package remoteconfig + +import ( + "fmt" + "strings" +) + +type ( + Path struct { + // The source of the config. Either "datadog/", or "employee" + Source Source + // The name of the product that produced this config (e.g, "ASM_DD"). 
+ Product string + // The ID of the config (e.g, "blocked_ips") + ConfigID string + // The name of the config object (e.g, "config") + Name string + } + Source interface { + fmt.Stringer + isSource() + } + DatadogSource struct { + source + OrgID string + } + EmployeeSource struct { + source + } + source struct{} +) + +// ParsePath parses a remote config target file path into its components. +func ParsePath(filename string) (Path, bool) { + // See: https://docs.google.com/document/d/1u_G7TOr8wJX0dOM_zUDKuRJgxoJU_hVTd5SeaMucQUs/edit?tab=t.0#bookmark=id.ew0e2fwzf8p7 + parts := strings.Split(filename, "/") + if len(parts) < 4 { + return Path{}, false + } + + var source Source + switch parts[0] { + case "datadog": + orgID := parts[1] + if orgID == "" { + // Invalid org ID (empty)... + return Path{}, false + } + for _, c := range orgID { + if c < '0' || c > '9' { + // Invalid org ID (non-numeric)... + return Path{}, false + } + } + source = DatadogSource{OrgID: orgID} + parts = parts[2:] + case "employee": + source = EmployeeSource{} + parts = parts[1:] + default: + // Invalid source... + return Path{}, false + } + + if len(parts) != 3 { + // Invalid number of parts... + return Path{}, false + } + + product, configID, name := parts[0], parts[1], parts[2] + if product == "" || configID == "" || name == "" { + // Invalid product, config ID, or name (none of these can be empty)... + return Path{}, false + } + + return Path{Source: source, Product: product, ConfigID: configID, Name: name}, true +} + +func (p Path) String() string { + return p.Source.String() + "/" + p.Product + "/" + p.ConfigID + "/" + p.Name +} + +func (s DatadogSource) String() string { + return fmt.Sprintf("datadog/%s", s.OrgID) +} + +func (s EmployeeSource) String() string { + return "employee" +} + +func (source) isSource() {} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/remoteconfig.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/remoteconfig.go new file mode 100644 index 00000000..47290589 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/remoteconfig.go @@ -0,0 +1,694 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022 Datadog, Inc. + +package remoteconfig + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "maps" + "math/big" + "net/http" + "reflect" + "slices" + "sync" + "time" + + rc "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/processtags" +) + +// Callback represents a function that can process a remote config update. +// A Callback function can be registered to a remote config client to automatically +// react upon receiving updates. This function returns the configuration processing status +// for each config file received through the update. +type Callback func(updates map[string]ProductUpdate) map[string]rc.ApplyStatus + +// ProductCallback is like Callback but for a specific product. 
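+//
+// A minimal sketch (illustrative, not part of the vendored file) of a
+// ProductCallback that acknowledges every config file it receives:
+//
+//	func ack(update remoteconfig.ProductUpdate) map[string]rc.ApplyStatus {
+//		statuses := make(map[string]rc.ApplyStatus, len(update))
+//		for path := range update {
+//			statuses[path] = rc.ApplyStatus{State: rc.ApplyStateAcknowledged}
+//		}
+//		return statuses
+//	}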
+type ProductCallback func(update ProductUpdate) map[string]rc.ApplyStatus
+
+// Capability represents a bit index to be set in clientData.Capabilities in order to register a client
+// for a specific capability
+type Capability uint
+
+const (
+	_ Capability = iota
+	// ASMActivation represents the capability to activate ASM through remote configuration
+	ASMActivation
+	// ASMIPBlocking represents the capability for ASM to block requests based on user IP
+	ASMIPBlocking
+	// ASMDDRules represents the capability to update the rules used by the ASM WAF for threat detection
+	ASMDDRules
+	// ASMExclusions represents the capability for ASM to exclude traffic from its protections
+	ASMExclusions
+	// ASMRequestBlocking represents the capability for ASM to block requests based on the HTTP request related WAF addresses
+	ASMRequestBlocking
+	// ASMResponseBlocking represents the capability for ASM to block requests based on the HTTP response related WAF addresses
+	ASMResponseBlocking
+	// ASMUserBlocking represents the capability for ASM to block requests based on user ID
+	ASMUserBlocking
+	// ASMCustomRules represents the capability for ASM to receive and use user-defined security rules
+	ASMCustomRules
+	// ASMCustomBlockingResponse represents the capability for ASM to receive and use user-defined blocking responses
+	ASMCustomBlockingResponse
+	// ASMTrustedIPs represents Trusted IPs through the ASM product
+	ASMTrustedIPs
+	// ASMApiSecuritySampleRate represents API Security sampling rate
+	ASMApiSecuritySampleRate
+	// APMTracingSampleRate represents the rate at which to sample traces from APM client libraries
+	APMTracingSampleRate
+	// APMTracingLogsInjection enables APM client libraries to inject trace ids into log records
+	APMTracingLogsInjection
+	// APMTracingHTTPHeaderTags enables APM client libraries to tag http header values to http server or client spans
+	APMTracingHTTPHeaderTags
+	// APMTracingCustomTags enables APM client libraries to set custom tags on all spans
+	APMTracingCustomTags
+	// ASMProcessorOverrides adds support for processor overrides through the ASM RC Product
+	ASMProcessorOverrides
+	// ASMCustomDataScanners adds support for custom data scanners through the ASM RC Product
+	ASMCustomDataScanners
+	// ASMExclusionData adds support for configurable exclusion filter data from the ASM_DATA Product
+	ASMExclusionData
+	// APMTracingEnabled enables APM tracing
+	APMTracingEnabled
+	// APMTracingDataStreamsEnabled enables Data Streams Monitoring
+	APMTracingDataStreamsEnabled
+	// ASMRASPSQLI enables ASM support for runtime protection against SQL Injection attacks
+	ASMRASPSQLI
+	// ASMRASPLFI enables ASM support for runtime protection against Local File Inclusion attacks
+	ASMRASPLFI
+	// ASMRASPSSRF enables ASM support for runtime protection against SSRF attacks
+	ASMRASPSSRF
+	// ASMRASPSHI enables ASM support for runtime protection against Shell Injection attacks
+	ASMRASPSHI
+	// ASMRASPXXE enables ASM support for runtime protection against XXE attacks
+	ASMRASPXXE
+	// ASMRASPRCE enables ASM support for runtime protection against Remote Code Execution
+	ASMRASPRCE
+	// ASMRASPNOSQLI enables ASM support for runtime protection against NoSQL Injection attacks
+	ASMRASPNOSQLI
+	// ASMRASPXSS enables ASM support for runtime protection against Cross Site Scripting attacks
+	ASMRASPXSS
+	// APMTracingSampleRules represents the sampling rate using matching rules from APM client libraries
+	APMTracingSampleRules
+	// CSMActivation represents the capability to activate CSM through remote
configuration + CSMActivation + // ASMAutoUserInstrumMode represents the capability to enable the automatic user instrumentation mode + ASMAutoUserInstrumMode + // ASMEndpointFingerprinting represents the capability to enable endpoint fingerprinting + ASMEndpointFingerprinting + // ASMSessionFingerprinting represents the capability to enable session fingerprinting + ASMSessionFingerprinting + // ASMNetworkFingerprinting represents the capability to enable network fingerprinting + ASMNetworkFingerprinting + // ASMHeaderFingerprinting represents the capability to enable header fingerprinting + ASMHeaderFingerprinting + // ASMTruncationRules is the support for truncation payload rules + ASMTruncationRules + // ASMRASPCommandInjection represents the capability for ASM's RASP Command Injection prevention + ASMRASPCommandInjection + // APMTracingEnableDynamicInstrumentation represents the capability to enable dynamic instrumentation + APMTracingEnableDynamicInstrumentation + // APMTracingEnableExceptionReplay represents the capability to enable exception replay + APMTracingEnableExceptionReplay + // APMTracingEnableCodeOrigin represents the capability to enable code origin + APMTracingEnableCodeOrigin + // APMTracingEnableLiveDebugging represents the capability to enable live debugging + APMTracingEnableLiveDebugging + // ASMDDMultiConfig represents the capability to handle multiple ASM_DD configuration objects + ASMDDMultiConfig + // ASMTraceTaggingRules represents the capability to honor trace tagging rules + ASMTraceTaggingRules +) + +// ErrClientNotStarted is returned when the remote config client is not started. +var ErrClientNotStarted = errors.New("remote config client not started") + +// ProductUpdate represents an update for a specific product. +// It is a map of file path to raw file content +type ProductUpdate map[string][]byte + +// A Client interacts with an Agent to update and track the state of remote +// configuration +type Client struct { + sync.RWMutex + ClientConfig + + clientID string + endpoint string + repository *rc.Repository + stop chan struct{} + + // When acquiring several locks and using defer to release them, make sure to acquire the locks in the following order: + callbacks []Callback + _callbacksMu sync.RWMutex + products map[string]struct{} + productsMu sync.RWMutex + productsWithCallbacks map[string]ProductCallback + productsWithCallbacksMu sync.RWMutex + capabilities map[Capability]struct{} + capabilitiesMu sync.RWMutex + + lastError error +} + +// client is a RC client singleton that can be accessed by multiple products (tracing, ASM, profiling etc.). +// Using a single RC client instance in the tracer is a requirement for remote configuration. +var client *Client + +var ( + startOnce sync.Once + stopOnce sync.Once +) + +// newClient creates a new remoteconfig Client +func newClient(config ClientConfig) (*Client, error) { + repo, err := rc.NewUnverifiedRepository() + if err != nil { + return nil, err + } + if config.HTTP == nil { + config.HTTP = DefaultClientConfig().HTTP + } + + return &Client{ + ClientConfig: config, + clientID: generateID(), + endpoint: fmt.Sprintf("%s/v0.7/config", config.AgentURL), + repository: repo, + stop: make(chan struct{}), + lastError: nil, + callbacks: []Callback{}, + capabilities: map[Capability]struct{}{}, + products: map[string]struct{}{}, + productsWithCallbacks: make(map[string]ProductCallback), + }, nil +} + +// Start starts the client's update poll loop in a fresh goroutine. +// Noop if the client has already started. 
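+//
+// Typical startup (illustrative sketch; the agent URL is an assumption):
+//
+//	cfg := remoteconfig.DefaultClientConfig()
+//	cfg.AgentURL = "http://localhost:8126"
+//	if err := remoteconfig.Start(cfg); err != nil {
+//		// handle the error; polling did not start
+//	}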
+func Start(config ClientConfig) error { + var err error + startOnce.Do(func() { + if !internal.BoolEnv("DD_REMOTE_CONFIGURATION_ENABLED", true) { + // Don't start polling if the feature is disabled explicitly + return + } + client, err = newClient(config) + if err != nil { + return + } + go func() { + ticker := time.NewTicker(client.PollInterval) + defer ticker.Stop() + + for { + select { + case <-client.stop: + close(client.stop) + return + case <-ticker.C: + client.Lock() + client.updateState() + client.Unlock() + } + } + }() + }) + return err +} + +// Stop stops the client's update poll loop. +// Noop if the client has already been stopped. +// The remote config client is supposed to have the same lifecycle as the tracer. +// It can't be restarted after a call to Stop() unless explicitly calling Reset(). +func Stop() { + if client == nil { + // In case Stop() is called before Start() + return + } + stopOnce.Do(func() { + log.Debug("remoteconfig: gracefully stopping the client") + client.stop <- struct{}{} + select { + case <-client.stop: + log.Debug("remoteconfig: client stopped successfully") + case <-time.After(time.Second): + log.Debug("remoteconfig: client stopping timeout") + } + }) +} + +// Reset destroys the client instance. +// To be used only in tests to reset the state of the client. +func Reset() { + client = nil + startOnce = sync.Once{} + stopOnce = sync.Once{} +} + +func (c *Client) updateState() { + data, err := c.newUpdateRequest() + if err != nil { + log.Error("remoteconfig: unexpected error while creating a new update request payload: %s", err.Error()) + return + } + + req, err := http.NewRequest(http.MethodGet, c.endpoint, &data) + if err != nil { + log.Error("remoteconfig: unexpected error while creating a new http request: %s", err.Error()) + return + } + if internal.ContainerID() != "" { + req.Header.Set("Datadog-Container-ID", internal.ContainerID()) + } + if internal.EntityID() != "" { + req.Header.Set("Datadog-Entity-ID", internal.EntityID()) + } + + resp, err := c.HTTP.Do(req) + if err != nil { + log.Debug("remoteconfig: http request error: %s", err.Error()) + return + } + // Flush and close the response body when returning (cf. https://pkg.go.dev/net/http#Client.Do) + defer func() { + io.ReadAll(resp.Body) + resp.Body.Close() + }() + + if sc := resp.StatusCode; sc != http.StatusOK { + log.Debug("remoteconfig: http request error: response status code is not 200 (OK) but %s", http.StatusText(sc)) + return + } + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.Error("remoteconfig: http request error: could not read the response body: %s", err.Error()) + return + } + + if body := string(respBody); body == `{}` || body == `null` { + return + } + + var update clientGetConfigsResponse + if err := json.Unmarshal(respBody, &update); err != nil { + log.Error("remoteconfig: http request error: could not parse the json response body: %s", err.Error()) + return + } + + c.lastError = c.applyUpdate(&update) +} + +// Subscribe registers a product and its callback to be invoked when the client receives configuration updates. +// Subscribe should be preferred over RegisterProduct and RegisterCallback if your callback only handles a single product. 
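+//
+// Illustrative sketch (the product name "ASM_DD" follows the example used in
+// path.go; the ack handler is an assumption, see the ProductCallback sketch above):
+//
+//	err := remoteconfig.Subscribe("ASM_DD", ack, remoteconfig.ASMDDRules)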
+func Subscribe(product string, callback ProductCallback, capabilities ...Capability) error { + if client == nil { + return ErrClientNotStarted + } + client.productsMu.RLock() + defer client.productsMu.RUnlock() + if _, found := client.products[product]; found { + return fmt.Errorf("product %s already registered via RegisterProduct", product) + } + + client.productsWithCallbacksMu.Lock() + defer client.productsWithCallbacksMu.Unlock() + client.productsWithCallbacks[product] = callback + + client.capabilitiesMu.Lock() + defer client.capabilitiesMu.Unlock() + for _, cap := range capabilities { + client.capabilities[cap] = struct{}{} + } + return nil +} + +// RegisterCallback allows registering a callback that will be invoked when the client +// receives configuration updates. It is up to that callback to then decide what to do +// depending on the product related to the configuration update. +func RegisterCallback(f Callback) error { + if client == nil { + return ErrClientNotStarted + } + client._callbacksMu.Lock() + defer client._callbacksMu.Unlock() + client.callbacks = append(client.callbacks, f) + return nil +} + +// UnregisterCallback removes a previously registered callback from the active callbacks list +// This remove operation preserves ordering +func UnregisterCallback(f Callback) error { + if client == nil { + return ErrClientNotStarted + } + client._callbacksMu.Lock() + defer client._callbacksMu.Unlock() + + toRemove := reflect.ValueOf(f).Pointer() + client.callbacks = slices.DeleteFunc(client.callbacks, func(cb Callback) bool { + return reflect.ValueOf(cb).Pointer() == toRemove + }) + return nil +} + +// RegisterProduct adds a product to the list of products listened by the client +func RegisterProduct(p string) error { + if client == nil { + return ErrClientNotStarted + } + client.productsMu.Lock() + defer client.productsMu.Unlock() + client.productsWithCallbacksMu.RLock() + defer client.productsWithCallbacksMu.RUnlock() + if _, found := client.productsWithCallbacks[p]; found { + return fmt.Errorf("product %s already registered via Subscribe", p) + } + client.products[p] = struct{}{} + return nil +} + +// UnregisterProduct removes a product from the list of products listened by the client +func UnregisterProduct(p string) error { + if client == nil { + return ErrClientNotStarted + } + client.productsMu.Lock() + defer client.productsMu.Unlock() + delete(client.products, p) + return nil +} + +// HasProduct returns whether a given product was registered +func HasProduct(p string) (bool, error) { + if client == nil { + return false, ErrClientNotStarted + } + client.productsMu.RLock() + defer client.productsMu.RUnlock() + client.productsWithCallbacksMu.RLock() + defer client.productsWithCallbacksMu.RUnlock() + _, found := client.products[p] + _, foundWithCallback := client.productsWithCallbacks[p] + return found || foundWithCallback, nil +} + +// RegisterCapability adds a capability to the list of capabilities exposed by the client when requesting +// configuration updates +func RegisterCapability(cpb Capability) error { + if client == nil { + return ErrClientNotStarted + } + client.capabilitiesMu.Lock() + defer client.capabilitiesMu.Unlock() + client.capabilities[cpb] = struct{}{} + return nil +} + +// UnregisterCapability removes a capability from the list of capabilities exposed by the client when requesting +// configuration updates +func UnregisterCapability(cpb Capability) error { + if client == nil { + return ErrClientNotStarted + } + client.capabilitiesMu.Lock() + defer 
client.capabilitiesMu.Unlock() + delete(client.capabilities, cpb) + return nil +} + +// HasCapability returns whether a given capability was registered +func HasCapability(cpb Capability) (bool, error) { + if client == nil { + return false, ErrClientNotStarted + } + client.capabilitiesMu.RLock() + defer client.capabilitiesMu.RUnlock() + _, found := client.capabilities[cpb] + return found, nil +} + +func (c *Client) allCapabilities() *big.Int { + client.capabilitiesMu.Lock() + defer client.capabilitiesMu.Unlock() + capa := big.NewInt(0) + for i := range c.capabilities { + capa.SetBit(capa, int(i), 1) + } + return capa +} + +func (c *Client) globalCallbacks() []Callback { + c._callbacksMu.RLock() + defer c._callbacksMu.RUnlock() + callbacks := make([]Callback, len(c.callbacks)) + copy(callbacks, c.callbacks) + return callbacks +} + +func (c *Client) productCallbacks() map[string]ProductCallback { + c.productsWithCallbacksMu.RLock() + defer c.productsWithCallbacksMu.RUnlock() + callbacks := make(map[string]ProductCallback, len(c.productsWithCallbacks)) + for k, v := range c.productsWithCallbacks { + callbacks[k] = v + } + return callbacks +} + +func (c *Client) allProducts() []string { + c.productsMu.RLock() + defer c.productsMu.RUnlock() + c.productsWithCallbacksMu.RLock() + defer c.productsWithCallbacksMu.RUnlock() + products := make([]string, 0, len(c.products)+len(c.productsWithCallbacks)) + for p := range c.products { + products = append(products, p) + } + for p := range c.productsWithCallbacks { + products = append(products, p) + } + return products +} + +func (c *Client) applyUpdate(pbUpdate *clientGetConfigsResponse) error { + fileMap := make(map[string][]byte, len(pbUpdate.TargetFiles)) + allProducts := c.allProducts() + productUpdates := make(map[string]ProductUpdate, len(allProducts)) + for _, f := range pbUpdate.TargetFiles { + path, valid := ParsePath(f.Path) + if !valid { + log.Warn("remoteconfig: ignoring invalid target file path: %s", f.Path) + continue + } + + fileMap[f.Path] = f.Raw + if !slices.Contains(allProducts, path.Product) { + log.Debug("remoteconfig: received file for unknown product %s (known: %#v): %s", path.Product, allProducts, f.Path) //nolint:gocritic // Debug logging for unknown products + } + if productUpdates[path.Product] == nil { + productUpdates[path.Product] = make(ProductUpdate) + } + productUpdates[path.Product][f.Path] = f.Raw + } + + mapify := func(s *rc.RepositoryState) map[string]string { + m := make(map[string]string) + for i := range s.Configs { + path := s.CachedFiles[i].Path + product := s.Configs[i].Product + m[path] = product + } + return m + } + + // Check the repository state before and after the update to detect which configs are not being sent anymore. 
+ // This is needed because some products can stop sending configurations, and we want to make sure that the subscribers + // are provided with this information in this case + stateBefore, err := c.repository.CurrentState() + if err != nil { + return fmt.Errorf("repository current state error: %s", err.Error()) + } + products, err := c.repository.Update(rc.Update{ + TUFRoots: pbUpdate.Roots, + TUFTargets: pbUpdate.Targets, + TargetFiles: fileMap, + ClientConfigs: pbUpdate.ClientConfigs, + }) + if err != nil { + return fmt.Errorf("repository update error: %s", err.Error()) + } + stateAfter, err := c.repository.CurrentState() + if err != nil { + return fmt.Errorf("repository current state error after update: %s", err.Error()) + } + + // Create a config files diff between before/after the update to see which config files are missing + mBefore := mapify(&stateBefore) + for k := range mapify(&stateAfter) { + delete(mBefore, k) + } + + // Set the payload data to nil for missing config files. The callbacks then can handle the nil config case to detect + // that this config will not be updated anymore. + updatedProducts := make(map[string]struct{}) + for path, product := range mBefore { + if productUpdates[product] == nil { + productUpdates[product] = make(ProductUpdate) + } + productUpdates[product][path] = nil + updatedProducts[product] = struct{}{} + } + // Aggregate updated products and missing products so that callbacks get called for both + for _, p := range products { + updatedProducts[p] = struct{}{} + } + + if len(updatedProducts) == 0 { + return nil + } + // Performs the callbacks registered and update the application status in the repository (RCTE2) + // In case of several callbacks handling the same config, statuses take precedence in this order: + // 1 - ApplyStateError + // 2 - ApplyStateUnacknowledged + // 3 - ApplyStateAcknowledged + // This makes sure that any product that would need to re-receive the config in a subsequent update will be allowed to + statuses := make(map[string]rc.ApplyStatus) + for _, cb := range c.globalCallbacks() { + for path, status := range cb(productUpdates) { + if s, ok := statuses[path]; !ok || status.State == rc.ApplyStateError || + s.State == rc.ApplyStateAcknowledged && status.State == rc.ApplyStateUnacknowledged { + statuses[path] = status + } + } + } + // Call the product-specific callbacks registered via Subscribe + productCallbacks := c.productCallbacks() + for product, update := range productUpdates { + if fn, ok := productCallbacks[product]; ok { + maps.Copy(statuses, fn(update)) + } + } + for p, s := range statuses { + c.repository.UpdateApplyStatus(p, s) + } + + return nil +} + +func (c *Client) newUpdateRequest() (bytes.Buffer, error) { + state, err := c.repository.CurrentState() + if err != nil { + return bytes.Buffer{}, err + } + // Temporary check while using untrusted repo, for which no initial root file is provided + if state.RootsVersion < 1 { + state.RootsVersion = 1 + } + + pbCachedFiles := make([]*targetFileMeta, 0, len(state.CachedFiles)) + for _, f := range state.CachedFiles { + pbHashes := make([]*targetFileHash, 0, len(f.Hashes)) + for alg, hash := range f.Hashes { + pbHashes = append(pbHashes, &targetFileHash{ + Algorithm: alg, + Hash: hex.EncodeToString(hash), + }) + } + pbCachedFiles = append(pbCachedFiles, &targetFileMeta{ + Path: f.Path, + Length: int64(f.Length), + Hashes: pbHashes, + }) + } + + hasError := c.lastError != nil + errMsg := "" + if hasError { + errMsg = c.lastError.Error() + } + + var pbConfigState 
[]*configState + if !hasError { + pbConfigState = make([]*configState, 0, len(state.Configs)) + for _, f := range state.Configs { + pbConfigState = append(pbConfigState, &configState{ + ID: f.ID, + Version: f.Version, + Product: f.Product, + ApplyState: f.ApplyStatus.State, + ApplyError: f.ApplyStatus.Error, + }) + } + } + + capa := c.allCapabilities() + var tags []string + for k, v := range internal.GetGitMetadataTags() { + tags = append(tags, k+":"+v) + } + req := clientGetConfigsRequest{ + Client: &clientData{ + State: &clientState{ + RootVersion: uint64(state.RootsVersion), + TargetsVersion: uint64(state.TargetsVersion), + ConfigStates: pbConfigState, + HasError: hasError, + Error: errMsg, + }, + ID: c.clientID, + Products: c.allProducts(), + IsTracer: true, + ClientTracer: &clientTracer{ + RuntimeID: c.RuntimeID, + Language: "go", + TracerVersion: c.TracerVersion, + Service: c.ServiceName, + Env: c.Env, + AppVersion: c.AppVersion, + ProcessTags: processtags.GlobalTags().Slice(), + Tags: tags, + }, + Capabilities: capa.Bytes(), + }, + CachedTargetFiles: pbCachedFiles, + } + + var b bytes.Buffer + + err = json.NewEncoder(&b).Encode(&req) + if err != nil { + return bytes.Buffer{}, err + } + + return b, nil +} + +var ( + idSize = 21 + idAlphabet = []rune("_-0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") +) + +func generateID() string { + bytes := make([]byte, idSize) + _, err := rand.Read(bytes) + if err != nil { + panic(err) + } + id := make([]rune, idSize) + for i := 0; i < idSize; i++ { + id[i] = idAlphabet[bytes[i]&63] + } + return string(id[:idSize]) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/types.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/types.go new file mode 100644 index 00000000..fb9272dc --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/types.go @@ -0,0 +1,74 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022 Datadog, Inc. 
+ +package remoteconfig + +import rc "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" + +type clientData struct { + State *clientState `json:"state,omitempty"` + ID string `json:"id,omitempty"` + Products []string `json:"products,omitempty"` + IsTracer bool `json:"is_tracer,omitempty"` + ClientTracer *clientTracer `json:"client_tracer,omitempty"` + LastSeen uint64 `json:"last_seen,omitempty"` + Capabilities []byte `json:"capabilities,omitempty"` +} + +type clientTracer struct { + RuntimeID string `json:"runtime_id,omitempty"` + Language string `json:"language,omitempty"` + TracerVersion string `json:"tracer_version,omitempty"` + Service string `json:"service,omitempty"` + Env string `json:"env,omitempty"` + AppVersion string `json:"app_version,omitempty"` + Tags []string `json:"tags,omitempty"` + ProcessTags []string `json:"process_tags,omitempty"` +} + +type configState struct { + ID string `json:"id,omitempty"` + Version uint64 `json:"version,omitempty"` + Product string `json:"product,omitempty"` + ApplyState rc.ApplyState `json:"apply_state,omitempty"` + ApplyError string `json:"apply_error,omitempty"` +} + +type clientState struct { + RootVersion uint64 `json:"root_version"` + TargetsVersion uint64 `json:"targets_version"` + ConfigStates []*configState `json:"config_states,omitempty"` + HasError bool `json:"has_error,omitempty"` + Error string `json:"error,omitempty"` + BackendClientState []byte `json:"backend_client_state,omitempty"` +} + +type targetFileHash struct { + Algorithm string `json:"algorithm,omitempty"` + Hash string `json:"hash,omitempty"` +} + +type targetFileMeta struct { + Path string `json:"path,omitempty"` + Length int64 `json:"length,omitempty"` + Hashes []*targetFileHash `json:"hashes,omitempty"` +} + +type clientGetConfigsRequest struct { + Client *clientData `json:"client,omitempty"` + CachedTargetFiles []*targetFileMeta `json:"cached_target_files,omitempty"` +} + +type clientGetConfigsResponse struct { + Roots [][]byte `json:"roots,omitempty"` + Targets []byte `json:"targets,omitempty"` + TargetFiles []*file `json:"target_files,omitempty"` + ClientConfigs []string `json:"client_configs,omitempty"` +} + +type file struct { + Path string `json:"path,omitempty"` + Raw []byte `json:"raw,omitempty"` +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames/samplernames.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/samplernames/samplernames.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames/samplernames.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/samplernames/samplernames.go diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stableconfig/api.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stableconfig/api.go new file mode 100644 index 00000000..3fce0524 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stableconfig/api.go @@ -0,0 +1,86 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
+ +// Package stableconfig provides utilities to load and manage APM configurations +// loaded from YAML configuration files +package stableconfig + +import ( + "errors" + "fmt" + "iter" + "strconv" + + "github.com/DataDog/dd-trace-go/v2/internal/env" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" +) + +// ConfigData holds configuration value with its origin and config ID +type ConfigData struct { + Origin telemetry.Origin + Value string + ConfigID string +} + +func reportTelemetryAndReturnWithErr(env string, value bool, origin telemetry.Origin, id string, err error) (bool, telemetry.Origin, error) { + if env == "DD_APPSEC_SCA_ENABLED" && origin == telemetry.OriginDefault { + return value, origin, err + } + telemetry.RegisterAppConfigs(telemetry.Configuration{Name: telemetry.EnvToTelemetryName(env), Value: value, Origin: origin, ID: id}) + return value, origin, err +} + +func reportTelemetryAndReturn(env string, value string, origin telemetry.Origin, id string) (string, telemetry.Origin) { + telemetry.RegisterAppConfigs(telemetry.Configuration{Name: telemetry.EnvToTelemetryName(env), Value: value, Origin: origin, ID: id}) + return value, origin +} + +// Bool returns a boolean config value from managed file-based config, environment variable, +// or local file-based config, in that order. If none provide a valid boolean, it returns the default. +// Also returns the value's origin and any parse error encountered. +func Bool(env string, def bool) (value bool, origin telemetry.Origin, err error) { + for configData := range stableConfigByPriority(env) { + if val, err := strconv.ParseBool(configData.Value); err == nil { + return reportTelemetryAndReturnWithErr(env, val, configData.Origin, configData.ConfigID, nil) + } + err = errors.Join(err, fmt.Errorf("non-boolean value for %s: '%s' in %s configuration, dropping", env, configData.Value, configData.Origin)) + } + return reportTelemetryAndReturnWithErr(env, def, telemetry.OriginDefault, telemetry.EmptyID, err) +} + +// String returns a string config value from managed file-based config, environment variable, +// or local file-based config, in that order. If none are set, it returns the default value and origin. 
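+//
+// For example (illustrative; DD_SERVICE is used as a typical key):
+//
+//	service, origin := stableconfig.String("DD_SERVICE", "fallback-service")
+//	// origin is telemetry.OriginDefault when no source provided a value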
+func String(env string, def string) (string, telemetry.Origin) { + for configData := range stableConfigByPriority(env) { + return reportTelemetryAndReturn(env, configData.Value, configData.Origin, configData.ConfigID) + } + return reportTelemetryAndReturn(env, def, telemetry.OriginDefault, telemetry.EmptyID) +} + +func stableConfigByPriority(key string) iter.Seq[ConfigData] { + return func(yield func(ConfigData) bool) { + if v := ManagedConfig.Get(key); v != "" && !yield(ConfigData{ + Origin: telemetry.OriginManagedStableConfig, + Value: v, + ConfigID: ManagedConfig.GetID(), + }) { + return + } + if v, ok := env.Lookup(key); ok && !yield(ConfigData{ + Origin: telemetry.OriginEnvVar, + Value: v, + ConfigID: telemetry.EmptyID, // environment variables do not have config ID + }) { + return + } + if v := LocalConfig.Get(key); v != "" && !yield(ConfigData{ + Origin: telemetry.OriginLocalStableConfig, + Value: v, + ConfigID: LocalConfig.GetID(), + }) { + return + } + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stableconfig/stableconfig.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stableconfig/stableconfig.go new file mode 100644 index 00000000..ddd5348d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stableconfig/stableconfig.go @@ -0,0 +1,37 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package stableconfig provides utilities to load and manage APM configurations +// loaded from YAML configuration files +package stableconfig + +import "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + +// stableConfig represents a configuration loaded from a YAML source file. +type stableConfig struct { + Config map[string]string `yaml:"apm_configuration_default,omitempty"` // Configuration key-value pairs. + ID string `yaml:"config_id,omitempty"` // Identifier for the config set. +} + +func (s *stableConfig) get(key string) string { + return s.Config[key] +} + +func (s *stableConfig) getID() string { + return s.ID +} + +// isEmpty checks if the config is considered empty (no ID and no config entries). +func (s *stableConfig) isEmpty() bool { + return s.ID == telemetry.EmptyID && len(s.Config) == 0 +} + +// emptyStableConfig creates and returns a new, empty stableConfig instance. +func emptyStableConfig() *stableConfig { + return &stableConfig{ + Config: make(map[string]string, 0), + ID: telemetry.EmptyID, + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stableconfig/stableconfigsource.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stableconfig/stableconfigsource.go new file mode 100644 index 00000000..a6db2365 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stableconfig/stableconfigsource.go @@ -0,0 +1,100 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
+ +// Package stableconfig provides utilities to load and manage APM configurations +// loaded from YAML configuration files +package stableconfig + +import ( + "os" + + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" + "gopkg.in/yaml.v3" +) + +const ( + // File paths are supported on linux only + localFilePath = "/etc/datadog-agent/application_monitoring.yaml" + managedFilePath = "/etc/datadog-agent/managed/datadog-agent/stable/application_monitoring.yaml" + + // maxFileSize defines the maximum size in bytes for stable config files (4KB). This limit ensures predictable memory use and guards against malformed large files. + maxFileSize = 4 * 1024 +) + +// LocalConfig holds the configuration loaded from the user-managed file. +var LocalConfig = newStableConfigSource(localFilePath, telemetry.OriginLocalStableConfig) + +// ManagedConfig holds the configuration loaded from the fleet-managed file. +var ManagedConfig = newStableConfigSource(managedFilePath, telemetry.OriginManagedStableConfig) + +// stableConfigSource represents a source of stable configuration loaded from a file. +type stableConfigSource struct { + filePath string // Path to the configuration file. + origin telemetry.Origin // Origin identifier for telemetry. + config *stableConfig // Parsed stable configuration. +} + +func (s *stableConfigSource) Get(key string) string { + return s.config.get(key) +} + +func (s *stableConfigSource) GetID() string { + return s.config.getID() +} + +// newStableConfigSource initializes a new stableConfigSource from the given file. +func newStableConfigSource(filePath string, origin telemetry.Origin) *stableConfigSource { + return &stableConfigSource{ + filePath: filePath, + origin: origin, + config: parseFile(filePath), + } +} + +// ParseFile reads and parses the config file at the given path. +// Returns an empty config if the file doesn't exist or is invalid. +func parseFile(filePath string) *stableConfig { + info, err := os.Stat(filePath) + if err != nil { + // It's expected that the stable config file may not exist; its absence is not an error. + if !os.IsNotExist(err) { + log.Warn("Failed to stat stable config file %q, dropping: %v", filePath, err.Error()) + } + return emptyStableConfig() + } + + if info.Size() > maxFileSize { + log.Warn("Stable config file %s exceeds size limit (%d bytes > %d bytes), dropping", + filePath, info.Size(), maxFileSize) + return emptyStableConfig() + } + + data, err := os.ReadFile(filePath) + if err != nil { + // It's expected that the stable config file may not exist; its absence is not an error. + if !os.IsNotExist(err) { + log.Warn("Failed to read stable config file %q, dropping: %v", filePath, err.Error()) + } + return emptyStableConfig() + } + + return fileContentsToConfig(data, filePath) +} + +// fileContentsToConfig parses YAML data into a stableConfig struct. +// Returns an empty config if parsing fails or the data is malformed. 
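+//
+// The accepted YAML shape, per the stableConfig struct tags (values are
+// illustrative):
+//
+//	config_id: "abc-123"
+//	apm_configuration_default:
+//	  DD_SERVICE: my-service
+//	  DD_ENV: prod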
+func fileContentsToConfig(data []byte, fileName string) *stableConfig { + scfg := &stableConfig{} + err := yaml.Unmarshal(data, scfg) + if err != nil { + log.Warn("Parsing stable config file %s failed due to error, dropping: %v", fileName, err.Error()) + return emptyStableConfig() + } + if scfg.Config == nil { + scfg.Config = make(map[string]string, 0) + } + return scfg +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/event.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/event.go new file mode 100644 index 00000000..649717e7 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/event.go @@ -0,0 +1,115 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +//go:generate go run github.com/tinylib/msgp -o event_msgp.go -tests=false + +package stacktrace + +import ( + "github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/trace" + "github.com/DataDog/dd-trace-go/v2/internal" + + "github.com/tinylib/msgp/msgp" +) + +var _ msgp.Marshaler = (*Event)(nil) + +type EventCategory string + +const ( + // ExceptionEvent is the event type for exception events + ExceptionEvent EventCategory = "exception" + // VulnerabilityEvent is the event type for vulnerability events + VulnerabilityEvent EventCategory = "vulnerability" + // ExploitEvent is the event type for exploit events + ExploitEvent EventCategory = "exploit" +) + +const SpanKey = "_dd.stack" + +// Event is the toplevel structure to contain a stacktrace and the additional information needed to correlate it with other data +type Event struct { + // Category is a well-known type of the event, not optional + Category EventCategory `msg:"-"` + // Type is a value event category specific, optional + Type string `msg:"type,omitempty"` + // Language is the language of the code that generated the event (set to "go" anyway here) + Language string `msg:"language,omitempty"` + // ID is the id of the event, optional for exceptions but mandatory for vulnerabilities and exploits to correlate with more data + ID string `msg:"id,omitempty"` + // Message is a generic message for the event + Message string `msg:"message,omitempty"` + // Frames is the stack trace of the event + Frames StackTrace `msg:"frames"` +} + +// NewEvent creates a new stacktrace event with the given category, type and message +func NewEvent(eventCat EventCategory, options ...Options) *Event { + event := &Event{ + Category: eventCat, + Language: "go", + Frames: SkipAndCapture(defaultCallerSkip), + } + + for _, opt := range options { + opt(event) + } + + return event +} + +// Options is a function type to set optional parameters for the event +type Options func(*Event) + +// WithType sets the type of the event +func WithType(eventType string) Options { + return func(event *Event) { + event.Type = eventType + } +} + +// WithMessage sets the message of the event +func WithMessage(message string) Options { + return func(event *Event) { + event.Message = message + } +} + +// WithID sets the id of the event +func WithID(id string) Options { + return func(event *Event) { + event.ID = id + } +} + +// GetSpanValue returns the value to be set as a tag on a span for the given stacktrace events +func GetSpanValue(events ...*Event) any { + if !Enabled() { + return nil + } + + groupByCategory := make(map[string][]*Event, 3) + for _, event := 
range events { + if _, ok := groupByCategory[string(event.Category)]; !ok { + groupByCategory[string(event.Category)] = []*Event{event} + continue + } + groupByCategory[string(event.Category)] = append(groupByCategory[string(event.Category)], event) + } + + return internal.MetaStructValue{Value: groupByCategory} +} + +// AddToSpan adds the event to the given span's root span as a tag if stacktrace collection is enabled +func AddToSpan(span trace.TagSetter, events ...*Event) { + value := GetSpanValue(events...) + type rooter interface { + Root() trace.TagSetter + } + if lrs, ok := span.(rooter); ok { + span = lrs.Root() + } + span.SetTag(SpanKey, value) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/event_msgp.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/event_msgp.go new file mode 100644 index 00000000..3a9e6145 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/event_msgp.go @@ -0,0 +1,335 @@ +package stacktrace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *Event) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "type": + z.Type, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + case "language": + z.Language, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Language") + return + } + case "id": + z.ID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ID") + return + } + case "message": + z.Message, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Message") + return + } + case "frames": + err = z.Frames.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Frames") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Event) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 5 bits */ + _ = zb0001Mask + if z.Type == "" { + zb0001Len-- + zb0001Mask |= 0x1 + } + if z.Language == "" { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.ID == "" { + zb0001Len-- + zb0001Mask |= 0x4 + } + if z.Message == "" { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + + // skip if no fields are to be emitted + if zb0001Len != 0 { + if (zb0001Mask & 0x1) == 0 { // if not omitted + // write "type" + err = en.Append(0xa4, 0x74, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Type) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + } + if (zb0001Mask & 0x2) == 0 { // if not omitted + // write "language" + err = en.Append(0xa8, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Language) + if err != nil { + err = msgp.WrapError(err, "Language") + return + } + } + if (zb0001Mask & 0x4) == 0 { // if not omitted + // write "id" + err = en.Append(0xa2, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteString(z.ID) + if err != nil { + 
err = msgp.WrapError(err, "ID") + return + } + } + if (zb0001Mask & 0x8) == 0 { // if not omitted + // write "message" + err = en.Append(0xa7, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Message) + if err != nil { + err = msgp.WrapError(err, "Message") + return + } + } + // write "frames" + err = en.Append(0xa6, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73) + if err != nil { + return + } + err = z.Frames.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Frames") + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Event) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // check for omitted fields + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 5 bits */ + _ = zb0001Mask + if z.Type == "" { + zb0001Len-- + zb0001Mask |= 0x1 + } + if z.Language == "" { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.ID == "" { + zb0001Len-- + zb0001Mask |= 0x4 + } + if z.Message == "" { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + if (zb0001Mask & 0x1) == 0 { // if not omitted + // string "type" + o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65) + o = msgp.AppendString(o, z.Type) + } + if (zb0001Mask & 0x2) == 0 { // if not omitted + // string "language" + o = append(o, 0xa8, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65) + o = msgp.AppendString(o, z.Language) + } + if (zb0001Mask & 0x4) == 0 { // if not omitted + // string "id" + o = append(o, 0xa2, 0x69, 0x64) + o = msgp.AppendString(o, z.ID) + } + if (zb0001Mask & 0x8) == 0 { // if not omitted + // string "message" + o = append(o, 0xa7, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65) + o = msgp.AppendString(o, z.Message) + } + // string "frames" + o = append(o, 0xa6, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73) + o, err = z.Frames.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Frames") + return + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Event) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "type": + z.Type, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + case "language": + z.Language, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Language") + return + } + case "id": + z.ID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ID") + return + } + case "message": + z.Message, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Message") + return + } + case "frames": + bts, err = z.Frames.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Frames") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Event) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.Type) + 9 + msgp.StringPrefixSize + len(z.Language) + 3 + msgp.StringPrefixSize + len(z.ID) + 8 + 
msgp.StringPrefixSize + len(z.Message) + 7 + z.Frames.Msgsize() + return +} + +// DecodeMsg implements msgp.Decodable +func (z *EventCategory) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 string + zb0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = EventCategory(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z EventCategory) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteString(string(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z EventCategory) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendString(o, string(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *EventCategory) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 string + zb0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = EventCategory(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z EventCategory) Msgsize() (s int) { + s = msgp.StringPrefixSize + len(string(z)) + return +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/stacktrace.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/stacktrace.go new file mode 100644 index 00000000..3028fd38 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/stacktrace.go @@ -0,0 +1,293 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. 
+
+//go:generate go run github.com/tinylib/msgp -o=stacktrace_msgp.go -tests=false
+
+package stacktrace
+
+import (
+	"errors"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/DataDog/dd-trace-go/v2/internal/env"
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+)
+
+var (
+	enabled              = true
+	defaultTopFrameDepth = 8
+	defaultMaxDepth      = 32
+
+	// internalSymbolPrefixes is the list of prefixes for internal symbols whose frames should be hidden in the stack trace
+	internalSymbolPrefixes = []string{
+		"github.com/DataDog/dd-trace-go/v2",
+		"gopkg.in/DataDog/dd-trace-go.v1",
+		"github.com/DataDog/go-libddwaf",
+		"github.com/DataDog/datadog-agent",
+		"github.com/datadog/orchestrion",
+		"github.com/DataDog/orchestrion",
+	}
+)
+
+const (
+	defaultCallerSkip    = 4
+	envStackTraceDepth   = "DD_APPSEC_MAX_STACK_TRACE_DEPTH"
+	envStackTraceEnabled = "DD_APPSEC_STACK_TRACE_ENABLE"
+)
+
+func init() {
+	if env := env.Get(envStackTraceEnabled); env != "" {
+		if e, err := strconv.ParseBool(env); err == nil {
+			enabled = e
+		} else {
+			log.Error("Failed to parse %s env var as boolean: (using default value: %t) %v", envStackTraceEnabled, enabled, err.Error())
+		}
+	}
+
+	if env := env.Get(envStackTraceDepth); env != "" {
+		if !enabled {
+			log.Warn("Ignoring %s because stacktrace generation is disabled", envStackTraceDepth)
+			return
+		}
+
+		if depth, err := strconv.Atoi(env); err == nil && depth > 0 {
+			defaultMaxDepth = depth
+		} else {
+			if err == nil {
+				err = errors.New("value is not a strictly positive integer")
+			}
+			log.Error("Failed to parse %s env var as a positive integer: (using default value: %d) %v", envStackTraceDepth, defaultMaxDepth, err.Error())
+		}
+	}
+
+	defaultTopFrameDepth = defaultMaxDepth / 4
+}
+
+// Enabled returns whether stacktrace should be collected
+func Enabled() bool {
+	return enabled
+}
+
+type (
+	// StackTrace is intended to be sent over the span tag `_dd.stack`; the first frame is the current frame
+	StackTrace []StackFrame
+
+	// StackFrame represents a single frame in the stack trace
+	StackFrame struct {
+		Index     uint32 `msg:"id"`                   // Index of the frame (0 = top of the stack)
+		Text      string `msg:"text,omitempty"`       // Text version of the stackframe as a string
+		File      string `msg:"file,omitempty"`       // File name where the code line is
+		Line      uint32 `msg:"line,omitempty"`       // Line number in the context of the file where the code is
+		Column    uint32 `msg:"column,omitempty"`     // Column in the file where the code is
+		Namespace string `msg:"namespace,omitempty"`  // Namespace is the fully qualified name of the package where the code is
+		ClassName string `msg:"class_name,omitempty"` // ClassName is the fully qualified name of the class where the line of code is
+		Function  string `msg:"function,omitempty"`   // Function is the fully qualified name of the function where the line of code is
+	}
+
+	symbol struct {
+		Package  string
+		Receiver string
+		Function string
+	}
+)
+
+type queue[T any] struct {
+	data       []T
+	head, tail int
+	size, cap  int
+}
+
+func newQueue[T any](capacity int) *queue[T] {
+	return &queue[T]{
+		data: make([]T, capacity),
+		cap:  capacity,
+	}
+}
+
+func (q *queue[T]) Length() int {
+	return q.size
+}
+
+func (q *queue[T]) Add(item T) {
+	if q.size == q.cap {
+		// Overwrite oldest
+		q.data[q.tail] = item
+		q.tail = (q.tail + 1) % q.cap
+		q.head = q.tail
+	} else {
+		q.data[q.head] = item
+		q.head = (q.head + 1) % q.cap
+		q.size++
+	}
+}
+
+func (q *queue[T]) Remove() T {
+	if q.size == 0 {
+		var zero T
+		return zero
+	}
+	item := q.data[q.tail]
+	q.tail = (q.tail + 1) % q.cap
+
q.size-- + return item +} + +var symbolRegex = regexp.MustCompile(`^(([^(]+/)?([^(/.]+)?)(\.\(([^/)]+)\))?\.([^/()]+)$`) + +// parseSymbol parses a symbol name into its package, receiver and function +// ex: github.com/DataDog/dd-trace-go/v2/internal/stacktrace.(*Event).NewException +// -> package: github.com/DataDog/dd-trace-go/v2/internal/stacktrace +// -> receiver: *Event +// -> function: NewException +func parseSymbol(name string) symbol { + matches := symbolRegex.FindStringSubmatch(name) + if len(matches) != 7 { + log.Error("Failed to parse symbol for stacktrace: %s", name) + return symbol{ + Package: "", + Receiver: "", + Function: "", + } + } + + return symbol{ + Package: matches[1], + Receiver: matches[5], + Function: matches[6], + } +} + +// Capture create a new stack trace from the current call stack +func Capture() StackTrace { + return SkipAndCapture(defaultCallerSkip) +} + +// SkipAndCapture creates a new stack trace from the current call stack, skipping the first `skip` frames +func SkipAndCapture(skip int) StackTrace { + return skipAndCapture(skip, defaultMaxDepth, internalSymbolPrefixes) +} + +func skipAndCapture(skip int, maxDepth int, symbolSkip []string) StackTrace { + iter := iterator(skip, maxDepth, symbolSkip) + stack := make([]StackFrame, defaultMaxDepth) + nbStoredFrames := 0 + topFramesQueue := newQueue[StackFrame](defaultTopFrameDepth) + + // We have to make sure we don't store more than maxDepth frames + // if there is more than maxDepth frames, we get X frames from the bottom of the stack and Y from the top + for frame, ok := iter.Next(); ok; frame, ok = iter.Next() { + // we reach the top frames: start to use the queue + if nbStoredFrames >= defaultMaxDepth-defaultTopFrameDepth { + topFramesQueue.Add(frame) + // queue is full, remove the oldest frame + if topFramesQueue.Length() > defaultTopFrameDepth { + topFramesQueue.Remove() + } + continue + } + + // Bottom frames: directly store them in the stack + stack[nbStoredFrames] = frame + nbStoredFrames++ + } + + // Stitch the top frames to the stack + for topFramesQueue.Length() > 0 { + stack[nbStoredFrames] = topFramesQueue.Remove() + nbStoredFrames++ + } + + return stack[:nbStoredFrames] +} + +// framesIterator is an iterator over the frames of a call stack +// It skips internal packages and caches the frames to avoid multiple calls to runtime.Callers +// It also skips the first `skip` frames +// It is not thread-safe +type framesIterator struct { + skipPrefixes []string + cache []uintptr + frames *queue[runtime.Frame] + cacheDepth int + cacheSize int + currDepth int +} + +func iterator(skip, cacheSize int, internalPrefixSkip []string) framesIterator { + return framesIterator{ + skipPrefixes: internalPrefixSkip, + cache: make([]uintptr, cacheSize), + frames: newQueue[runtime.Frame](cacheSize + 4), + cacheDepth: skip, + cacheSize: cacheSize, + currDepth: 0, + } +} + +// next returns the next runtime.Frame in the call stack, filling the cache if needed +func (it *framesIterator) next() (runtime.Frame, bool) { + if it.frames.Length() == 0 { + n := runtime.Callers(it.cacheDepth, it.cache) + if n == 0 { + return runtime.Frame{}, false + } + + frames := runtime.CallersFrames(it.cache[:n]) + for { + frame, more := frames.Next() + it.frames.Add(frame) + it.cacheDepth++ + if !more { + break + } + } + } + + it.currDepth++ + return it.frames.Remove(), true +} + +// Next returns the next StackFrame in the call stack, skipping internal packages and refurbishing the cache if needed +func (it *framesIterator) Next() 
(StackFrame, bool) { + for { + frame, ok := it.next() + if !ok { + return StackFrame{}, false + } + + if it.skipFrame(frame) { + continue + } + + parsedSymbol := parseSymbol(frame.Function) + return StackFrame{ + Index: uint32(it.currDepth - 1), + Text: "", + File: frame.File, + Line: uint32(frame.Line), + Column: 0, // No column given by the runtime + Namespace: parsedSymbol.Package, + ClassName: parsedSymbol.Receiver, + Function: parsedSymbol.Function, + }, true + } +} + +func (it *framesIterator) skipFrame(frame runtime.Frame) bool { + if frame.File == "" { // skip orchestrion generated code + return true + } + + for _, prefix := range it.skipPrefixes { + if strings.HasPrefix(frame.Function, prefix) { + return true + } + } + + return false +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/stacktrace_msgp.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/stacktrace_msgp.go new file mode 100644 index 00000000..e1076571 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/stacktrace_msgp.go @@ -0,0 +1,477 @@ +package stacktrace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *StackFrame) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "id": + z.Index, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "Index") + return + } + case "text": + z.Text, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Text") + return + } + case "file": + z.File, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "File") + return + } + case "line": + z.Line, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "Line") + return + } + case "column": + z.Column, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "Column") + return + } + case "namespace": + z.Namespace, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Namespace") + return + } + case "class_name": + z.ClassName, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ClassName") + return + } + case "function": + z.Function, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Function") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *StackFrame) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(8) + var zb0001Mask uint8 /* 8 bits */ + _ = zb0001Mask + if z.Text == "" { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.File == "" { + zb0001Len-- + zb0001Mask |= 0x4 + } + if z.Line == 0 { + zb0001Len-- + zb0001Mask |= 0x8 + } + if z.Column == 0 { + zb0001Len-- + zb0001Mask |= 0x10 + } + if z.Namespace == "" { + zb0001Len-- + zb0001Mask |= 0x20 + } + if z.ClassName == "" { + zb0001Len-- + zb0001Mask |= 0x40 + } + if z.Function == "" { + zb0001Len-- + zb0001Mask |= 0x80 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "id" + err = 
en.Append(0xa2, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteUint32(z.Index) + if err != nil { + err = msgp.WrapError(err, "Index") + return + } + if (zb0001Mask & 0x2) == 0 { // if not omitted + // write "text" + err = en.Append(0xa4, 0x74, 0x65, 0x78, 0x74) + if err != nil { + return + } + err = en.WriteString(z.Text) + if err != nil { + err = msgp.WrapError(err, "Text") + return + } + } + if (zb0001Mask & 0x4) == 0 { // if not omitted + // write "file" + err = en.Append(0xa4, 0x66, 0x69, 0x6c, 0x65) + if err != nil { + return + } + err = en.WriteString(z.File) + if err != nil { + err = msgp.WrapError(err, "File") + return + } + } + if (zb0001Mask & 0x8) == 0 { // if not omitted + // write "line" + err = en.Append(0xa4, 0x6c, 0x69, 0x6e, 0x65) + if err != nil { + return + } + err = en.WriteUint32(z.Line) + if err != nil { + err = msgp.WrapError(err, "Line") + return + } + } + if (zb0001Mask & 0x10) == 0 { // if not omitted + // write "column" + err = en.Append(0xa6, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e) + if err != nil { + return + } + err = en.WriteUint32(z.Column) + if err != nil { + err = msgp.WrapError(err, "Column") + return + } + } + if (zb0001Mask & 0x20) == 0 { // if not omitted + // write "namespace" + err = en.Append(0xa9, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Namespace) + if err != nil { + err = msgp.WrapError(err, "Namespace") + return + } + } + if (zb0001Mask & 0x40) == 0 { // if not omitted + // write "class_name" + err = en.Append(0xaa, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.ClassName) + if err != nil { + err = msgp.WrapError(err, "ClassName") + return + } + } + if (zb0001Mask & 0x80) == 0 { // if not omitted + // write "function" + err = en.Append(0xa8, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Function) + if err != nil { + err = msgp.WrapError(err, "Function") + return + } + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *StackFrame) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // check for omitted fields + zb0001Len := uint32(8) + var zb0001Mask uint8 /* 8 bits */ + _ = zb0001Mask + if z.Text == "" { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.File == "" { + zb0001Len-- + zb0001Mask |= 0x4 + } + if z.Line == 0 { + zb0001Len-- + zb0001Mask |= 0x8 + } + if z.Column == 0 { + zb0001Len-- + zb0001Mask |= 0x10 + } + if z.Namespace == "" { + zb0001Len-- + zb0001Mask |= 0x20 + } + if z.ClassName == "" { + zb0001Len-- + zb0001Mask |= 0x40 + } + if z.Function == "" { + zb0001Len-- + zb0001Mask |= 0x80 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "id" + o = append(o, 0xa2, 0x69, 0x64) + o = msgp.AppendUint32(o, z.Index) + if (zb0001Mask & 0x2) == 0 { // if not omitted + // string "text" + o = append(o, 0xa4, 0x74, 0x65, 0x78, 0x74) + o = msgp.AppendString(o, z.Text) + } + if (zb0001Mask & 0x4) == 0 { // if not omitted + // string "file" + o = append(o, 0xa4, 0x66, 0x69, 0x6c, 0x65) + o = msgp.AppendString(o, z.File) + } + if (zb0001Mask & 0x8) == 0 { // if not omitted + // string "line" + o = append(o, 0xa4, 0x6c, 0x69, 0x6e, 0x65) + o = msgp.AppendUint32(o, z.Line) + } + if (zb0001Mask & 0x10) == 0 { // if not omitted + // string "column" + o = append(o, 0xa6, 0x63, 0x6f, 0x6c, 0x75, 
0x6d, 0x6e) + o = msgp.AppendUint32(o, z.Column) + } + if (zb0001Mask & 0x20) == 0 { // if not omitted + // string "namespace" + o = append(o, 0xa9, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65) + o = msgp.AppendString(o, z.Namespace) + } + if (zb0001Mask & 0x40) == 0 { // if not omitted + // string "class_name" + o = append(o, 0xaa, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.ClassName) + } + if (zb0001Mask & 0x80) == 0 { // if not omitted + // string "function" + o = append(o, 0xa8, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.Function) + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *StackFrame) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "id": + z.Index, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Index") + return + } + case "text": + z.Text, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Text") + return + } + case "file": + z.File, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "File") + return + } + case "line": + z.Line, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Line") + return + } + case "column": + z.Column, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Column") + return + } + case "namespace": + z.Namespace, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Namespace") + return + } + case "class_name": + z.ClassName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ClassName") + return + } + case "function": + z.Function, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Function") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *StackFrame) Msgsize() (s int) { + s = 1 + 3 + msgp.Uint32Size + 5 + msgp.StringPrefixSize + len(z.Text) + 5 + msgp.StringPrefixSize + len(z.File) + 5 + msgp.Uint32Size + 7 + msgp.Uint32Size + 10 + msgp.StringPrefixSize + len(z.Namespace) + 11 + msgp.StringPrefixSize + len(z.ClassName) + 9 + msgp.StringPrefixSize + len(z.Function) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *StackTrace) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if cap((*z)) >= int(zb0002) { + (*z) = (*z)[:zb0002] + } else { + (*z) = make(StackTrace, zb0002) + } + for zb0001 := range *z { + err = (*z)[zb0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z StackTrace) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteArrayHeader(uint32(len(z))) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0003 := range z { + err = z[zb0003].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, zb0003) + 
			return
		}
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z StackTrace) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	o = msgp.AppendArrayHeader(o, uint32(len(z)))
	for zb0003 := range z {
		o, err = z[zb0003].MarshalMsg(o)
		if err != nil {
			err = msgp.WrapError(err, zb0003)
			return
		}
	}
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *StackTrace) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var zb0002 uint32
	zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if cap((*z)) >= int(zb0002) {
		(*z) = (*z)[:zb0002]
	} else {
		(*z) = make(StackTrace, zb0002)
	}
	for zb0001 := range *z {
		bts, err = (*z)[zb0001].UnmarshalMsg(bts)
		if err != nil {
			err = msgp.WrapError(err, zb0001)
			return
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z StackTrace) Msgsize() (s int) {
	s = msgp.ArrayHeaderSize
	for zb0003 := range z {
		s += z[zb0003].Msgsize()
	}
	return
}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/statsd.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/statsd.go
new file mode 100644
index 00000000..60f3d443
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/statsd.go
@@ -0,0 +1,38 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package internal
+
+import (
+	"time"
+
+	"github.com/DataDog/datadog-go/v5/statsd"
+)
+
+const DefaultDogstatsdAddr = "localhost:8125"
+
+type StatsdClient interface {
+	Incr(name string, tags []string, rate float64) error
+	Count(name string, value int64, tags []string, rate float64) error
+	CountWithTimestamp(name string, value int64, tags []string, rate float64, timestamp time.Time) error
+	Gauge(name string, value float64, tags []string, rate float64) error
+	GaugeWithTimestamp(name string, value float64, tags []string, rate float64, timestamp time.Time) error
+	DistributionSamples(name string, values []float64, tags []string, rate float64) error
+	Timing(name string, value time.Duration, tags []string, rate float64) error
+	Flush() error
+	Close() error
+}
+
+// NewStatsdClient returns a new statsd client with the provided address and global tags
+func NewStatsdClient(addr string, globalTags []string) (StatsdClient, error) {
+	if addr == "" {
+		addr = DefaultDogstatsdAddr
+	}
+	client, err := statsd.NewDirect(addr, statsd.WithMaxMessagesPerPayload(40), statsd.WithTags(globalTags))
+	if err != nil {
+		return &statsd.NoOpClientDirect{}, err
+	}
+	return client, nil
+}
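The `StatsdClient` interface above mirrors the subset of `datadog-go` that the tracer uses. As a minimal sketch of the intended call shape (the metric name and tags are illustrative, and since the package is `internal` to dd-trace-go this only demonstrates usage, not an importable API):

```go
// An empty addr falls back to DefaultDogstatsdAddr ("localhost:8125").
client, err := NewStatsdClient("", []string{"service:bitrise-webhooks"})
if err != nil {
	// On error a *statsd.NoOpClientDirect is returned, so client is still safe to use.
}
defer client.Close()

// Submit a counter at full sample rate; the tag is hypothetical.
client.Incr("webhook.received", []string{"provider:github"}, 1)
```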
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/README.md b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/README.md
new file mode 100644
index 00000000..2b8f69fd
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/README.md
@@ -0,0 +1,69 @@
+# Instrumentation Telemetry Client Architecture
+
+This documentation details the current architecture of the Instrumentation Telemetry Client of dd-trace-go and its capabilities.
+For API documentation, please refer to the [api.go](https://github.com/DataDog/dd-trace-go/blob/main/internal/telemetry/api.go) file.
+
+Please make sure to read the [Specification Documentation](https://github.com/DataDog/instrumentation-telemetry-api-docs/tree/main) before reading this document.
+
+### Data Flow
+
+```mermaid
+flowchart TD
+    linkStyle default interpolate basis
+    globalclient@{ shape: circle } -->|client == nil| recorder
+    globalclient -->|client != nil| client
+    recorder@{ shape: cyl } --> client@{ shape: circle }
+
+    subgraph datasources
+        integrations@{ shape: cyl }
+        configuration@{ shape: cyl }
+        dependencies@{ shape: cyl }
+        products@{ shape: cyl }
+        logs@{ shape: cyl }
+        metrics@{ shape: cyl }
+    end
+
+    client --> datasources
+
+    subgraph mapper
+        direction LR
+        app-started --> default[message-batch<br>heartbeat<br>extended-heartbeat] --> app-closing
+    end
+
+    flush@{ shape:rounded }
+
+    queue@{ shape: cyl } --> flush
+
+    datasources -..->|at flush| mapper --> flush
+    flush -->|if writer fails| queue
+
+    flush --> writer
+
+    writer --> agent@{ shape: das }
+    writer --> backend@{ shape: stadium }
+    agent --> backend
+```
+
+### Low Level Components
+
+- **`RingQueue[T]`**: The ring queue is a general-purpose data structure that supports growing buffers, a buffer pool, and overflow. It is used as a backing data structure for the payload queue, the recorder and distribution metrics.
+- **`Recorder[T]`**: The recorder is a `RingQueue[func(T)]` that stores functions until the actual value `T` has been created; calling `Replay(T)` dequeues all stored functions and applies them to the value `T`. By default, it can store 512 functions at most.
+- **`Range[T]`**: A simple data structure that stores a start and end value (a minimum and maximum interval) and has utility functions to help manage ranges.
+- **`SyncMap[K, V]`**: Typed version of `sync.Map`
+- **`SyncPool[T]`**: Typed version of `sync.Pool`
+
+### High Level Components
+
+- **GlobalClient**: The global client is a singleton used to access the client instance, creating it if it does not exist yet. The global client recorder records calls to the client until the `StartApp` function is called.
+- **Client**: The actual `Client` interface implementation. Its main job is to steer data to its corresponding data source; beyond that, it manages the config of the client and gathers data from the data sources to call `Flush` with it.
+- **Data Sources**: Each data source implements the `dataSource` interface, whose `Payload() transport.Payload` method flushes all data from the data source into a payload ready to be serialized and sent to the backend.
+  - **Integrations**: The integrations data source is responsible for creating the [`app-integrations-change`](https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/main/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas/app_integrations_change.md) payload. A simple slice and mutex are used as the backing store.
+  - **Configuration**: The configuration data source is responsible for creating the [`app-client-configuration-change`](https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/main/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas/app_client_configuration_change.md) payload. A map and mutex are used as the backing store.
+  - **Dependencies**: The dependencies data source is responsible for gathering data for the [`app-dependencies-loaded`](https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/main/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas/app_dependencies_loaded.md) payload. No public API is available for this, as it is done in-house with the `ClientConfig.DependencyLoader` function output.
+  - **Product**: The product data source is responsible for gathering data for the [`app-product-change`](https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/main/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas/app_product_change.md) payload. A map and mutex are used as the backing store.
+  - **Metrics**: The metrics data source is responsible for gathering data for the [`generate-metrics`](https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/main/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas/generate_metrics.md) payload. A `SyncMap[metrickey, metricHandle]` is used as the backing store. More on this in the metrics-specific section.
+  - **Distributions**: The distributions data source is responsible for gathering data for the [`distributions`](https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/main/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas/distributions.md) payload. A `SyncMap[distributionkey, distributionHandle]` is used as the backing store. More on this in the metrics-specific section.
+  - **Logs**: The logs data source is responsible for gathering data for the [`generate-logs`](https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/main/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas/logs.md) payload. A `SyncMap[logkey, logValue]` is used as the backing store. More on this in the logs-specific section.
+- **Mapper**: The mapper is responsible for creating the `app-started`, `app-closing`, `heartbeat`, `extended-heartbeat` and `message-batch` payloads, which need data from other payloads but not from the API user. Each mapper returns the mapper to be used in the next call to `Flush`.
+- **Writer**: The writer is responsible for sending the payloads to the backend. It is a simple interface that has a `Write` method that receives a `transport.Payload` and returns statistics about the write operation.
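Since the README stops short of a usage sample, here is a minimal, hypothetical wiring of the public API added in this diff (service name, env, version and metric name are illustrative; it assumes `DD_API_KEY` is set or an `AgentURL` is configured so the writer can build at least one endpoint):

```go
client, err := telemetry.NewClient("bitrise-webhooks", "prod", "1.0.0", telemetry.ClientConfig{})
if err != nil {
	// hypothetical error handling
	panic(err)
}
telemetry.StartApp(client) // sends app-started and replays calls recorded so far
defer telemetry.StopApp()  // sends app-closing and flushes the remaining queue

// Handles are cheap to obtain; Submit is safe for concurrent use.
telemetry.Count(telemetry.NamespaceTracers, "webhook.transformed", []string{"provider:github"}).Submit(1)
```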
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/api.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/api.go
new file mode 100644
index 00000000..aac5bd08
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/api.go
@@ -0,0 +1,176 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+// Package telemetry provides a thread-safe, low-overhead telemetry client following the instrumentation telemetry specification from Datadog.
+// Specification here: https://github.com/DataDog/instrumentation-telemetry-api-docs/tree/main
+//
+// The telemetry package has 6 main capabilities:
+// - Metrics: Support for [Count], [Rate], [Gauge], [Distribution] metrics.
+// - Logs: Support for Debug, Warn, Error logs with tags and stack traces via the subpackage [log] or the [Log] function.
+// - Product: Reporting product starts, stops and startup errors to the backend
+// - App Config: Registering and changing the configuration of the application and declaring its origin
+// - Integration: Loading and errors
+// - Dependencies: Sending all the dependencies of the application to the backend (for SCA purposes, for example)
+//
+// Each of these capabilities is exposed through the [Client] interface, but mainly through the package-level functions
+// that mirror and call the global client, which is started through the [StartApp] function.
+//
+// Before the [StartApp] function is called, all calls to the global client are recorded and replayed
+// synchronously when the [StartApp] function is called. The telemetry client is allowed to record at most 512 calls.
+//
+// At the end of the app's lifetime, if [tracer.Stop] is called, the client should be stopped with the [StopApp] function
+// so that all data is flushed to the backend appropriately.
+//
+// Note: No public API is available for the dependencies payloads, as this is done in-house with the `ClientConfig.DependencyLoader` function output.
+package telemetry
+
+import (
+	"io"
+
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport"
+)
+
+// Namespace describes a product to distinguish telemetry coming from
+// different products used by the same application
+type Namespace = transport.Namespace
+type Origin = transport.Origin
+type LogLevel = transport.LogLevel
+
+//goland:noinspection GoVarAndConstTypeMayBeOmitted Goland is having a hard time with the following const block, it keeps deleting the type
+const (
+	NamespaceGeneral Namespace = transport.NamespaceGeneral
+	NamespaceTracers Namespace = transport.NamespaceTracers
+	NamespaceProfilers Namespace = transport.NamespaceProfilers
+	NamespaceAppSec Namespace = transport.NamespaceAppSec
+	NamespaceIAST Namespace = transport.NamespaceIAST
+	NamespaceCIVisibility Namespace = transport.NamespaceCIVisibility
+	NamespaceMLOps Namespace = transport.NamespaceMLOps
+	NamespaceRUM Namespace = transport.NamespaceRUM
+)
+
+// Origin describes the source of a configuration change
+
+//goland:noinspection GoVarAndConstTypeMayBeOmitted Goland is having a hard time with the following const block, it keeps deleting the type
+const (
+	OriginDefault Origin = transport.OriginDefault
+	OriginCode Origin = transport.OriginCode
+	OriginDDConfig Origin = transport.OriginDDConfig
+	OriginEnvVar Origin = transport.OriginEnvVar
+	OriginRemoteConfig Origin = transport.OriginRemoteConfig
+	OriginLocalStableConfig Origin = transport.OriginLocalStableConfig
+	OriginManagedStableConfig Origin = transport.OriginManagedStableConfig
+)
+
+// EmptyID represents the absence of a configuration ID.
+// It can be assigned to the ID field of a Configuration when no ID is available or required.
+const EmptyID = ""
+
+// LogLevel describes the level of a log message
+
+//goland:noinspection GoVarAndConstTypeMayBeOmitted Goland is having a hard time with the following const block, it keeps deleting the type
+const (
+	LogDebug LogLevel = transport.LogLevelDebug
+	LogWarn LogLevel = transport.LogLevelWarn
+	LogError LogLevel = transport.LogLevelError
+)
+
+// MetricHandle can be used to submit different values for the same metric.
+// MetricHandle is used to reduce lock contention when submitting metrics.
+// This can also be used ephemerally to submit a single metric value like this:
+//
+//	telemetry.Count(telemetry.NamespaceAppSec, "my-count", []string{"tag1:true", "tag2:1.0"}).Submit(1.0)
+type MetricHandle interface {
+	// Submit submits a value to the metric handle.
+	Submit(value float64)
+	// Get returns the last value submitted to the metric handle.
+	Get() float64
+}
+
+// Integration is an integration that is configured to be traced.
+type Integration struct {
+	// Name is an arbitrary string that must stay constant for the integration.
+	Name string
+	// Version is the version of the integration/dependency that is being loaded.
+	Version string
+	// Error is the error that occurred while loading the integration. If this field is specified, the integration is
+	// considered to have been forcefully disabled because of the error.
+	Error string
+}
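The lock-contention note on `MetricHandle` above suggests resolving handles once and reusing them; a small hypothetical sketch (the variable, metric name and tag are invented for illustration):

```go
// Resolve the handle once, e.g. at package init or construction time.
var hookCounter = telemetry.Count(telemetry.NamespaceTracers, "webhook.received", []string{"provider:github"})

func recordHook() {
	// Hot path: no store lookup, just an atomic submit on the cached handle.
	hookCounter.Submit(1)
}
```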
+// Configuration is a key-value pair that is used to configure the application.
+type Configuration struct {
+	// Name is the key of the configuration.
+	Name string
+	// Value is the value of the configuration. It needs to be JSON-serializable.
+	Value any
+	// Origin is the source of the configuration change.
+	Origin Origin
+	// ID is the config ID of the configuration change.
+	ID string
+}
+
+// LogOption is a function that modifies the log message that is sent to the telemetry.
+type LogOption func(key *loggerKey, value *loggerValue)
+
+// Client constitutes all the functions available concurrently for the telemetry users. All methods are thread-safe.
+// This is an interface for easier testing, but all functions will be mirrored at the package level to call
+// the global client.
+type Client interface {
+	io.Closer
+
+	// Count obtains the metric handle for the given parameters, or creates a new one if none was created just yet.
+	// Tags cannot contain commas.
+	Count(namespace Namespace, name string, tags []string) MetricHandle
+
+	// Rate obtains the metric handle for the given parameters, or creates a new one if none was created just yet.
+	// Tags cannot contain commas.
+	Rate(namespace Namespace, name string, tags []string) MetricHandle
+
+	// Gauge obtains the metric handle for the given parameters, or creates a new one if none was created just yet.
+	// Tags cannot contain commas.
+	Gauge(namespace Namespace, name string, tags []string) MetricHandle
+
+	// Distribution obtains the metric handle for the given parameters, or creates a new one if none was created just yet.
+	// Tags cannot contain commas.
+	Distribution(namespace Namespace, name string, tags []string) MetricHandle
+
+	// Log sends a telemetry log at the desired level with the given text and options.
+	// Options include sending key-value pairs as tags, and a stack trace frozen from inside the Log function.
+	Log(level LogLevel, text string, options ...LogOption)
+
+	// ProductStarted declares a product to have started at the customer's request
+	ProductStarted(product Namespace)
+
+	// ProductStopped declares a product to have been stopped by the customer
+	ProductStopped(product Namespace)
+
+	// ProductStartError declares that a product could not start because of the following error
+	ProductStartError(product Namespace, err error)
+
+	// RegisterAppConfig adds a key-value pair to the app configuration and sends the change to telemetry.
+	// The value has to be JSON-serializable and the origin is the source of the change.
+	RegisterAppConfig(key string, value any, origin Origin)
+
+	// RegisterAppConfigs adds a list of key-value pairs to the app configuration and sends the change to telemetry.
+	// Same as RegisterAppConfig but for multiple values.
+	RegisterAppConfigs(kvs ...Configuration)
+
+	// MarkIntegrationAsLoaded marks an integration as loaded in the telemetry
+	MarkIntegrationAsLoaded(integration Integration)
+
+	// Flush flushes any remaining data to the writer.
+	Flush()
+
+	// AppStart sends the telemetry necessary to signal that the app is starting.
+	// Preferred use via [StartApp] package level function
+	AppStart()
+
+	// AppStop sends the telemetry necessary to signal that the app is stopping.
+	// Preferred use via [StopApp] package level function
+	AppStop()
+
+	// AddFlushTicker adds a function that is called at each telemetry Flush.
By default, every minute + AddFlushTicker(ticker func(Client)) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/client.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/client.go new file mode 100644 index 00000000..78bcc716 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/client.go @@ -0,0 +1,394 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package telemetry + +import ( + "errors" + "os" + "strconv" + "sync" + + "github.com/puzpuzpuz/xsync/v3" + + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport" +) + +// NewClient creates a new telemetry client with the given service, environment, and version and config. +func NewClient(service, env, version string, config ClientConfig) (Client, error) { + if service == "" { + return nil, errors.New("service name must not be empty") + } + + config = defaultConfig(config) + if err := config.validateConfig(); err != nil { + return nil, err + } + + return newClient(internal.TracerConfig{Service: service, Env: env, Version: version}, config) +} + +func newClient(tracerConfig internal.TracerConfig, config ClientConfig) (*client, error) { + writerConfig, err := newWriterConfig(config, tracerConfig) + if err != nil { + return nil, err + } + + writer, err := internal.NewWriter(writerConfig) + if err != nil { + return nil, err + } + + client := &client{ + tracerConfig: tracerConfig, + writer: writer, + clientConfig: config, + flushMapper: mapper.NewDefaultMapper(config.HeartbeatInterval, config.ExtendedHeartbeatInterval), + payloadQueue: internal.NewRingQueue[transport.Payload](config.PayloadQueueSize), + + dependencies: dependencies{ + DependencyLoader: config.DependencyLoader, + }, + metrics: metrics{ + store: xsync.NewMapOf[metricKey, metricHandle](xsync.WithPresize(knownmetrics.SizeWithFilter(func(decl knownmetrics.Declaration) bool { return decl.Type != transport.DistMetric }))), + skipAllowlist: config.Debug, + }, + distributions: distributions{ + store: xsync.NewMapOf[metricKey, *distribution](xsync.WithPresize(knownmetrics.SizeWithFilter(func(decl knownmetrics.Declaration) bool { return decl.Type == transport.DistMetric }))), + pool: internal.NewSyncPool(func() []float64 { return make([]float64, config.DistributionsSize.Min) }), + skipAllowlist: config.Debug, + queueSize: config.DistributionsSize, + }, + logger: logger{ + store: xsync.NewMapOf[loggerKey, *loggerValue](), + maxDistinctLogs: config.MaxDistinctLogs, + }, + } + + client.dataSources = append(client.dataSources, + &client.integrations, + &client.products, + &client.configuration, + &client.dependencies, + ) + + if config.LogsEnabled { + client.dataSources = append(client.dataSources, &client.logger) + } + + if config.MetricsEnabled { + client.dataSources = append(client.dataSources, &client.metrics, &client.distributions) + } + + client.flushTicker = internal.NewTicker(client.Flush, config.FlushInterval) + + return client, nil +} + +// dataSources is where the data that will be flushed is coming from. 
I.e metrics, logs, configurations, etc. +type dataSource interface { + Payload() transport.Payload +} + +type client struct { + tracerConfig internal.TracerConfig + clientConfig ClientConfig + + // Data sources + dataSources []dataSource + integrations integrations + products products + configuration configuration + dependencies dependencies + logger logger + metrics metrics + distributions distributions + + // flushMapper is the transformer to use for the next flush on the gathered bodies on this tick + flushMapper mapper.Mapper + flushMapperMu sync.Mutex + + // flushTicker is the ticker that triggers a call to client.Flush every flush interval + flushTicker *internal.Ticker + // flushMu is used to ensure that only one flush is happening at a time + flushMu sync.Mutex + + // writer is the writer to use to send the payloads to the backend or the agent + writer internal.Writer + + // payloadQueue is used when we cannot flush previously built payload for multiple reasons. + payloadQueue *internal.RingQueue[transport.Payload] + + // flushTickerFuncs are functions that are called just before flushing the data to the backend. + flushTickerFuncs []func(Client) + flushTickerFuncsMu sync.Mutex +} + +func (c *client) Log(level LogLevel, text string, options ...LogOption) { + if !c.clientConfig.LogsEnabled { + return + } + + c.logger.Add(level, text, options...) +} + +func (c *client) MarkIntegrationAsLoaded(integration Integration) { + c.integrations.Add(integration) +} + +func (c *client) Count(namespace Namespace, name string, tags []string) MetricHandle { + if !c.clientConfig.MetricsEnabled { + return noopMetricHandle{} + } + return c.metrics.LoadOrStore(namespace, transport.CountMetric, name, tags) +} + +func (c *client) Rate(namespace Namespace, name string, tags []string) MetricHandle { + if !c.clientConfig.MetricsEnabled { + return noopMetricHandle{} + } + return c.metrics.LoadOrStore(namespace, transport.RateMetric, name, tags) +} + +func (c *client) Gauge(namespace Namespace, name string, tags []string) MetricHandle { + if !c.clientConfig.MetricsEnabled { + return noopMetricHandle{} + } + return c.metrics.LoadOrStore(namespace, transport.GaugeMetric, name, tags) +} + +func (c *client) Distribution(namespace Namespace, name string, tags []string) MetricHandle { + if !c.clientConfig.MetricsEnabled { + return noopMetricHandle{} + } + return c.distributions.LoadOrStore(namespace, name, tags) +} + +func (c *client) ProductStarted(product Namespace) { + c.products.Add(product, true, nil) +} + +func (c *client) ProductStopped(product Namespace) { + c.products.Add(product, false, nil) +} + +func (c *client) ProductStartError(product Namespace, err error) { + c.products.Add(product, false, err) +} + +func (c *client) RegisterAppConfig(key string, value any, origin Origin) { + c.configuration.Add(Configuration{Name: key, Value: value, Origin: origin}) +} + +func (c *client) RegisterAppConfigs(kvs ...Configuration) { + for _, value := range kvs { + c.configuration.Add(value) + } +} + +func (c *client) AddFlushTicker(f func(Client)) { + c.flushTickerFuncsMu.Lock() + defer c.flushTickerFuncsMu.Unlock() + c.flushTickerFuncs = append(c.flushTickerFuncs, f) +} + +func (c *client) Config() ClientConfig { + return c.clientConfig +} + +// Flush sends all the data sources before calling flush +// This function is called by the flushTicker so it should not panic, or it will crash the whole customer application. +// If a panic occurs, we stop the telemetry and log the error. 
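+// (Specifically, the recover handler below sets telemetryClientDisabled and, when this
+// client is still the global one, swaps the global client to nil, so telemetry stays
+// off for the rest of the process.)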
+func (c *client) Flush() { + defer func() { + r := recover() + if r == nil { + return + } + if err, ok := r.(error); ok { + log.Warn("panic while flushing telemetry data, stopping telemetry: %s", err.Error()) + } else { + log.Warn("panic while flushing telemetry data, stopping telemetry!") + } + telemetryClientDisabled = true + if gc, ok := GlobalClient().(*client); ok && gc == c { + SwapClient(nil) + } + }() + + // We call the flushTickerFuncs before flushing the data for data sources + { + c.flushTickerFuncsMu.Lock() + defer c.flushTickerFuncsMu.Unlock() + + for _, f := range c.flushTickerFuncs { + f(c) + } + } + + payloads := make([]transport.Payload, 0, 8) + for _, ds := range c.dataSources { + if payload := ds.Payload(); payload != nil { + payloads = append(payloads, payload) + } + } + + nbBytes, err := c.flush(payloads) + if err != nil { + // We check if the failure is about telemetry or appsec data to log the error at the right level + var dependenciesFound bool + for _, payload := range payloads { + if payload.RequestType() == transport.RequestTypeAppDependenciesLoaded { + dependenciesFound = true + break + } + } + if dependenciesFound { + log.Warn("appsec: error while flushing SCA Security Data: %s", err.Error()) + } else { + log.Debug("telemetry: error while flushing telemetry data: %s", err.Error()) + } + + return + } + + if c.clientConfig.Debug { + log.Debug("telemetry: flushed %d bytes of data", nbBytes) + } +} + +func (c *client) transform(payloads []transport.Payload) []transport.Payload { + c.flushMapperMu.Lock() + defer c.flushMapperMu.Unlock() + payloads, c.flushMapper = c.flushMapper.Transform(payloads) + return payloads +} + +// flush sends all the data sources to the writer after having sent them through the [transform] function. +// It returns the amount of bytes sent to the writer. +func (c *client) flush(payloads []transport.Payload) (int, error) { + c.flushMu.Lock() + defer c.flushMu.Unlock() + payloads = c.transform(payloads) + + if c.payloadQueue.IsEmpty() && len(payloads) == 0 { + return 0, nil + } + + emptyQueue := c.payloadQueue.IsEmpty() + // We enqueue the new payloads to preserve the order of the payloads + c.payloadQueue.Enqueue(payloads...) + payloads = c.payloadQueue.Flush() + + var ( + nbBytes int + speedIncreased bool + failedCalls []internal.EndpointRequestResult + ) + + for i, payload := range payloads { + results, err := c.writer.Flush(payload) + c.computeFlushMetrics(results, err) + if err != nil { + // We stop flushing when we encounter a fatal error, put the bodies in the queue and return the error + if results[len(results)-1].StatusCode == 413 { // If the payload is too large we have no way to divide it, we can only skip it... + log.Warn("telemetry: tried sending a payload that was too large, dropping it") + continue + } + c.payloadQueue.Enqueue(payloads[i:]...) + return nbBytes, err + } + + failedCalls = append(failedCalls, results[:len(results)-1]...) 
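+		// All results before the last one are failed attempts against fallback
+		// endpoints; the last result is the call that actually succeeded.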
+ successfulCall := results[len(results)-1] + + if !speedIncreased && successfulCall.PayloadByteSize > c.clientConfig.EarlyFlushPayloadSize { + // We increase the speed of the flushTicker to try to flush the remaining bodies faster as we are at risk of sending too large bodies to the backend + c.flushTicker.CanIncreaseSpeed() + speedIncreased = true + } + + nbBytes += successfulCall.PayloadByteSize + } + + if emptyQueue && !speedIncreased { // If we did not send a very big payload, and we have no payloads + c.flushTicker.CanDecreaseSpeed() + } + + if len(failedCalls) > 0 { + var errs []error + for _, call := range failedCalls { + errs = append(errs, call.Error) + } + log.Debug("non-fatal error(s) while flushing telemetry data: %v", errors.Join(errs...).Error()) + } + + return nbBytes, nil +} + +// computeFlushMetrics computes and submits the metrics for the flush operation using the output from the writer.Flush method. +// It will submit the number of requests, responses, errors, the number of bytes sent and the duration of the call that was successful. +func (c *client) computeFlushMetrics(results []internal.EndpointRequestResult, reason error) { + if !c.clientConfig.internalMetricsEnabled { + return + } + + indexToEndpoint := func(i int) string { + if i == 0 && c.clientConfig.AgentURL != "" { + return "agent" + } + return "agentless" + } + + for i, result := range results { + endpoint := "endpoint:" + indexToEndpoint(i) + c.Count(transport.NamespaceTelemetry, "telemetry_api.requests", []string{endpoint}).Submit(1) + if result.StatusCode != 0 { + c.Count(transport.NamespaceTelemetry, "telemetry_api.responses", []string{endpoint, "status_code:" + strconv.Itoa(result.StatusCode)}).Submit(1) + } + + if result.Error != nil { + typ := "type:network" + if os.IsTimeout(result.Error) { + typ = "type:timeout" + } + var writerStatusCodeError *internal.WriterStatusCodeError + if errors.As(result.Error, &writerStatusCodeError) { + typ = "type:status_code" + } + c.Count(transport.NamespaceTelemetry, "telemetry_api.errors", []string{endpoint, typ}).Submit(1) + } + } + + if reason != nil { + return + } + + successfulCall := results[len(results)-1] + endpoint := "endpoint:" + indexToEndpoint(len(results)-1) + c.Distribution(transport.NamespaceTelemetry, "telemetry_api.bytes", []string{endpoint}).Submit(float64(successfulCall.PayloadByteSize)) + c.Distribution(transport.NamespaceTelemetry, "telemetry_api.ms", []string{endpoint}).Submit(float64(successfulCall.CallDuration.Milliseconds())) +} + +func (c *client) AppStart() { + c.flushMapperMu.Lock() + defer c.flushMapperMu.Unlock() + c.flushMapper = mapper.NewAppStartedMapper(c.flushMapper) +} + +func (c *client) AppStop() { + c.flushMapperMu.Lock() + defer c.flushMapperMu.Unlock() + c.flushMapper = mapper.NewAppClosingMapper(c.flushMapper) +} + +func (c *client) Close() error { + c.flushTicker.Stop() + return nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/client_config.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/client_config.go new file mode 100644 index 00000000..844b3224 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/client_config.go @@ -0,0 +1,287 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
+
+package telemetry
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"runtime/debug"
+	"time"
+
+	globalinternal "github.com/DataDog/dd-trace-go/v2/internal"
+	"github.com/DataDog/dd-trace-go/v2/internal/env"
+	"github.com/DataDog/dd-trace-go/v2/internal/log"
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal"
+)
+
+type ClientConfig struct {
+	// DependencyLoader determines how dependency data is sent via telemetry.
+	// The default value is [debug.ReadBuildInfo] since Application Security Monitoring uses this data to detect vulnerabilities in the ASM-SCA product.
+	// To disable this feature, please implement a function that returns nil, false.
+	// This can only be controlled via the env var DD_TELEMETRY_DEPENDENCY_COLLECTION_ENABLED.
+	DependencyLoader func() (*debug.BuildInfo, bool)
+
+	// MetricsEnabled determines whether metrics are sent via telemetry.
+	// If false, libraries should not send the generate-metrics or distributions events.
+	// This can only be controlled via the env var DD_TELEMETRY_METRICS_ENABLED.
+	MetricsEnabled bool
+
+	// LogsEnabled determines whether logs are sent via telemetry.
+	// This can only be controlled via the env var DD_TELEMETRY_LOG_COLLECTION_ENABLED.
+	LogsEnabled bool
+
+	// AgentlessURL is the full URL to the agentless telemetry endpoint. (optional)
+	// Defaults to https://instrumentation-telemetry-intake.datadoghq.com/api/v2/apmtelemetry
+	AgentlessURL string
+
+	// AgentURL is the URL of the agent to send telemetry to. (optional)
+	// If the AgentURL is not set, the telemetry client will not attempt to connect to the agent before sending to the agentless endpoint.
+	AgentURL string
+
+	// HTTPClient is the http client to use for sending telemetry, defaults to a http.DefaultClient copy.
+	HTTPClient *http.Client
+
+	// HeartbeatInterval is the interval at which to send a heartbeat payload, defaults to 60s.
+	// The maximum value is 60s.
+	HeartbeatInterval time.Duration
+
+	// ExtendedHeartbeatInterval is the interval at which to send an extended heartbeat payload, defaults to 24h.
+	ExtendedHeartbeatInterval time.Duration
+
+	// FlushInterval is the interval at which the client flushes the data.
+	// By default, the client will start to Flush at 60s intervals and will reduce the interval based on the load until it hits 15s.
+	// Neither value can be higher than 60s because the heartbeat needs to be sent at least every 60s. Values will be clamped otherwise.
+	FlushInterval internal.Range[time.Duration]
+
+	// PayloadQueueSize is the size of the payload queue. Default range is [4, 32].
+	PayloadQueueSize internal.Range[int]
+
+	// DistributionsSize is the size of the distribution queue. Default range is [2^8, 2^14].
+	DistributionsSize internal.Range[int]
+
+	// Debug enables debug mode for the telemetry client and sends it to the backend so it logs the request. The
+	// DD_TELEMETRY_DEBUG environment variable, when set to a truthy value, overrides this setting.
+	Debug bool
+
+	// APIKey is the API key to use for sending telemetry to the agentless endpoint. (using DD_API_KEY env var by default)
+	APIKey string
+
+	// EarlyFlushPayloadSize is the size of the payload that will trigger an early flush.
+	// This is necessary because the backend won't allow bodies larger than 5MB.
+	// The default value here is 2MB to take into account the large inaccuracy in estimating the size of bodies.
+	EarlyFlushPayloadSize int
+
+	// MaxDistinctLogs is the maximum number of logs with distinct message, level and tags that can be stored per flush window.
+	// If the limit is reached, logs will be dropped and a log will be sent to the backend about it.
+	// The default value is 256.
+	MaxDistinctLogs int32
+
+	// internalMetricsEnabled determines whether client stats metrics are sent via telemetry. Defaults to true.
+	internalMetricsEnabled bool
+}
+
+var (
+	// agentlessURL is the endpoint used to send telemetry in an agentless environment. It is
+	// also the default URL in case connecting to the agent URL fails.
+	agentlessURL = "https://instrumentation-telemetry-intake.datadoghq.com/api/v2/apmtelemetry"
+
+	// defaultHeartbeatInterval is the default interval at which the agent sends a heartbeat.
+	defaultHeartbeatInterval = time.Minute
+
+	// defaultExtendedHeartbeatInterval is the default interval at which the agent sends an extended heartbeat.
+	defaultExtendedHeartbeatInterval = 24 * time.Hour
+
+	// defaultFlushIntervalRange is the default interval range at which the client flushes the data.
+	defaultFlushIntervalRange = internal.Range[time.Duration]{
+		Min: 15 * time.Second,
+		Max: 60 * time.Second,
+	}
+
+	defaultAuthorizedHearbeatRange = internal.Range[time.Duration]{
+		Min: time.Microsecond,
+		Max: time.Minute,
+	}
+
+	agentProxyAPIPath = "/telemetry/proxy/api/v2/apmtelemetry"
+
+	defaultEarlyFlushPayloadSize = 2 * 1024 * 1024 // 2MB
+
+	// authorizedPayloadSize.Max is specified by the backend to be 5MB. The goal is to never reach this value, otherwise our data will be silently dropped.
+	authorizedPayloadSize = internal.Range[int]{
+		Min: 0,
+		Max: 5 * 1024 * 1024, // 5MB
+	}
+
+	// TODO: tweak this value once we get real telemetry data from the telemetry client
+	// This means that, by default, we incur data loss if we spend ~30mins without flushing; considering we send telemetry data, this looks reasonable.
+	// This also means that in the worst case scenario, memory-wise, the app is stabilized after running for 30mins.
+	// Ideally both values should be powers of 2 because of the way the ring queue grows.
+	defaultPayloadQueueSize = internal.Range[int]{
+		Min: 4,
+		Max: 32,
+	}
+
+	// TODO: tweak this value once we get telemetry data from the telemetry client
+	// The default max size is a 2^14 array of float64 (2^3 bytes each), which makes a distribution a 128KB byte array _at worst_.
+	// Considering we add a point per user request on a simple http server, we would be losing data after 2^14 requests per minute, or about 280 requests per second, or under 3ms per request.
+	// If this throughput is constant, the telemetry client flush ticker speed will increase to, at best, double twice to flush 15 seconds of data each time.
+	// Which will bring our max throughput to 1100 points per second, or about 750µs per request.
+	distributionsSize = internal.Range[int]{
+		Min: 1 << 8,
+		Max: 1 << 14,
+	}
+
+	// defaultMaxDistinctLogs is the default maximum number of logs with distinct message, level and tags that can be stored in a flush window. 256 per minute is already plenty; it's just to avoid memory leaks.
+ defaultMaxDistinctLogs = int32(256) +) + +func (config ClientConfig) validateConfig() error { + if config.HeartbeatInterval > time.Minute { + return fmt.Errorf("HeartbeatInterval cannot be higher than 60s, got %v", config.HeartbeatInterval) + } + + if config.FlushInterval.Min > time.Minute || config.FlushInterval.Max > time.Minute { + return fmt.Errorf("FlushIntervalRange cannot be higher than 60s, got Min: %v, Max: %v", config.FlushInterval.Min, config.FlushInterval.Max) + } + + if !config.FlushInterval.IsOrdered() { + return fmt.Errorf("FlushIntervalRange Min cannot be higher than Max, got Min: %v, Max: %v", config.FlushInterval.Min, config.FlushInterval.Max) + } + + if !authorizedPayloadSize.Contains(config.EarlyFlushPayloadSize) { + return fmt.Errorf("EarlyFlushPayloadSize must be between 0 and 5MB, got %v", config.EarlyFlushPayloadSize) + } + + return nil +} + +// defaultConfig returns a ClientConfig with default values set. +func defaultConfig(config ClientConfig) ClientConfig { + config.Debug = config.Debug || globalinternal.BoolEnv("DD_TELEMETRY_DEBUG", false) + + if config.AgentlessURL == "" { + config.AgentlessURL = agentlessURL + } + + if config.APIKey == "" { + config.APIKey = env.Get("DD_API_KEY") + } + + if config.FlushInterval.Min == 0 { + config.FlushInterval.Min = defaultFlushIntervalRange.Min + } else { + config.FlushInterval.Min = defaultAuthorizedHearbeatRange.Clamp(config.FlushInterval.Min) + } + + if config.FlushInterval.Max == 0 { + config.FlushInterval.Max = defaultFlushIntervalRange.Max + } else { + config.FlushInterval.Max = defaultAuthorizedHearbeatRange.Clamp(config.FlushInterval.Max) + } + + heartBeatInterval := defaultHeartbeatInterval + if config.HeartbeatInterval != 0 { + heartBeatInterval = config.HeartbeatInterval + } + + envVal := globalinternal.FloatEnv("DD_TELEMETRY_HEARTBEAT_INTERVAL", heartBeatInterval.Seconds()) + config.HeartbeatInterval = defaultAuthorizedHearbeatRange.Clamp(time.Duration(envVal * float64(time.Second))) + if config.HeartbeatInterval != defaultHeartbeatInterval { + log.Debug("telemetry: using custom heartbeat interval %s", config.HeartbeatInterval) + } + // Make sure we flush at least at each heartbeat interval + config.FlushInterval = config.FlushInterval.ReduceMax(config.HeartbeatInterval) + + if config.HeartbeatInterval == config.FlushInterval.Max { // Since the go ticker is not exact when it comes to the interval, we need to make sure the heartbeat is actually sent + config.HeartbeatInterval = config.HeartbeatInterval - 10*time.Millisecond + } + + if config.DependencyLoader == nil && globalinternal.BoolEnv("DD_TELEMETRY_DEPENDENCY_COLLECTION_ENABLED", true) { + config.DependencyLoader = debug.ReadBuildInfo + } + + if !config.MetricsEnabled { + config.MetricsEnabled = globalinternal.BoolEnv("DD_TELEMETRY_METRICS_ENABLED", true) + } + + if !config.LogsEnabled { + config.LogsEnabled = globalinternal.BoolEnv("DD_TELEMETRY_LOG_COLLECTION_ENABLED", true) + } + + if !config.internalMetricsEnabled { + config.internalMetricsEnabled = true + } + + if config.EarlyFlushPayloadSize == 0 { + config.EarlyFlushPayloadSize = defaultEarlyFlushPayloadSize + } + + if config.ExtendedHeartbeatInterval == 0 { + config.ExtendedHeartbeatInterval = defaultExtendedHeartbeatInterval + } + + if config.PayloadQueueSize.Min == 0 { + config.PayloadQueueSize.Min = defaultPayloadQueueSize.Min + } + + if config.PayloadQueueSize.Max == 0 { + config.PayloadQueueSize.Max = defaultPayloadQueueSize.Max + } + + if config.DistributionsSize.Min == 0 { + 
config.DistributionsSize.Min = distributionsSize.Min + } + + if config.DistributionsSize.Max == 0 { + config.DistributionsSize.Max = distributionsSize.Max + } + + if config.MaxDistinctLogs == 0 { + config.MaxDistinctLogs = defaultMaxDistinctLogs + } + + return config +} + +func newWriterConfig(config ClientConfig, tracerConfig internal.TracerConfig) (internal.WriterConfig, error) { + endpoints := make([]*http.Request, 0, 2) + if config.AgentURL != "" { + baseURL, err := url.Parse(config.AgentURL) + if err != nil { + return internal.WriterConfig{}, fmt.Errorf("invalid agent URL: %s", err.Error()) + } + + baseURL.Path = agentProxyAPIPath + request, err := http.NewRequest(http.MethodPost, baseURL.String(), nil) + if err != nil { + return internal.WriterConfig{}, fmt.Errorf("failed to create request: %s", err.Error()) + } + + endpoints = append(endpoints, request) + } + + if config.AgentlessURL != "" && config.APIKey != "" { + request, err := http.NewRequest(http.MethodPost, config.AgentlessURL, nil) + if err != nil { + return internal.WriterConfig{}, fmt.Errorf("failed to create request: %s", err.Error()) + } + + request.Header.Set("DD-API-KEY", config.APIKey) + endpoints = append(endpoints, request) + } + + if len(endpoints) == 0 { + return internal.WriterConfig{}, fmt.Errorf("telemetry: could not build any endpoint, please provide an AgentURL or an APIKey with an optional AgentlessURL") + } + + return internal.WriterConfig{ + TracerConfig: tracerConfig, + Endpoints: endpoints, + HTTPClient: config.HTTPClient, + Debug: config.Debug, + }, nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/configuration.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/configuration.go new file mode 100644 index 00000000..ed5668f7 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/configuration.go @@ -0,0 +1,187 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package telemetry + +import ( + "encoding/json" + "fmt" + "math" + "reflect" + "slices" + "strings" + "sync" + + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport" +) + +type configuration struct { + mu sync.Mutex + config map[string]transport.ConfKeyValue + seqID uint64 +} + +func idOrEmpty(id string) string { + if id == EmptyID { + return "" + } + return id +} + +func (c *configuration) Add(kv Configuration) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.config == nil { + c.config = make(map[string]transport.ConfKeyValue) + } + + ID := idOrEmpty(kv.ID) + + c.config[kv.Name] = transport.ConfKeyValue{ + Name: kv.Name, + Value: kv.Value, + Origin: kv.Origin, + ID: ID, + } +} + +func (c *configuration) Payload() transport.Payload { + c.mu.Lock() + defer c.mu.Unlock() + if len(c.config) == 0 { + return nil + } + + configs := make([]transport.ConfKeyValue, len(c.config)) + idx := 0 + for _, conf := range c.config { + if conf.Origin == "" { + conf.Origin = transport.OriginDefault + } + conf.Value = SanitizeConfigValue(conf.Value) + conf.SeqID = c.seqID + configs[idx] = conf + idx++ + c.seqID++ + delete(c.config, conf.Name) + } + + return transport.AppClientConfigurationChange{ + Configuration: configs, + } +} + +// SanitizeConfigValue sanitizes the value of a configuration key to ensure it can be marshalled. 
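+// For example, with the rules below a []string{"a", "b"} becomes "a,b", other slices
+// and arrays render as "[a b]", and maps render as a sorted "k1:v1,k2:v2" string.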
+func SanitizeConfigValue(value any) any { + if value == nil { + return "" + } + + // Skip reflection for basic types + switch val := value.(type) { + case string, bool, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return val + case float32: + if math.IsNaN(float64(val)) || math.IsInf(float64(val), 0) { + return "" + } + return val + case float64: + // https://github.com/golang/go/issues/59627 + if math.IsNaN(val) || math.IsInf(val, 0) { + return nil + } + return val + case []string: + return strings.Join(val, ",") // Retro compatibility with old code + } + + if _, ok := value.(json.Marshaler); ok { + return value + } + + if v, ok := value.(fmt.Stringer); ok { + return v.String() + } + + valueOf := reflect.ValueOf(value) + + // Unwrap pointers and interfaces up to 10 levels deep. + for i := 0; i < 10; i++ { + if valueOf.Kind() == reflect.Ptr || valueOf.Kind() == reflect.Interface { + valueOf = valueOf.Elem() + } else { + break + } + } + + switch { + case valueOf.Kind() == reflect.Slice, valueOf.Kind() == reflect.Array: + var sb strings.Builder + sb.WriteString("[") + for i := 0; i < valueOf.Len(); i++ { + if i > 0 { + sb.WriteString(" ") + } + sb.WriteString(fmt.Sprintf("%v", valueOf.Index(i).Interface())) + } + sb.WriteString("]") + return sb.String() + case valueOf.Kind() == reflect.Map: + kvPair := make([]struct { + key string + value string + }, valueOf.Len()) + + iter := valueOf.MapRange() + for i := 0; iter.Next(); i++ { + kvPair[i].key = fmt.Sprintf("%v", iter.Key().Interface()) + kvPair[i].value = fmt.Sprintf("%v", iter.Value().Interface()) + } + + slices.SortStableFunc(kvPair, func(a, b struct { + key string + value string + }) int { + return strings.Compare(a.key, b.key) + }) + + var sb strings.Builder + for _, k := range kvPair { + if sb.Len() > 0 { + sb.WriteString(",") + } + sb.WriteString(k.key) + sb.WriteString(":") + sb.WriteString(k.value) + } + + return sb.String() + } + + return fmt.Sprintf("%v", value) +} + +func EnvToTelemetryName(env string) string { + switch env { + case "DD_TRACE_DEBUG": + return "trace_debug_enabled" + case "DD_APM_TRACING_ENABLED": + return "apm_tracing_enabled" + case "DD_RUNTIME_METRICS_ENABLED": + return "runtime_metrics_enabled" + case "DD_DATA_STREAMS_ENABLED": + return "data_streams_enabled" + case "DD_APPSEC_ENABLED": + return "appsec_enabled" + case "DD_DYNAMIC_INSTRUMENTATION_ENABLED": + return "dynamic_instrumentation_enabled" + case "DD_PROFILING_ENABLED": + return "profiling_enabled" + default: + return env + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/dependencies.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/dependencies.go new file mode 100644 index 00000000..1f105d3b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/dependencies.go @@ -0,0 +1,94 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
+ +package telemetry + +import ( + "runtime/debug" + "strings" + "sync" + + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport" +) + +type dependencies struct { + DependencyLoader func() (*debug.BuildInfo, bool) + + once sync.Once + + mu sync.Mutex + payloads []transport.Payload +} + +func (d *dependencies) Payload() transport.Payload { + d.once.Do(func() { + deps := d.loadDeps() + // Requirement described here: + // https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/main/GeneratedDocumentation/ApiDocs/v2/producing-telemetry.md#app-dependencies-loaded + const maxPerPayload = 2000 + if len(deps) > maxPerPayload { + log.Debug("telemetry: too many (%d) dependencies to send, sending over multiple bodies", len(deps)) + } + + for i := 0; i < len(deps); i += maxPerPayload { + end := min(i+maxPerPayload, len(deps)) + + d.payloads = append(d.payloads, transport.AppDependenciesLoaded{ + Dependencies: deps[i:end], + }) + } + }) + + d.mu.Lock() + defer d.mu.Unlock() + + if len(d.payloads) == 0 { + return nil + } + + // return payloads one by one + payloadZero := d.payloads[0] + if len(d.payloads) == 1 { + d.payloads = nil + } + + if len(d.payloads) > 1 { + d.payloads = d.payloads[1:] + } + + return payloadZero +} + +// loadDeps returns the dependencies from the DependencyLoader, formatted for telemetry intake. +func (d *dependencies) loadDeps() []transport.Dependency { + if d.DependencyLoader == nil { + return nil + } + + deps, ok := d.DependencyLoader() + if !ok { + log.Debug("telemetry: could not read build info, no dependencies will be reported") + return nil + } + + transportDeps := make([]transport.Dependency, 0, len(deps.Deps)) + for _, dep := range deps.Deps { + if dep == nil { + continue + } + + if dep.Replace != nil && dep.Replace.Version != "" { + dep = dep.Replace + } + + transportDeps = append(transportDeps, transport.Dependency{ + Name: dep.Path, + Version: strings.TrimPrefix(dep.Version, "v"), + }) + } + + return transportDeps +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/distributions.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/distributions.go new file mode 100644 index 00000000..8cb084cd --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/distributions.go @@ -0,0 +1,94 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package telemetry + +import ( + "fmt" + "sync" + + "github.com/puzpuzpuz/xsync/v3" + + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport" +) + +type distributions struct { + store *xsync.MapOf[metricKey, *distribution] + pool *internal.SyncPool[[]float64] + queueSize internal.Range[int] + skipAllowlist bool // Debugging feature to skip the allowlist of known metrics +} + +// LoadOrStore returns a MetricHandle for the given distribution metric. If the metric key does not exist, it will be created. 
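+// New keys are validated against the known-metrics allowlist (unless the debug
+// skipAllowlist flag is set); a failed validation only logs a warning, and the
+// handle is still returned and usable.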
+func (d *distributions) LoadOrStore(namespace Namespace, name string, tags []string) MetricHandle { + kind := transport.DistMetric + key := newMetricKey(namespace, kind, name, tags) + handle, loaded := d.store.LoadOrCompute(key, func() *distribution { + return &distribution{ + key: key, + values: internal.NewRingQueueWithPool[float64](d.queueSize, d.pool), + } + }) + if !loaded && !d.skipAllowlist { // The metric is new: validate and log issues about it + if err := validateMetricKey(namespace, kind, name, tags); err != nil { + log.Warn("telemetry: %s", err.Error()) + } + } + + return handle +} + +func (d *distributions) Payload() transport.Payload { + series := make([]transport.DistributionSeries, 0, d.store.Size()) + d.store.Range(func(_ metricKey, handle *distribution) bool { + if payload := handle.payload(); payload.Namespace != "" { + series = append(series, payload) + } + return true + }) + + if len(series) == 0 { + return nil + } + + return transport.Distributions{Series: series, SkipAllowlist: d.skipAllowlist} +} + +type distribution struct { + key metricKey + values *internal.RingQueue[float64] + + logLoss sync.Once +} + +func (d *distribution) Submit(value float64) { + if !d.values.Enqueue(value) { + d.logLoss.Do(func() { + log.Debug("telemetry: distribution %q is losing values because the buffer is full", d.key.name) + Log(LogWarn, fmt.Sprintf("telemetry: distribution %s is losing values because the buffer is full", d.key), WithStacktrace()) + }) + } +} + +func (d *distribution) Get() float64 { + return d.values.ReversePeek() +} + +func (d *distribution) payload() transport.DistributionSeries { + if d.values.IsEmpty() { + return transport.DistributionSeries{} + } + + return transport.DistributionSeries{ + Metric: d.key.name, + Namespace: d.key.namespace, + Tags: d.key.SplitTags(), + Common: knownmetrics.IsCommonMetric(d.key.namespace, d.key.kind, d.key.name), + Points: d.values.Flush(), + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/globalclient.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/globalclient.go new file mode 100644 index 00000000..591894d5 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/globalclient.go @@ -0,0 +1,273 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package telemetry + +import ( + "sync" + "sync/atomic" + + "github.com/puzpuzpuz/xsync/v3" + + globalinternal "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport" +) + +var ( + globalClient atomic.Pointer[Client] + + // globalClientRecorder contains all actions done on the global client done before StartApp() with an actual client object is called + globalClientRecorder = internal.NewRecorder[Client]() + + // metricsHandleSwappablePointers contains all the swappableMetricHandle, used to replay actions done before the actual MetricHandle is set + metricsHandleSwappablePointers = xsync.NewMapOf[metricKey, *swappableMetricHandle](xsync.WithPresize(knownmetrics.Size())) +) + +// GlobalClient returns the global telemetry client. 
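+// Callers must handle the nil case, since no client exists before StartApp is
+// called. A hedged sketch:
+//
+//	if c := GlobalClient(); c != nil {
+//		c.Flush()
+//	}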
+func GlobalClient() Client {
+	client := globalClient.Load()
+	if client == nil {
+		return nil
+	}
+	return *client
+}
+
+// StartApp sets the given client as the global (*client), sends the app-started telemetry,
+// and then calls client.Flush asynchronously.
+func StartApp(client Client) {
+	if Disabled() {
+		return
+	}
+
+	if GlobalClient() != nil || SwapClient(client) != nil {
+		log.Debug("telemetry: StartApp called multiple times, ignoring")
+		return
+	}
+
+	client.AppStart()
+	go client.Flush()
+}
+
+// SwapClient swaps the global client with the given client and closes the old (*client).
+func SwapClient(client Client) Client {
+	if Disabled() {
+		return nil
+	}
+
+	oldClientPtr := globalClient.Swap(&client)
+	var oldClient Client
+	if oldClientPtr != nil && *oldClientPtr != nil {
+		oldClient = *oldClientPtr
+	}
+
+	if oldClient != nil {
+		oldClient.Close()
+	}
+
+	if client == nil {
+		return oldClient
+	}
+
+	globalClientRecorder.Replay(client)
+	// Swap all metrics hot pointers to the new MetricHandle
+	metricsHandleSwappablePointers.Range(func(_ metricKey, value *swappableMetricHandle) bool {
+		value.swap(value.maker(client))
+		return true
+	})
+
+	return oldClient
+}
+
+// MockClient swaps the global client with the given client and clears the recorder to make sure external calls are not replayed.
+// It returns a function that can be used to swap back the global client.
+func MockClient(client Client) func() {
+	globalClientRecorder.Clear()
+	metricsHandleSwappablePointers.Clear()
+
+	oldClient := SwapClient(client)
+	return func() {
+		SwapClient(oldClient)
+	}
+}
+
+// StopApp sends the app-stopped telemetry, flushes the queue, and closes the global (*client).
+func StopApp() {
+	if client := globalClient.Swap(nil); client != nil && *client != nil {
+		(*client).AppStop()
+		(*client).Flush()
+		(*client).Close()
+	}
+}
+
+var telemetryClientDisabled = !globalinternal.BoolEnv("DD_INSTRUMENTATION_TELEMETRY_ENABLED", true)
+
+// Disabled returns whether instrumentation telemetry is disabled
+// according to the DD_INSTRUMENTATION_TELEMETRY_ENABLED env var.
+func Disabled() bool {
+	return telemetryClientDisabled
+}
+
+// Count creates a new metric handle for the given parameters that can be used to submit values.
+// Count will always return a [MetricHandle], even if telemetry is disabled or the client has yet to start.
+// The [MetricHandle] is then swapped with the actual [MetricHandle] once the client is started.
+func Count(namespace Namespace, name string, tags []string) MetricHandle {
+	return globalClientNewMetric(namespace, transport.CountMetric, name, tags)
+}
+
+// Rate creates a new metric handle for the given parameters that can be used to submit values.
+// Rate will always return a [MetricHandle], even if telemetry is disabled or the client has yet to start.
+// The [MetricHandle] is then swapped with the actual [MetricHandle] once the client is started.
+func Rate(namespace Namespace, name string, tags []string) MetricHandle {
+	return globalClientNewMetric(namespace, transport.RateMetric, name, tags)
+}
+
+// Gauge creates a new metric handle for the given parameters that can be used to submit values.
+// Gauge will always return a [MetricHandle], even if telemetry is disabled or the client has yet to start.
+// The [MetricHandle] is then swapped with the actual [MetricHandle] once the client is started.
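+// For example (a sketch; the metric name and tags are illustrative):
+//
+//	g := Gauge(NamespaceTracers, "stats_buckets", nil)
+//	g.Submit(3) // safe before StartApp; the call is recorded and replayed later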
+func Gauge(namespace Namespace, name string, tags []string) MetricHandle {
+	return globalClientNewMetric(namespace, transport.GaugeMetric, name, tags)
+}
+
+// Distribution creates a new metric handle for the given parameters that can be used to submit values.
+// Distribution will always return a [MetricHandle], even if telemetry is disabled or the client has yet to start.
+// The [MetricHandle] is then swapped with the actual [MetricHandle] once the client is started.
+// The Get() method of the [MetricHandle] returns the last value submitted.
+// Unlike the other metric types, a Distribution [MetricHandle] should be held in a variable rather than recreated on each use, to avoid needless allocations.
+func Distribution(namespace Namespace, name string, tags []string) MetricHandle {
+	return globalClientNewMetric(namespace, transport.DistMetric, name, tags)
+}
+
+func Log(level LogLevel, text string, options ...LogOption) {
+	globalClientCall(func(client Client) {
+		client.Log(level, text, options...)
+	})
+}
+
+// ProductStarted declares a product to have started at the customer’s request. If telemetry is disabled, it will do nothing.
+// If the telemetry client has not started yet, it will record the action and replay it once the client is started.
+func ProductStarted(product Namespace) {
+	globalClientCall(func(client Client) {
+		client.ProductStarted(product)
+	})
+}
+
+// ProductStopped declares a product to have been stopped by the customer. If telemetry is disabled, it will do nothing.
+// If the telemetry client has not started yet, it will record the action and replay it once the client is started.
+func ProductStopped(product Namespace) {
+	globalClientCall(func(client Client) {
+		client.ProductStopped(product)
+	})
+}
+
+// ProductStartError declares that a product could not start because of the given error. If telemetry is disabled, it will do nothing.
+// If the telemetry client has not started yet, it will record the action and replay it once the client is started.
+func ProductStartError(product Namespace, err error) {
+	globalClientCall(func(client Client) {
+		client.ProductStartError(product, err)
+	})
+}
+
+// RegisterAppConfig adds a key-value pair to the app configuration and sends the change to telemetry.
+// The value has to be JSON-serializable and the origin is the source of the change. If telemetry is disabled, it will do nothing.
+// If the telemetry client has not started yet, it will record the action and replay it once the client is started.
+func RegisterAppConfig(key string, value any, origin Origin) {
+	globalClientCall(func(client Client) {
+		client.RegisterAppConfig(key, value, origin)
+	})
+}
+
+// RegisterAppConfigs adds a list of key-value pairs to the app configuration and sends the change to telemetry.
+// Same as RegisterAppConfig but for multiple values. If telemetry is disabled, it will do nothing.
+// If the telemetry client has not started yet, it will record the action and replay it once the client is started.
+func RegisterAppConfigs(kvs ...Configuration) {
+	globalClientCall(func(client Client) {
+		client.RegisterAppConfigs(kvs...)
+	})
+}
+
+// MarkIntegrationAsLoaded marks an integration as loaded in the telemetry. If telemetry is disabled
+// or the client has not started yet, it will record the action and replay it once the client is started.
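+// For example (a sketch; the integration name and version are illustrative):
+//
+//	MarkIntegrationAsLoaded(Integration{Name: "gorilla/mux", Version: "v1.8.1"})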
+func MarkIntegrationAsLoaded(integration Integration) { + globalClientCall(func(client Client) { + client.MarkIntegrationAsLoaded(integration) + }) +} + +// LoadIntegration marks an integration as loaded in the telemetry client. If telemetry is disabled, it will do nothing. +// If the telemetry client has not started yet, it will record the action and replay it once the client is started. +func LoadIntegration(integration string) { + globalClientCall(func(client Client) { + client.MarkIntegrationAsLoaded(Integration{ + Name: integration, + }) + }) +} + +// AddFlushTicker adds a function that is called at each telemetry Flush. By default, every minute +func AddFlushTicker(ticker func(Client)) { + globalClientCall(func(client Client) { + client.AddFlushTicker(ticker) + }) +} + +var globalClientLogLossOnce sync.Once + +// globalClientCall takes a function that takes a Client and calls it with the global client if it exists. +// otherwise, it records the action for when the client is started. +func globalClientCall(fun func(client Client)) { + if Disabled() { + return + } + + client := globalClient.Load() + if client == nil || *client == nil { + if !globalClientRecorder.Record(fun) { + globalClientLogLossOnce.Do(func() { + log.Debug("telemetry: global client recorder queue is full, dropping telemetry data, please start the telemetry client earlier to avoid data loss") + }) + } + return + } + + fun(*client) +} + +var noopMetricHandleInstance = noopMetricHandle{} + +func globalClientNewMetric(namespace Namespace, kind transport.MetricType, name string, tags []string) MetricHandle { + if Disabled() { + return noopMetricHandleInstance + } + + key := newMetricKey(namespace, kind, name, tags) + hotPtr, _ := metricsHandleSwappablePointers.LoadOrCompute(key, func() *swappableMetricHandle { + maker := func(client Client) MetricHandle { + switch kind { + case transport.CountMetric: + return client.Count(namespace, name, tags) + case transport.RateMetric: + return client.Rate(namespace, name, tags) + case transport.GaugeMetric: + return client.Gauge(namespace, name, tags) + case transport.DistMetric: + return client.Distribution(namespace, name, tags) + } + log.Warn("telemetry: unknown metric type %q", kind) + return nil + } + wrapper := &swappableMetricHandle{maker: maker} + if client := globalClient.Load(); client == nil || *client == nil { + wrapper.recorder = internal.NewRecorder[MetricHandle]() + } + globalClientCall(func(client Client) { + wrapper.swap(maker(client)) + }) + return wrapper + }) + return hotPtr +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/integration.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/integration.go new file mode 100644 index 00000000..9df61d91 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/integration.go @@ -0,0 +1,41 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
+ +package telemetry + +import ( + "sync" + + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport" +) + +type integrations struct { + mu sync.Mutex + integrations []transport.Integration +} + +func (i *integrations) Add(integration Integration) { + i.mu.Lock() + defer i.mu.Unlock() + i.integrations = append(i.integrations, transport.Integration{ + Name: integration.Name, + Version: integration.Version, + Enabled: integration.Error == "", // no error means the integration was enabled successfully + Error: integration.Error, + }) +} + +func (i *integrations) Payload() transport.Payload { + i.mu.Lock() + defer i.mu.Unlock() + if len(i.integrations) == 0 { + return nil + } + integrations := i.integrations + i.integrations = nil + return transport.AppIntegrationChange{ + Integrations: integrations, + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics/known_metric.golang.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics/known_metric.golang.go new file mode 100644 index 00000000..da51ebcf --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics/known_metric.golang.go @@ -0,0 +1,14 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +// Code generated by 'go run github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics/generator'; DO NOT EDIT. + +package knownmetrics + +var golangMetrics = []Declaration{ + { Type: "count", Name: "errorstack.source" }, + { Type: "distribution", Name: "errorstack.duration" }, + { Type: "gauge", Name: "orchestrion.enabled" }, +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics/known_metrics.common.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics/known_metrics.common.go new file mode 100644 index 00000000..1c2925f1 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics/known_metrics.common.go @@ -0,0 +1,227 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +// Code generated by 'go run github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics/generator'; DO NOT EDIT. 
+ +package knownmetrics + +var commonMetrics = []Declaration{ + { Namespace: "appsec", Type: "count", Name: "api_security.missing_route" }, + { Namespace: "appsec", Type: "count", Name: "api_security.request.no_schema" }, + { Namespace: "appsec", Type: "count", Name: "api_security.request.schema" }, + { Namespace: "appsec", Type: "count", Name: "instrum.user_auth.missing_user_id" }, + { Namespace: "appsec", Type: "count", Name: "instrum.user_auth.missing_user_login" }, + { Namespace: "appsec", Type: "count", Name: "rasp.error" }, + { Namespace: "appsec", Type: "count", Name: "rasp.rule.eval" }, + { Namespace: "appsec", Type: "count", Name: "rasp.rule.match" }, + { Namespace: "appsec", Type: "count", Name: "rasp.rule.skipped" }, + { Namespace: "appsec", Type: "count", Name: "rasp.timeout" }, + { Namespace: "appsec", Type: "count", Name: "sdk.event" }, + { Namespace: "appsec", Type: "count", Name: "waf.config_errors" }, + { Namespace: "appsec", Type: "count", Name: "waf.error" }, + { Namespace: "appsec", Type: "count", Name: "waf.init" }, + { Namespace: "appsec", Type: "count", Name: "waf.input_truncated" }, + { Namespace: "appsec", Type: "count", Name: "waf.requests" }, + { Namespace: "appsec", Type: "count", Name: "waf.updates" }, + { Namespace: "appsec", Type: "distribution", Name: "rasp.duration" }, + { Namespace: "appsec", Type: "distribution", Name: "rasp.duration_ext" }, + { Namespace: "appsec", Type: "distribution", Name: "waf.duration" }, + { Namespace: "appsec", Type: "distribution", Name: "waf.duration_ext" }, + { Namespace: "appsec", Type: "distribution", Name: "waf.truncated_value_size" }, + { Namespace: "appsec", Type: "gauge", Name: "enabled" }, + { Namespace: "civisibility", Type: "count", Name: "code_coverage.errors" }, + { Namespace: "civisibility", Type: "count", Name: "code_coverage.is_empty" }, + { Namespace: "civisibility", Type: "count", Name: "code_coverage_finished" }, + { Namespace: "civisibility", Type: "count", Name: "code_coverage_started" }, + { Namespace: "civisibility", Type: "count", Name: "early_flake_detection.request" }, + { Namespace: "civisibility", Type: "count", Name: "early_flake_detection.request_errors" }, + { Namespace: "civisibility", Type: "count", Name: "endpoint_payload.dropped" }, + { Namespace: "civisibility", Type: "count", Name: "endpoint_payload.requests" }, + { Namespace: "civisibility", Type: "count", Name: "endpoint_payload.requests_errors" }, + { Namespace: "civisibility", Type: "count", Name: "event_created" }, + { Namespace: "civisibility", Type: "count", Name: "event_finished" }, + { Namespace: "civisibility", Type: "count", Name: "events_enqueued_for_serialization" }, + { Namespace: "civisibility", Type: "count", Name: "flaky_tests.request" }, + { Namespace: "civisibility", Type: "count", Name: "flaky_tests.request_errors" }, + { Namespace: "civisibility", Type: "count", Name: "git.command" }, + { Namespace: "civisibility", Type: "count", Name: "git.command_errors" }, + { Namespace: "civisibility", Type: "count", Name: "git.commit_sha_discrepancy" }, + { Namespace: "civisibility", Type: "count", Name: "git.commit_sha_match" }, + { Namespace: "civisibility", Type: "count", Name: "git_requests.objects_pack" }, + { Namespace: "civisibility", Type: "count", Name: "git_requests.objects_pack_errors" }, + { Namespace: "civisibility", Type: "count", Name: "git_requests.search_commits" }, + { Namespace: "civisibility", Type: "count", Name: "git_requests.search_commits_errors" }, + { Namespace: "civisibility", Type: "count", Name: 
"git_requests.settings" }, + { Namespace: "civisibility", Type: "count", Name: "git_requests.settings_errors" }, + { Namespace: "civisibility", Type: "count", Name: "git_requests.settings_response" }, + { Namespace: "civisibility", Type: "count", Name: "impacted_tests_detection.request" }, + { Namespace: "civisibility", Type: "count", Name: "impacted_tests_detection.request_errors" }, + { Namespace: "civisibility", Type: "count", Name: "itr_forced_run" }, + { Namespace: "civisibility", Type: "count", Name: "itr_skippable_tests.request" }, + { Namespace: "civisibility", Type: "count", Name: "itr_skippable_tests.request_errors" }, + { Namespace: "civisibility", Type: "count", Name: "itr_skippable_tests.response_suites" }, + { Namespace: "civisibility", Type: "count", Name: "itr_skippable_tests.response_tests" }, + { Namespace: "civisibility", Type: "count", Name: "itr_skipped" }, + { Namespace: "civisibility", Type: "count", Name: "itr_unskippable" }, + { Namespace: "civisibility", Type: "count", Name: "jenkins.http_endpoint.dropped" }, + { Namespace: "civisibility", Type: "count", Name: "jenkins.http_endpoint.requests" }, + { Namespace: "civisibility", Type: "count", Name: "jenkins.http_endpoint.requests_errors" }, + { Namespace: "civisibility", Type: "count", Name: "jenkins.logs.dropped" }, + { Namespace: "civisibility", Type: "count", Name: "jenkins.logs.submitted" }, + { Namespace: "civisibility", Type: "count", Name: "jenkins.traces.dropped" }, + { Namespace: "civisibility", Type: "count", Name: "jenkins.traces.submitted" }, + { Namespace: "civisibility", Type: "count", Name: "known_tests.request" }, + { Namespace: "civisibility", Type: "count", Name: "known_tests.request_errors" }, + { Namespace: "civisibility", Type: "count", Name: "manual_api_events" }, + { Namespace: "civisibility", Type: "count", Name: "test_management_tests.request" }, + { Namespace: "civisibility", Type: "count", Name: "test_management_tests.request_errors" }, + { Namespace: "civisibility", Type: "count", Name: "test_session" }, + { Namespace: "civisibility", Type: "distribution", Name: "code_coverage.files" }, + { Namespace: "civisibility", Type: "distribution", Name: "early_flake_detection.request_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "early_flake_detection.response_bytes" }, + { Namespace: "civisibility", Type: "distribution", Name: "early_flake_detection.response_tests" }, + { Namespace: "civisibility", Type: "distribution", Name: "endpoint_payload.bytes" }, + { Namespace: "civisibility", Type: "distribution", Name: "endpoint_payload.events_count" }, + { Namespace: "civisibility", Type: "distribution", Name: "endpoint_payload.events_serialization_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "endpoint_payload.requests_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "flaky_tests.request_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "flaky_tests.response_bytes" }, + { Namespace: "civisibility", Type: "distribution", Name: "flaky_tests.response_tests" }, + { Namespace: "civisibility", Type: "distribution", Name: "git.command_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "git_requests.objects_pack_bytes" }, + { Namespace: "civisibility", Type: "distribution", Name: "git_requests.objects_pack_files" }, + { Namespace: "civisibility", Type: "distribution", Name: "git_requests.objects_pack_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "git_requests.search_commits_ms" }, + { Namespace: 
"civisibility", Type: "distribution", Name: "git_requests.settings_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "impacted_tests_detection.request_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "impacted_tests_detection.response_bytes" }, + { Namespace: "civisibility", Type: "distribution", Name: "impacted_tests_detection.response_files" }, + { Namespace: "civisibility", Type: "distribution", Name: "itr_skippable_tests.request_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "itr_skippable_tests.response_bytes" }, + { Namespace: "civisibility", Type: "distribution", Name: "jenkins.http_endpoint.bytes" }, + { Namespace: "civisibility", Type: "distribution", Name: "jenkins.http_endpoint.events_count" }, + { Namespace: "civisibility", Type: "distribution", Name: "jenkins.http_endpoint.requests_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "known_tests.request_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "known_tests.response_bytes" }, + { Namespace: "civisibility", Type: "distribution", Name: "known_tests.response_tests" }, + { Namespace: "civisibility", Type: "distribution", Name: "test_management_tests.request_ms" }, + { Namespace: "civisibility", Type: "distribution", Name: "test_management_tests.response_bytes" }, + { Namespace: "civisibility", Type: "distribution", Name: "test_management_tests.response_tests" }, + { Namespace: "general", Type: "count", Name: "logs_created" }, + { Namespace: "general", Type: "distribution", Name: "init_time" }, + { Namespace: "general", Type: "distribution", Name: "tracer_init_time" }, + { Namespace: "iast", Type: "count", Name: "executed.propagation" }, + { Namespace: "iast", Type: "count", Name: "executed.sink" }, + { Namespace: "iast", Type: "count", Name: "executed.source" }, + { Namespace: "iast", Type: "count", Name: "executed.tainted" }, + { Namespace: "iast", Type: "count", Name: "instrumented.propagation" }, + { Namespace: "iast", Type: "count", Name: "instrumented.sink" }, + { Namespace: "iast", Type: "count", Name: "instrumented.source" }, + { Namespace: "iast", Type: "count", Name: "json.tag.size.exceeded" }, + { Namespace: "iast", Type: "count", Name: "request.tainted" }, + { Namespace: "iast", Type: "count", Name: "suppressed.vulnerabilities" }, + { Namespace: "mlobs", Type: "count", Name: "activate_distributed_headers" }, + { Namespace: "mlobs", Type: "count", Name: "annotations" }, + { Namespace: "mlobs", Type: "count", Name: "dropped_eval_events" }, + { Namespace: "mlobs", Type: "count", Name: "dropped_span_events" }, + { Namespace: "mlobs", Type: "count", Name: "evals_submitted" }, + { Namespace: "mlobs", Type: "count", Name: "evaluators.error" }, + { Namespace: "mlobs", Type: "count", Name: "evaluators.init" }, + { Namespace: "mlobs", Type: "count", Name: "evaluators.run" }, + { Namespace: "mlobs", Type: "count", Name: "inject_distributed_headers" }, + { Namespace: "mlobs", Type: "count", Name: "product_enabled" }, + { Namespace: "mlobs", Type: "count", Name: "span.finished" }, + { Namespace: "mlobs", Type: "count", Name: "span.start" }, + { Namespace: "mlobs", Type: "count", Name: "spans_exported" }, + { Namespace: "mlobs", Type: "count", Name: "user_flush" }, + { Namespace: "mlobs", Type: "count", Name: "user_processor_called" }, + { Namespace: "mlobs", Type: "distribution", Name: "evaluators.rule_sample_rate" }, + { Namespace: "mlobs", Type: "distribution", Name: "init_time" }, + { Namespace: "mlobs", Type: "distribution", Name: 
"span.raw_size" }, + { Namespace: "mlobs", Type: "distribution", Name: "span.size" }, + { Namespace: "profilers", Type: "count", Name: "profile_api.errors" }, + { Namespace: "profilers", Type: "count", Name: "profile_api.requests" }, + { Namespace: "profilers", Type: "count", Name: "profile_api.responses" }, + { Namespace: "profilers", Type: "distribution", Name: "profile_api.bytes" }, + { Namespace: "profilers", Type: "distribution", Name: "profile_api.ms" }, + { Namespace: "rum", Type: "count", Name: "injection.content_security_policy" }, + { Namespace: "rum", Type: "count", Name: "injection.failed" }, + { Namespace: "rum", Type: "count", Name: "injection.initialization.failed" }, + { Namespace: "rum", Type: "count", Name: "injection.initialization.succeed" }, + { Namespace: "rum", Type: "count", Name: "injection.installation" }, + { Namespace: "rum", Type: "count", Name: "injection.skipped" }, + { Namespace: "rum", Type: "count", Name: "injection.succeed" }, + { Namespace: "rum", Type: "distribution", Name: "injection.installation.duration" }, + { Namespace: "rum", Type: "distribution", Name: "injection.ms" }, + { Namespace: "rum", Type: "distribution", Name: "injection.response.bytes" }, + { Namespace: "sidecar", Type: "count", Name: "server.submitted_payloads" }, + { Namespace: "sidecar", Type: "distribution", Name: "server.memory_usage" }, + { Namespace: "sidecar", Type: "gauge", Name: "server.active_sessions" }, + { Namespace: "telemetry", Type: "count", Name: "telemetry_api.errors" }, + { Namespace: "telemetry", Type: "count", Name: "telemetry_api.requests" }, + { Namespace: "telemetry", Type: "count", Name: "telemetry_api.responses" }, + { Namespace: "telemetry", Type: "distribution", Name: "telemetry_api.bytes" }, + { Namespace: "telemetry", Type: "distribution", Name: "telemetry_api.ms" }, + { Namespace: "tracers", Type: "count", Name: "context_header.truncated" }, + { Namespace: "tracers", Type: "count", Name: "context_header_style.extracted" }, + { Namespace: "tracers", Type: "count", Name: "context_header_style.injected" }, + { Namespace: "tracers", Type: "count", Name: "docker_lib_injection.failure" }, + { Namespace: "tracers", Type: "count", Name: "docker_lib_injection.success" }, + { Namespace: "tracers", Type: "count", Name: "exporter_fallback" }, + { Namespace: "tracers", Type: "count", Name: "host_lib_injection.failure" }, + { Namespace: "tracers", Type: "count", Name: "host_lib_injection.success" }, + { Namespace: "tracers", Type: "count", Name: "inject.error" }, + { Namespace: "tracers", Type: "count", Name: "inject.language_detection" }, + { Namespace: "tracers", Type: "count", Name: "inject.skip" }, + { Namespace: "tracers", Type: "count", Name: "inject.success" }, + { Namespace: "tracers", Type: "count", Name: "integration_errors" }, + { Namespace: "tracers", Type: "count", Name: "k8s_lib_injection.failure" }, + { Namespace: "tracers", Type: "count", Name: "k8s_lib_injection.success" }, + { Namespace: "tracers", Type: "count", Name: "library_entrypoint.abort" }, + { Namespace: "tracers", Type: "count", Name: "library_entrypoint.abort.integration" }, + { Namespace: "tracers", Type: "count", Name: "library_entrypoint.abort.runtime" }, + { Namespace: "tracers", Type: "count", Name: "library_entrypoint.complete" }, + { Namespace: "tracers", Type: "count", Name: "library_entrypoint.error" }, + { Namespace: "tracers", Type: "count", Name: "library_entrypoint.injector.error" }, + { Namespace: "tracers", Type: "count", Name: "library_entrypoint.start" }, + { Namespace: 
"tracers", Type: "count", Name: "otel.env.hiding" }, + { Namespace: "tracers", Type: "count", Name: "otel.env.invalid" }, + { Namespace: "tracers", Type: "count", Name: "otel.env.unsupported" }, + { Namespace: "tracers", Type: "count", Name: "public_api.called" }, + { Namespace: "tracers", Type: "count", Name: "span_created" }, + { Namespace: "tracers", Type: "count", Name: "span_finished" }, + { Namespace: "tracers", Type: "count", Name: "span_pointer_calculation" }, + { Namespace: "tracers", Type: "count", Name: "span_pointer_calculation.issue" }, + { Namespace: "tracers", Type: "count", Name: "spans_created" }, + { Namespace: "tracers", Type: "count", Name: "spans_dropped" }, + { Namespace: "tracers", Type: "count", Name: "spans_enqueued_for_serialization" }, + { Namespace: "tracers", Type: "count", Name: "spans_finished" }, + { Namespace: "tracers", Type: "count", Name: "stats_api.errors" }, + { Namespace: "tracers", Type: "count", Name: "stats_api.requests" }, + { Namespace: "tracers", Type: "count", Name: "stats_api.responses" }, + { Namespace: "tracers", Type: "count", Name: "trace_api.errors" }, + { Namespace: "tracers", Type: "count", Name: "trace_api.requests" }, + { Namespace: "tracers", Type: "count", Name: "trace_api.responses" }, + { Namespace: "tracers", Type: "count", Name: "trace_chunks_dropped" }, + { Namespace: "tracers", Type: "count", Name: "trace_chunks_enqueued" }, + { Namespace: "tracers", Type: "count", Name: "trace_chunks_enqueued_for_serialization" }, + { Namespace: "tracers", Type: "count", Name: "trace_chunks_sent" }, + { Namespace: "tracers", Type: "count", Name: "trace_partial_flush.count" }, + { Namespace: "tracers", Type: "count", Name: "trace_segments_closed" }, + { Namespace: "tracers", Type: "count", Name: "trace_segments_created" }, + { Namespace: "tracers", Type: "distribution", Name: "inject.latency.baseline" }, + { Namespace: "tracers", Type: "distribution", Name: "inject.latency.end_to_end" }, + { Namespace: "tracers", Type: "distribution", Name: "inject.latency.init_container" }, + { Namespace: "tracers", Type: "distribution", Name: "stats_api.bytes" }, + { Namespace: "tracers", Type: "distribution", Name: "stats_api.ms" }, + { Namespace: "tracers", Type: "distribution", Name: "trace_api.bytes" }, + { Namespace: "tracers", Type: "distribution", Name: "trace_api.ms" }, + { Namespace: "tracers", Type: "distribution", Name: "trace_chunk_serialization.bytes" }, + { Namespace: "tracers", Type: "distribution", Name: "trace_chunk_serialization.ms" }, + { Namespace: "tracers", Type: "distribution", Name: "trace_chunk_size" }, + { Namespace: "tracers", Type: "distribution", Name: "trace_partial_flush.spans_closed" }, + { Namespace: "tracers", Type: "distribution", Name: "trace_partial_flush.spans_remaining" }, + { Namespace: "tracers", Type: "gauge", Name: "stats_buckets" }, +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics/known_metrics.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics/known_metrics.go new file mode 100644 index 00000000..f32e7716 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics/known_metrics.go @@ -0,0 +1,61 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
+
+package knownmetrics
+
+import (
+	"slices"
+
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport"
+)
+
+type Declaration struct {
+	Namespace transport.Namespace  `json:"namespace"`
+	Type      transport.MetricType `json:"type"`
+	Name      string               `json:"name"`
+}
+
+// IsKnownMetric returns true if the given metric name is a known metric by the backend.
+// This is linked to the generated common_metrics.json and golang_metrics.json files. If you added new metrics to the backend, you should rerun the generator.
+func IsKnownMetric(namespace transport.Namespace, typ transport.MetricType, name string) bool {
+	return IsCommonMetric(namespace, typ, name) || IsLanguageMetric(typ, name)
+}
+
+// IsCommonMetric returns true if the given metric name is a known common (cross-language) metric by the backend.
+// This is linked to the generated common_metrics.json file. If you added new metrics to the backend, you should rerun the generator.
+func IsCommonMetric(namespace transport.Namespace, typ transport.MetricType, name string) bool {
+	decl := Declaration{Namespace: namespace, Type: typ, Name: name}
+	return slices.Contains(commonMetrics, decl)
+}
+
+// Size returns the total number of known metrics, including common and golang metrics.
+func Size() int {
+	return len(commonMetrics) + len(golangMetrics)
+}
+
+// SizeWithFilter returns the total number of known metrics, including common and golang metrics, that pass the given filter.
+func SizeWithFilter(filter func(Declaration) bool) int {
+	size := 0
+	for _, decl := range commonMetrics {
+		if filter(decl) {
+			size++
+		}
+	}
+
+	for _, decl := range golangMetrics {
+		if filter(decl) {
+			size++
+		}
+	}
+
+	return size
+}
+
+// IsLanguageMetric returns true if the given metric name is a known Go language metric by the backend.
+// This is linked to the generated golang_metrics.json file. If you added new metrics to the backend, you should rerun the generator.
+func IsLanguageMetric(typ transport.MetricType, name string) bool {
+	decl := Declaration{Type: typ, Name: name}
+	return slices.Contains(golangMetrics, decl)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/app_closing.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/app_closing.go
new file mode 100644
index 00000000..da47f19f
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/app_closing.go
@@ -0,0 +1,23 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package mapper
+
+import (
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport"
+)
+
+// NewAppClosingMapper returns a new Mapper that appends an AppClosing payload to the given payloads and calls the underlying Mapper with it.
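+// Sketch of the intended wiring (assumed from the other mappers in this
+// package; the intervals are illustrative):
+//
+//	m := NewAppClosingMapper(NewDefaultMapper(time.Minute, 24*time.Hour))
+//	payloads, _ = m.Transform(payloads) // appends transport.AppClosing{} before delegating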
+func NewAppClosingMapper(next Mapper) Mapper {
+	return &appClosingEnricher{next: next}
+}
+
+type appClosingEnricher struct {
+	next Mapper
+}
+
+func (t *appClosingEnricher) Transform(payloads []transport.Payload) ([]transport.Payload, Mapper) {
+	return t.next.Transform(append(payloads, transport.AppClosing{}))
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/app_started.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/app_started.go
new file mode 100644
index 00000000..3f00575d
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/app_started.go
@@ -0,0 +1,48 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package mapper
+
+import (
+	"github.com/DataDog/dd-trace-go/v2/internal/globalconfig"
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport"
+)
+
+type appStartedReducer struct {
+	next Mapper
+}
+
+// NewAppStartedMapper returns a new Mapper that adds an AppStarted payload to the beginning of all payloads
+// and passes it down to its underlying mapper.
+// The AppStarted payload ingests the [transport.AppClientConfigurationChange] and [transport.AppProductChange] payloads.
+func NewAppStartedMapper(next Mapper) Mapper {
+	return &appStartedReducer{next: next}
+}
+
+func (t *appStartedReducer) Transform(payloads []transport.Payload) ([]transport.Payload, Mapper) {
+	appStarted := transport.AppStarted{
+		InstallSignature: transport.InstallSignature{
+			InstallID:   globalconfig.InstrumentationInstallID(),
+			InstallType: globalconfig.InstrumentationInstallType(),
+			InstallTime: globalconfig.InstrumentationInstallTime(),
+		},
+	}
+
+	payloadLefts := make([]transport.Payload, 0, len(payloads))
+	for _, payload := range payloads {
+		switch payload := payload.(type) {
+		case transport.AppClientConfigurationChange:
+			appStarted.Configuration = payload.Configuration
+		case transport.AppProductChange:
+			appStarted.Products = payload.Products
+		default:
+			payloadLefts = append(payloadLefts, payload)
+		}
+	}
+
+	// The app-started event should be the first event in the payload and not in a message batch
+	payloads, mapper := t.next.Transform(payloadLefts)
+	return append([]transport.Payload{appStarted}, payloads...), mapper
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/default.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/default.go
new file mode 100644
index 00000000..313597da
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/default.go
@@ -0,0 +1,98 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package mapper
+
+import (
+	"time"
+
+	"golang.org/x/time/rate"
+
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport"
+)
+
+// NewDefaultMapper returns a Mapper that transforms payloads into a MessageBatch and adds a heartbeat message.
+// The heartbeat message is added every heartbeatInterval.
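+// For instance (a sketch; the intervals are illustrative, not asserted defaults):
+//
+//	m := NewDefaultMapper(time.Minute, 24*time.Hour)
+//	payloads, m = m.Transform(payloads) // batches payloads, possibly appending a heartbeat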
+func NewDefaultMapper(heartbeatInterval, extendedHeartBeatInterval time.Duration) Mapper {
+	mapper := &defaultMapper{
+		heartbeatEnricher: heartbeatEnricher{
+			heartbeatRL:         rate.NewLimiter(rate.Every(heartbeatInterval), 1),
+			extendedHeartbeatRL: rate.NewLimiter(rate.Every(extendedHeartBeatInterval), 1),
+		},
+	}
+
+	// The rate limiter starts with an available token; consume it so that the first heartbeat is only sent after a full interval
+	mapper.heartbeatEnricher.heartbeatRL.Allow()
+	mapper.heartbeatEnricher.extendedHeartbeatRL.Allow()
+	return mapper
+}
+
+type defaultMapper struct {
+	heartbeatEnricher
+	messageBatchReducer
+}
+
+func (t *defaultMapper) Transform(payloads []transport.Payload) ([]transport.Payload, Mapper) {
+	payloads, _ = t.heartbeatEnricher.Transform(payloads)
+	payloads, _ = t.messageBatchReducer.Transform(payloads)
+	return payloads, t
+}
+
+type messageBatchReducer struct{}
+
+func (t *messageBatchReducer) Transform(payloads []transport.Payload) ([]transport.Payload, Mapper) {
+	if len(payloads) <= 1 {
+		return payloads, t
+	}
+
+	messages := make([]transport.Message, len(payloads))
+	for i, payload := range payloads {
+		messages[i] = transport.Message{
+			RequestType: payload.RequestType(),
+			Payload:     payload,
+		}
+	}
+
+	return []transport.Payload{transport.MessageBatch(messages)}, t
+}
+
+type heartbeatEnricher struct {
+	heartbeatRL         *rate.Limiter
+	extendedHeartbeatRL *rate.Limiter
+
+	extendedHeartbeat transport.AppExtendedHeartbeat
+	heartbeat         transport.AppHeartbeat
+}
+
+func (t *heartbeatEnricher) Transform(payloads []transport.Payload) ([]transport.Payload, Mapper) {
+	// Build the extended heartbeat using the other payloads
+	// Composition described here:
+	// https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/main/GeneratedDocumentation/ApiDocs/v2/producing-telemetry.md#app-extended-heartbeat
+	for _, payload := range payloads {
+		switch payload := payload.(type) {
+		case transport.AppStarted:
+			// Should be sent only once anyway
+			t.extendedHeartbeat.Configuration = payload.Configuration
+		case transport.AppDependenciesLoaded:
+			if t.extendedHeartbeat.Dependencies == nil {
+				t.extendedHeartbeat.Dependencies = payload.Dependencies
+			}
+		case transport.AppIntegrationChange:
+			// The number of integrations should be small enough that we can just append to the list
+			t.extendedHeartbeat.Integrations = append(t.extendedHeartbeat.Integrations, payload.Integrations...)
+		}
+	}
+
+	if t.extendedHeartbeatRL.Allow() {
+		return append(payloads, t.extendedHeartbeat), t
+	}
+
+	if t.heartbeatRL.Allow() {
+		return append(payloads, t.heartbeat), t
+	}
+
+	// Neither heartbeat is due; append nothing
+	return payloads, t
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/mapper.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/mapper.go
new file mode 100644
index 00000000..116c4ffd
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/mapper/mapper.go
@@ -0,0 +1,17 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package mapper
+
+import (
+	"github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport"
+)
+
+// Mapper is an interface for transforming payloads to comply with different types of lifecycle events in the application.
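+// Returning a Mapper from Transform lets an implementation replace itself for
+// subsequent flushes; a typical flush loop therefore looks like this sketch:
+//
+//	payloads, mapper = mapper.Transform(payloads)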
+type Mapper interface {
+	// Transform transforms the given payloads and returns the transformed payloads and the Mapper to use for the
+	// next transformation at the next flush
+	Transform([]transport.Payload) ([]transport.Payload, Mapper)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/range.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/range.go
new file mode 100644
index 00000000..c2e1e185
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/range.go
@@ -0,0 +1,39 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package internal
+
+import (
+	"cmp"
+)
+
+// Range is a type that represents a range of values.
+type Range[T cmp.Ordered] struct {
+	Min T
+	Max T
+}
+
+// IsOrdered checks if the range is ordered, i.e. Min <= Max.
+func (r Range[T]) IsOrdered() bool {
+	return r.Min <= r.Max
+}
+
+// Contains checks if a value is within the range.
+func (r Range[T]) Contains(value T) bool {
+	return value >= r.Min && value <= r.Max
+}
+
+// Clamp squeezes a value between a minimum and maximum value.
+func (r Range[T]) Clamp(value T) T {
+	return max(min(r.Max, value), r.Min)
+}
+
+// ReduceMax returns a new range where value is the new max and min is either the current min or the new value, to make sure the range is ordered.
+func (r Range[T]) ReduceMax(value T) Range[T] {
+	return Range[T]{
+		Min: min(r.Min, value),
+		Max: value,
+	}
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/recorder.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/recorder.go
new file mode 100644
index 00000000..69c6fdd7
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/recorder.go
@@ -0,0 +1,53 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package internal
+
+// Recorder is a generic thread-safe type that records functions that could have taken place before object T was created.
+// Once object T is created, the Recorder can replay all the recorded functions with object T as an argument.
+type Recorder[T any] struct {
+	queue *RingQueue[func(T)]
+}
+
+// TODO: tweak these values once we get telemetry data from the telemetry client
+var queueCap = Range[int]{
+	Min: 16,  // Initial queue capacity
+	Max: 512, // Maximum queue capacity
+}
+
+// NewRecorder creates a new [Recorder] instance, with 512 as the maximum number of recorded functions before overflowing.
+func NewRecorder[T any]() Recorder[T] {
+	return Recorder[T]{
+		queue: NewRingQueue[func(T)](queueCap),
+	}
+}
+
+// Record takes a function and records it in the [Recorder]'s queue. If the queue is full, it returns false.
+// Once [Recorder.Replay] is called, all recorded functions will be replayed with object T as an argument in order of recording.
+func (r Recorder[T]) Record(f func(T)) bool {
+	if r.queue == nil {
+		return true
+	}
+	return r.queue.Enqueue(f)
+}
+
+// Replay uses T as an argument to replay all recorded functions in order of recording.
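+// A short usage sketch (the recorded type is illustrative):
+//
+//	r := NewRecorder[*strings.Builder]()
+//	r.Record(func(b *strings.Builder) { b.WriteString("replayed") })
+//	var sb strings.Builder
+//	r.Replay(&sb) // sb now contains "replayed"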
+func (r Recorder[T]) Replay(t T) {
+	if r.queue == nil {
+		return
+	}
+	for {
+		f := r.queue.Dequeue()
+		if f == nil {
+			break
+		}
+		f(t)
+	}
+}
+
+// Clear clears the Recorder's queue.
+func (r Recorder[T]) Clear() {
+	r.queue.Clear()
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/ringbuffer.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/ringbuffer.go
new file mode 100644
index 00000000..3d2d31ac
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/ringbuffer.go
@@ -0,0 +1,195 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package internal
+
+import (
+	"sync"
+)
+
+// RingQueue is a thread-safe ring buffer that can be used to store a fixed number of elements and overwrite old values when full.
+type RingQueue[T any] struct {
+	buffer            []T
+	head, tail, count int
+	// mu is the lock for the buffer, head and tail.
+	mu sync.Mutex
+	// pool is the pool of buffers. Normally there should only be one or two buffers in the pool.
+	pool *SyncPool[[]T]
+	// BufferSizes is the range of buffer sizes that the ring queue can have.
+	BufferSizes Range[int]
+}
+
+// NewRingQueue creates a new RingQueue with a minimum size and a maximum size.
+func NewRingQueue[T any](rang Range[int]) *RingQueue[T] {
+	return &RingQueue[T]{
+		buffer:      make([]T, rang.Min),
+		pool:        NewSyncPool[[]T](func() []T { return make([]T, rang.Min) }),
+		BufferSizes: rang,
+	}
+}
+
+// NewRingQueueWithPool creates a new RingQueue with a minimum size, a maximum size and a pool. Make sure the pool is properly initialized with the right type.
+func NewRingQueueWithPool[T any](rang Range[int], pool *SyncPool[[]T]) *RingQueue[T] {
+	return &RingQueue[T]{
+		buffer:      make([]T, rang.Min),
+		pool:        pool,
+		BufferSizes: rang,
+	}
+}
+
+// Length returns the number of elements currently stored in the queue.
+func (rq *RingQueue[T]) Length() int {
+	rq.mu.Lock()
+	defer rq.mu.Unlock()
+
+	return rq.count
+}
+
+func (rq *RingQueue[T]) resizeLocked() {
+	newBuf := make([]T, rq.BufferSizes.Clamp(rq.count*2))
+	defer rq.releaseBuffer(rq.buffer)
+
+	if rq.tail > rq.head {
+		copy(newBuf, rq.buffer[rq.head:rq.tail])
+	} else {
+		n := copy(newBuf, rq.buffer[rq.head:])
+		copy(newBuf[n:], rq.buffer[:rq.tail])
+	}
+
+	rq.head = 0
+	rq.tail = rq.count
+	rq.buffer = newBuf
+}
+
+func (rq *RingQueue[T]) enqueueLocked(elem T) bool {
+	spaceLeft := true
+	if rq.count == len(rq.buffer) {
+		if len(rq.buffer) == rq.BufferSizes.Max {
+			spaceLeft = false
+			// wrap-around modulus; drop the oldest element
+			rq.head = (rq.head + 1) % len(rq.buffer)
+			rq.count--
+		} else {
+			rq.resizeLocked()
+		}
+	}
+
+	rq.buffer[rq.tail] = elem
+	rq.tail = (rq.tail + 1) % len(rq.buffer)
+	rq.count++
+	return spaceLeft
+}
+
+// ReversePeek returns the last element that was enqueued without removing it.
+func (rq *RingQueue[T]) ReversePeek() T {
+	rq.mu.Lock()
+	defer rq.mu.Unlock()
+	if rq.count == 0 {
+		var zero T
+		return zero
+	}
+	return rq.buffer[(rq.tail-1+len(rq.buffer))%len(rq.buffer)]
+}
+
+// Enqueue adds one or multiple values to the buffer. Returns false if at least one item had to be pulled out from the queue to make space for new ones.
+func (rq *RingQueue[T]) Enqueue(vals ...T) bool {
+	rq.mu.Lock()
+	defer rq.mu.Unlock()
+	if len(vals) == 0 {
+		return true
+	}
+
+	spaceLeft := true
+	for _, val := range vals {
+		spaceLeft = rq.enqueueLocked(val)
+	}
+	return spaceLeft
+}
+
+// Dequeue removes a value from the buffer.
+func (rq *RingQueue[T]) Dequeue() T {
+	rq.mu.Lock()
+	defer rq.mu.Unlock()
+
+	if rq.count == 0 {
+		var zero T
+		return zero
+	}
+
+	ret := rq.buffer[rq.head]
+	// wrap-around modulus
+	rq.head = (rq.head + 1) % len(rq.buffer)
+	rq.count--
+	return ret
+}
+
+// getBuffer returns the current buffer and resets it.
+func (rq *RingQueue[T]) getBuffer() []T {
+	rq.mu.Lock()
+	defer rq.mu.Unlock()
+	return rq.getBufferLocked()
+}
+
+func (rq *RingQueue[T]) getBufferLocked() []T {
+	prevBuf := rq.buffer
+	rq.buffer = rq.pool.Get()
+	rq.head, rq.tail, rq.count = 0, 0, 0
+	return prevBuf
+}
+
+// Flush returns a copy of the buffer and resets it.
+func (rq *RingQueue[T]) Flush() []T {
+	rq.mu.Lock()
+	head, count := rq.head, rq.count
+	buf := rq.getBufferLocked()
+	rq.mu.Unlock()
+
+	// If the buffer is less than 12.5% full, we let the buffer get garbage collected because it's too big for the current throughput.
+	// Except when the buffer is at its minimum size.
+	if len(buf) == rq.BufferSizes.Min || count*8 >= len(buf) {
+		defer rq.releaseBuffer(buf)
+	}
+
+	copyBuf := make([]T, count)
+	for i := 0; i < count; i++ {
+		copyBuf[i] = buf[(head+i)%len(buf)]
+	}
+
+	return copyBuf
+}
+
+// releaseBuffer returns the buffer to the pool.
+func (rq *RingQueue[T]) releaseBuffer(buf []T) {
+	var zero T
+	buf = buf[:cap(buf)] // Make sure nobody reduced the length of the buffer
+	for i := range buf {
+		buf[i] = zero
+	}
+	rq.pool.Put(buf)
+}
+
+// IsEmpty returns true if the buffer is empty.
+func (rq *RingQueue[T]) IsEmpty() bool {
+	return rq.Length() == 0
+}
+
+// IsFull returns true if the buffer is full and cannot accept more elements.
+func (rq *RingQueue[T]) IsFull() bool {
+	rq.mu.Lock()
+	defer rq.mu.Unlock()
+
+	return len(rq.buffer) == rq.count && len(rq.buffer) == rq.BufferSizes.Max
+}
+
+// Clear removes all elements from the buffer.
+func (rq *RingQueue[T]) Clear() {
+	rq.mu.Lock()
+	defer rq.mu.Unlock()
+	rq.head, rq.tail, rq.count = 0, 0, 0
+	var zero T
+	for i := range rq.buffer {
+		rq.buffer[i] = zero
+	}
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/syncpool.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/syncpool.go
new file mode 100644
index 00000000..29ab2f9b
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/syncpool.go
@@ -0,0 +1,34 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package internal
+
+import (
+	"sync"
+)
+
+// SyncPool is a wrapper around [sync.Pool] that provides type safety.
+type SyncPool[T any] struct {
+	pool *sync.Pool
+}
+
+// NewSyncPool creates a new Pool with the given new function.
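+// For example (a sketch; the element type is illustrative):
+//
+//	p := NewSyncPool[[]byte](func() []byte { return make([]byte, 0, 1024) })
+//	buf := p.Get()
+//	defer p.Put(buf)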
+func NewSyncPool[T any](newT func() T) *SyncPool[T] { + return &SyncPool[T]{ + pool: &sync.Pool{ + New: func() any { + return newT() + }, + }, + } +} + +func (sp *SyncPool[T]) Get() T { + return sp.pool.Get().(T) +} + +func (sp *SyncPool[T]) Put(v T) { + sp.pool.Put(v) +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/ticker.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/ticker.go new file mode 100644 index 00000000..4d43a624 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/ticker.go @@ -0,0 +1,92 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package internal + +import ( + "sync" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +type TickFunc func() + +type Ticker struct { + ticker *time.Ticker + + tickSpeedMu sync.Mutex + tickSpeed time.Duration + + interval Range[time.Duration] + + tickFunc TickFunc + + stopChan chan struct{} + stopped bool +} + +func NewTicker(tickFunc TickFunc, interval Range[time.Duration]) *Ticker { + ticker := &Ticker{ + ticker: time.NewTicker(interval.Max), + tickSpeed: interval.Max, + interval: interval, + tickFunc: tickFunc, + stopChan: make(chan struct{}), + } + + go func() { + for { + select { + case <-ticker.ticker.C: + tickFunc() + case <-ticker.stopChan: + return + } + } + }() + + return ticker +} + +func (t *Ticker) CanIncreaseSpeed() { + t.tickSpeedMu.Lock() + defer t.tickSpeedMu.Unlock() + + oldTickSpeed := t.tickSpeed + t.tickSpeed = t.interval.Clamp(t.tickSpeed / 2) + + if oldTickSpeed == t.tickSpeed { + return + } + + log.Debug("telemetry: increasing flush speed to an interval of %s", t.tickSpeed) + t.ticker.Reset(t.tickSpeed) +} + +func (t *Ticker) CanDecreaseSpeed() { + t.tickSpeedMu.Lock() + defer t.tickSpeedMu.Unlock() + + oldTickSpeed := t.tickSpeed + t.tickSpeed = t.interval.Clamp(t.tickSpeed * 2) + + if oldTickSpeed == t.tickSpeed { + return + } + + log.Debug("telemetry: decreasing flush speed to an interval of %s", t.tickSpeed) + t.ticker.Reset(t.tickSpeed) +} + +func (t *Ticker) Stop() { + if t.stopped { + return + } + t.ticker.Stop() + t.stopChan <- struct{}{} + close(t.stopChan) + t.stopped = true +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/tracerconfig.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/tracerconfig.go new file mode 100644 index 00000000..439ff430 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/tracerconfig.go @@ -0,0 +1,16 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package internal + +// TracerConfig is the configuration for the tracer for the telemetry client. +type TracerConfig struct { + // Service is the name of the service being traced. + Service string + // Env is the environment the service is running in. + Env string + // Version is the version of the service. 
+ Version string +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_closing.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_closing.go new file mode 100644 index 00000000..11516d34 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_closing.go @@ -0,0 +1,12 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +type AppClosing struct{} + +func (AppClosing) RequestType() RequestType { + return RequestTypeAppClosing +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_configuration_change.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_configuration_change.go new file mode 100644 index 00000000..d6ef75de --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_configuration_change.go @@ -0,0 +1,14 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +type AppClientConfigurationChange struct { + Configuration []ConfKeyValue `json:"configuration"` +} + +func (AppClientConfigurationChange) RequestType() RequestType { + return RequestTypeAppClientConfigurationChange +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_dependencies_loaded.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_dependencies_loaded.go new file mode 100644 index 00000000..425b01bb --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_dependencies_loaded.go @@ -0,0 +1,21 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +type AppDependenciesLoaded struct { + Dependencies []Dependency `json:"dependencies"` +} + +func (AppDependenciesLoaded) RequestType() RequestType { + return RequestTypeAppDependenciesLoaded +} + +// Dependency is a Go module on which the application depends. This information +// can be accessed at run-time through the runtime/debug.ReadBuildInfo API. +type Dependency struct { + Name string `json:"name"` + Version string `json:"version,omitempty"` +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_extended_heartbeat.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_extended_heartbeat.go new file mode 100644 index 00000000..62f0e5fe --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_extended_heartbeat.go @@ -0,0 +1,16 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc.
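The adaptive Ticker vendored above halves or doubles its flush interval and clamps the result to a configured range. A minimal standalone sketch of that clamping pattern, using only the standard library (the clamp helper and the bounds are illustrative stand-ins for the vendored Range[time.Duration] type, not its API):

package main

import (
	"fmt"
	"time"
)

// clamp keeps d within [lo, hi], mirroring what Range[time.Duration].Clamp does above.
func clamp(d, lo, hi time.Duration) time.Duration {
	if d < lo {
		return lo
	}
	if d > hi {
		return hi
	}
	return d
}

func main() {
	lo, hi := 1*time.Second, 60*time.Second
	speed := hi
	ticker := time.NewTicker(speed)
	defer ticker.Stop()

	// Speed up: halve the interval, but never go below the minimum.
	speed = clamp(speed/2, lo, hi)
	ticker.Reset(speed)
	fmt.Println("interval after speed-up:", speed)

	// Slow down: double the interval, but never exceed the maximum.
	speed = clamp(speed*2, lo, hi)
	ticker.Reset(speed)
	fmt.Println("interval after slow-down:", speed)
}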
+ +package transport + +type AppExtendedHeartbeat struct { + Configuration []ConfKeyValue `json:"configuration,omitempty"` + Dependencies []Dependency `json:"dependencies,omitempty"` + Integrations []Integration `json:"integrations,omitempty"` +} + +func (AppExtendedHeartbeat) RequestType() RequestType { + return RequestTypeAppExtendedHeartBeat +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_heartbeat.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_heartbeat.go new file mode 100644 index 00000000..f2d4bab3 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_heartbeat.go @@ -0,0 +1,16 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +// All objects in this file are used to define the payload of the requests sent +// to the telemetry API. +// https://github.com/DataDog/instrumentation-telemetry-api-docs/tree/dad49961203d74ec8236b68ce4b54bbb7ed8716f/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas + +type AppHeartbeat struct{} + +func (AppHeartbeat) RequestType() RequestType { + return RequestTypeAppHeartbeat +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_integration_change.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_integration_change.go new file mode 100644 index 00000000..20244f03 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_integration_change.go @@ -0,0 +1,24 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +type AppIntegrationChange struct { + Integrations []Integration `json:"integrations"` +} + +func (AppIntegrationChange) RequestType() RequestType { + return RequestTypeAppIntegrationsChange +} + +// Integration is an integration that is configured to be traced automatically. +type Integration struct { + Name string `json:"name"` + Enabled bool `json:"enabled"` + Version string `json:"version,omitempty"` + AutoEnabled bool `json:"auto_enabled,omitempty"` + Compatible bool `json:"compatible,omitempty"` + Error string `json:"error,omitempty"` +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_product_change.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_product_change.go new file mode 100644 index 00000000..dcbf2ec6 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_product_change.go @@ -0,0 +1,22 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
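The Dependency entries above are populated from the Go module graph, which runtime/debug.ReadBuildInfo exposes at run time. A hedged sketch of how such a payload could be assembled (the local dep struct mirrors the shape of transport.Dependency for illustration only):

package main

import (
	"fmt"
	"runtime/debug"
)

// dep mirrors the transport.Dependency shape above, for illustration only.
type dep struct {
	Name    string `json:"name"`
	Version string `json:"version,omitempty"`
}

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info available (binary not built from a module)")
		return
	}

	// Each entry in info.Deps is a module the application depends on.
	deps := make([]dep, 0, len(info.Deps))
	for _, m := range info.Deps {
		deps = append(deps, dep{Name: m.Path, Version: m.Version})
	}
	fmt.Printf("%d dependencies collected\n", len(deps))
}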
+ +package transport + +type AppProductChange struct { + Products Products `json:"products"` +} + +type Products map[Namespace]Product + +func (AppProductChange) RequestType() RequestType { + return RequestTypeAppProductChange +} + +type Product struct { + Version string `json:"version,omitempty"` + Enabled bool `json:"enabled"` + Error Error `json:"error,omitempty"` +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_started.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_started.go new file mode 100644 index 00000000..3753d789 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/app_started.go @@ -0,0 +1,37 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +// All objects in this file are used to define the payload of the requests sent +// to the telemetry API. +// https://github.com/DataDog/instrumentation-telemetry-api-docs/tree/dad49961203d74ec8236b68ce4b54bbb7ed8716f/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas + +type AppStarted struct { + Products Products `json:"products,omitempty"` + Configuration []ConfKeyValue `json:"configuration,omitempty"` + Error Error `json:"error,omitempty"` + InstallSignature InstallSignature `json:"install_signature,omitempty"` + AdditionalPayload []AdditionalPayload `json:"additional_payload,omitempty"` +} + +func (AppStarted) RequestType() RequestType { + return RequestTypeAppStarted +} + +// InstallSignature is a structure to send the install signature with the +// AppStarted payload. +type InstallSignature struct { + InstallID string `json:"install_id,omitempty"` + InstallType string `json:"install_type,omitempty"` + InstallTime string `json:"install_time,omitempty"` +} + +// AdditionalPayload is a generic structure to send additional data with the +// AppStarted payload. +type AdditionalPayload struct { + Name RequestType `json:"name"` + Value Payload `json:"value"` +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/body.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/body.go new file mode 100644 index 00000000..85b15dfa --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/body.go @@ -0,0 +1,150 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
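The body.go file that follows decodes a polymorphic payload field in two passes: first into json.RawMessage, then into a concrete type chosen by dispatching on request_type. A condensed, self-contained sketch of that technique (the envelope and payload types here are illustrative, not the vendored ones):

package main

import (
	"encoding/json"
	"fmt"
)

type envelope struct {
	RequestType string          `json:"request_type"`
	Payload     json.RawMessage `json:"payload"` // left raw, decoded in a second pass
}

type appClosing struct{}

type logsPayload struct {
	Logs []string `json:"logs"`
}

func main() {
	raw := []byte(`{"request_type":"logs","payload":{"logs":["a","b"]}}`)

	var env envelope
	if err := json.Unmarshal(raw, &env); err != nil {
		panic(err)
	}

	// Second pass: pick the concrete type based on the request type,
	// the same dispatch the unmarshalPayload switch below performs.
	var payload any
	switch env.RequestType {
	case "app-closing":
		payload = new(appClosing)
	case "logs":
		payload = new(logsPayload)
	default:
		panic(fmt.Sprintf("unknown request type %q", env.RequestType))
	}
	if err := json.Unmarshal(env.Payload, payload); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", payload)
}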
+ +package transport + +import ( + "encoding/json" + "fmt" +) + +// Application is identifying information about the app itself +type Application struct { + ServiceName string `json:"service_name"` + Env string `json:"env"` + ServiceVersion string `json:"service_version"` + TracerVersion string `json:"tracer_version"` + LanguageName string `json:"language_name"` + LanguageVersion string `json:"language_version"` + ProcessTags string `json:"process_tags,omitempty"` +} + +// Host is identifying information about the host on which the app +// is running +type Host struct { + Hostname string `json:"hostname"` + OS string `json:"os"` + OSVersion string `json:"os_version,omitempty"` + Architecture string `json:"architecture"` + KernelName string `json:"kernel_name"` + KernelRelease string `json:"kernel_release"` + KernelVersion string `json:"kernel_version"` +} + +// Body is the common high-level structure encapsulating a telemetry request body +// Described here: https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/main/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Request%20Bodies/telemetry_body.md +type Body struct { + APIVersion string `json:"api_version"` + RequestType RequestType `json:"request_type"` + TracerTime int64 `json:"tracer_time"` + RuntimeID string `json:"runtime_id"` + SeqID int64 `json:"seq_id"` + Debug bool `json:"debug,omitempty"` + Payload Payload `json:"payload"` + Application Application `json:"application"` + Host Host `json:"host"` +} + +// UnmarshalJSON is used to test the telemetry client end to end +func (b *Body) UnmarshalJSON(bytes []byte) error { + var anyMap map[string]json.RawMessage + var err error + if err = json.Unmarshal(bytes, &anyMap); err != nil { + return err + } + + if err = json.Unmarshal(anyMap["api_version"], &b.APIVersion); err != nil { + return err + } + + if err = json.Unmarshal(anyMap["request_type"], &b.RequestType); err != nil { + return err + } + + if err = json.Unmarshal(anyMap["tracer_time"], &b.TracerTime); err != nil { + return err + } + + if err = json.Unmarshal(anyMap["runtime_id"], &b.RuntimeID); err != nil { + return err + } + + if err = json.Unmarshal(anyMap["seq_id"], &b.SeqID); err != nil { + return err + } + + if _, ok := anyMap["debug"]; ok { + if err = json.Unmarshal(anyMap["debug"], &b.Debug); err != nil { + return err + } + } + + if err = json.Unmarshal(anyMap["application"], &b.Application); err != nil { + return err + } + + if err = json.Unmarshal(anyMap["host"], &b.Host); err != nil { + return err + } + + if b.RequestType == RequestTypeMessageBatch { + var messageBatch []struct { + RequestType RequestType `json:"request_type"` + Payload json.RawMessage `json:"payload"` + } + + if err = json.Unmarshal(anyMap["payload"], &messageBatch); err != nil { + return err + } + + batch := make([]Message, len(messageBatch)) + for i, message := range messageBatch { + payload, err := unmarshalPayload(message.Payload, message.RequestType) + if err != nil { + return err + } + batch[i] = Message{RequestType: message.RequestType, Payload: payload} + } + b.Payload = MessageBatch(batch) + return nil + } + + b.Payload, err = unmarshalPayload(anyMap["payload"], b.RequestType) + return err +} + +func unmarshalPayload(bytes json.RawMessage, requestType RequestType) (Payload, error) { + var payload Payload + switch requestType { + case RequestTypeAppClientConfigurationChange: + payload = new(AppClientConfigurationChange) + case RequestTypeAppProductChange: + payload = new(AppProductChange) + case 
RequestTypeAppIntegrationsChange: + payload = new(AppIntegrationChange) + case RequestTypeAppHeartbeat: + payload = new(AppHeartbeat) + case RequestTypeAppStarted: + payload = new(AppStarted) + case RequestTypeAppClosing: + payload = new(AppClosing) + case RequestTypeAppExtendedHeartBeat: + payload = new(AppExtendedHeartbeat) + case RequestTypeAppDependenciesLoaded: + payload = new(AppDependenciesLoaded) + case RequestTypeDistributions: + payload = new(Distributions) + case RequestTypeGenerateMetrics: + payload = new(GenerateMetrics) + case RequestTypeLogs: + payload = new(Logs) + } + + if err := json.Unmarshal(bytes, payload); err != nil { + return nil, fmt.Errorf("failed to unmarshal payload: %s", err.Error()) + } + + return payload, nil +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/conf_key_value.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/conf_key_value.go new file mode 100644 index 00000000..ea0eaf9b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/conf_key_value.go @@ -0,0 +1,18 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +// ConfKeyValue is a library-specific configuration value +type ConfKeyValue struct { + Name string `json:"name"` + Value any `json:"value"` // can be any type of integer, float, string, or boolean + Origin Origin `json:"origin"` + ID string `json:"config_id,omitempty"` + Error Error `json:"error,omitempty"` + + // SeqID is used to track the total number of configuration key value pairs applied across the tracer + SeqID uint64 `json:"seq_id,omitempty"` +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/distributions.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/distributions.go new file mode 100644 index 00000000..1dda0744 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/distributions.go @@ -0,0 +1,36 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +// All objects in this file are used to define the payload of the requests sent +// to the telemetry API. +// https://github.com/DataDog/instrumentation-telemetry-api-docs/tree/dad49961203d74ec8236b68ce4b54bbb7ed8716f/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas + +type Distributions struct { + Namespace Namespace `json:"namespace"` + Series []DistributionSeries `json:"series"` + SkipAllowlist bool `json:"skip_allowlist,omitempty"` +} + +func (Distributions) RequestType() RequestType { + return RequestTypeDistributions +} + +// DistributionSeries is a sequence of observations for a distribution metric. +// Unlike `MetricData`, DistributionSeries does not store timestamps in `Points` +type DistributionSeries struct { + Metric string `json:"metric"` + Points []float64 `json:"points"` + Tags []string `json:"tags,omitempty"` + // Common distinguishes metrics which are cross-language vs. + // language-specific. 
+ // + // NOTE: If this field isn't present in the request, the API assumes + // the metric is common. So we can't "omitempty" even though the + // field is technically optional. + Common bool `json:"common"` + Namespace Namespace `json:"namespace,omitempty"` +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/error.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/error.go new file mode 100644 index 00000000..b0de67be --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/error.go @@ -0,0 +1,12 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +// Error stores error information about various tracer events +type Error struct { + Code int `json:"code"` + Message string `json:"message"` +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/generate-metrics.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/generate-metrics.go new file mode 100644 index 00000000..220f93a9 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/generate-metrics.go @@ -0,0 +1,53 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +// All objects in this file are used to define the payload of the requests sent +// to the telemetry API. +// https://github.com/DataDog/instrumentation-telemetry-api-docs/tree/dad49961203d74ec8236b68ce4b54bbb7ed8716f/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas + +type GenerateMetrics struct { + Namespace Namespace `json:"namespace,omitempty"` + Series []MetricData `json:"series"` + SkipAllowlist bool `json:"skip_allowlist,omitempty"` +} + +func (GenerateMetrics) RequestType() RequestType { + return RequestTypeGenerateMetrics +} + +// MetricType is the type of metric being sent: count, gauge, or rate. +// Distributions are sent as a separate payload altogether. +type MetricType string + +const ( + RateMetric MetricType = "rate" + CountMetric MetricType = "count" + GaugeMetric MetricType = "gauge" + DistMetric MetricType = "distribution" +) + +// MetricData is a sequence of observations for a single named metric. +type MetricData struct { + Metric string `json:"metric"` + // Points stores pairs of timestamps and values. + // The first value of each pair should be an int64 timestamp and the second should be a float64 value. + Points [][2]any `json:"points"` + // Interval is required only for gauge and rate metrics + Interval int64 `json:"interval,omitempty"` + // Type cannot be of type distribution because there is a different payload for it + Type MetricType `json:"type,omitempty"` + Tags []string `json:"tags,omitempty"` + + // Common distinguishes metrics which are cross-language vs. + // language-specific. + // + // NOTE: If this field isn't present in the request, the API assumes + // the metric is common. So we can't "omitempty" even though the + // field is technically optional.
+ Common bool `json:"common"` + Namespace Namespace `json:"namespace,omitempty"` +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/logs.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/logs.go new file mode 100644 index 00000000..c0cebe28 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/logs.go @@ -0,0 +1,31 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +type Logs struct { + Logs []LogMessage `json:"logs,omitempty"` +} + +func (Logs) RequestType() RequestType { + return RequestTypeLogs +} + +type LogLevel string + +const ( + LogLevelDebug LogLevel = "DEBUG" + LogLevelWarn LogLevel = "WARN" + LogLevelError LogLevel = "ERROR" +) + +type LogMessage struct { + Message string `json:"message"` + Level LogLevel `json:"level"` + Count uint32 `json:"count,omitempty"` + Tags string `json:"tags,omitempty"` // comma separated list of tags, e.g. "tag1:1,tag2:toto" + StackTrace string `json:"stack_trace,omitempty"` + TracerTime int64 `json:"tracer_time,omitempty"` // Unix timestamp in seconds +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/message_batch.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/message_batch.go new file mode 100644 index 00000000..d1f9f964 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/message_batch.go @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +type MessageBatch []Message + +func (MessageBatch) RequestType() RequestType { + return RequestTypeMessageBatch +} + +type Message struct { + RequestType RequestType `json:"request_type"` + Payload Payload `json:"payload"` +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/namespace.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/namespace.go new file mode 100644 index 00000000..02d94aca --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/namespace.go @@ -0,0 +1,20 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
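The metric series above serialize each observation as a [timestamp, value] pair rather than a JSON object. A small sketch of how the Points [][2]any representation marshals (the values are made up):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Each point is a [timestamp, value] pair, matching the Points [][2]any
	// field of MetricData above: an int64 Unix timestamp first, a float64 second.
	points := [][2]any{
		{time.Now().Unix(), 12.0},
		{time.Now().Unix(), 15.5},
	}
	out, err := json.Marshal(points)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // e.g. [[1735689600,12],[1735689600,15.5]]
}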
+ +package transport + +type Namespace string + +const ( + NamespaceGeneral Namespace = "general" + NamespaceTracers Namespace = "tracers" + NamespaceProfilers Namespace = "profilers" + NamespaceAppSec Namespace = "appsec" + NamespaceIAST Namespace = "iast" + NamespaceTelemetry Namespace = "telemetry" + NamespaceCIVisibility Namespace = "civisibility" + NamespaceMLOps Namespace = "mlops" + NamespaceRUM Namespace = "rum" +) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/origin.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/origin.go new file mode 100644 index 00000000..3084d12c --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/origin.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +// Origin describes the source of a configuration change +type Origin string + +const ( + OriginDefault Origin = "default" + OriginCode Origin = "code" + OriginDDConfig Origin = "dd_config" + OriginEnvVar Origin = "env_var" + OriginRemoteConfig Origin = "remote_config" + OriginLocalStableConfig Origin = "local_stable_config" + OriginManagedStableConfig Origin = "fleet_stable_config" +) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/payload.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/payload.go new file mode 100644 index 00000000..1a4c0a1d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/payload.go @@ -0,0 +1,13 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package transport + +// Payload is the interface implemented by all telemetry top-level structures, AKA payloads. +// All structs are a strict representation of what is described in Instrumentation Telemetry v2 documentation schemas: +// https://github.com/DataDog/instrumentation-telemetry-api-docs/tree/dad49961203d74ec8236b68ce4b54bbb7ed8716f/GeneratedDocumentation/ApiDocs/v2/SchemaDocumentation/Schemas +type Payload interface { + RequestType() RequestType +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/requesttype.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/requesttype.go new file mode 100644 index 00000000..c1f1bd40 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport/requesttype.go @@ -0,0 +1,55 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
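Every payload type above satisfies the Payload interface by returning a constant request type instead of carrying it as a field. A minimal standalone illustration of that pattern (the types below are local mirrors for illustration, not the vendored package):

package main

import "fmt"

// Local mirrors of the vendored RequestType and Payload, for illustration only.
type RequestType string

type Payload interface {
	RequestType() RequestType
}

// appHeartbeat shows how a payload binds its request type at compile time.
type appHeartbeat struct{}

func (appHeartbeat) RequestType() RequestType { return "app-heartbeat" }

func main() {
	var p Payload = appHeartbeat{}
	fmt.Println(p.RequestType()) // app-heartbeat
}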
+ +package transport + +// RequestType determines how the Payload of a request should be handled +type RequestType string + +const ( + // RequestTypeAppStarted is the first message sent by the telemetry + // client, containing the configuration loaded at startup + RequestTypeAppStarted RequestType = "app-started" + + // RequestTypeAppHeartbeat is sent periodically by the client to indicate + // that the app is still running + RequestTypeAppHeartbeat RequestType = "app-heartbeat" + + // RequestTypeGenerateMetrics contains count, gauge, or rate metrics accumulated by the + // client, and is sent periodically along with the heartbeat + RequestTypeGenerateMetrics RequestType = "generate-metrics" + + // RequestTypeDistributions is used to send distribution-type metrics accumulated by the + // client, and is sent periodically along with the heartbeat + RequestTypeDistributions RequestType = "distributions" + + // RequestTypeAppClosing is sent when the telemetry client is stopped + RequestTypeAppClosing RequestType = "app-closing" + + // RequestTypeAppDependenciesLoaded is sent if DD_TELEMETRY_DEPENDENCY_COLLECTION_ENABLED + // is enabled. Sent when Start is called for the telemetry client. + RequestTypeAppDependenciesLoaded RequestType = "app-dependencies-loaded" + + // RequestTypeAppClientConfigurationChange is sent if there are changes + // to the client library configuration + RequestTypeAppClientConfigurationChange RequestType = "app-client-configuration-change" + + // RequestTypeAppProductChange is sent when products are enabled/disabled + RequestTypeAppProductChange RequestType = "app-product-change" + + // RequestTypeAppIntegrationsChange is sent when the telemetry client starts + // with info on which integrations are used. + RequestTypeAppIntegrationsChange RequestType = "app-integrations-change" + + // RequestTypeMessageBatch is a wrapper over a list of payloads + RequestTypeMessageBatch RequestType = "message-batch" + + // RequestTypeAppExtendedHeartBeat is used as a failsafe in case of catastrophic data failure; + // the data is used to reconstruct application records in the backend database. + RequestTypeAppExtendedHeartBeat RequestType = "app-extended-heartbeat" + + // RequestTypeLogs is used to send logs to the backend + RequestTypeLogs RequestType = "logs" +) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/writer.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/writer.go new file mode 100644 index 00000000..fc01dec2 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/writer.go @@ -0,0 +1,343 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc.
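The writer implemented in the file that follows streams the JSON body into the HTTP request through an io.Pipe, so the payload is encoded while the client reads it instead of being buffered in full. A reduced, self-contained sketch of that streaming-encode pattern (the httptest server and body map are stand-ins for the telemetry endpoint and payload):

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		n, _ := io.Copy(io.Discard, r.Body)
		fmt.Printf("server received %d bytes\n", n)
	}))
	defer srv.Close()

	body := map[string]any{"request_type": "app-heartbeat", "seq_id": 1}

	// The goroutine writes while the HTTP client reads from the other end
	// of the pipe; CloseWithError(nil) behaves like a plain Close.
	pr, pw := io.Pipe()
	go func() {
		pw.CloseWithError(json.NewEncoder(pw).Encode(body))
	}()

	resp, err := http.Post(srv.URL, "application/json", pr)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}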
+ +package internal + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "os" + "runtime" + "sync" + "time" + + "github.com/DataDog/dd-trace-go/v2/internal" + "github.com/DataDog/dd-trace-go/v2/internal/globalconfig" + "github.com/DataDog/dd-trace-go/v2/internal/hostname" + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/osinfo" + "github.com/DataDog/dd-trace-go/v2/internal/processtags" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport" + "github.com/DataDog/dd-trace-go/v2/internal/version" +) + +// We copy the transport to avoid using the default one, as it might be +// augmented with tracing and we don't want these calls to be recorded. +// See https://golang.org/pkg/net/http/#DefaultTransport . +var defaultHTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, + Timeout: 5 * time.Second, +} + +func newBody(config TracerConfig, debugMode bool) *transport.Body { + osHostname, err := os.Hostname() + if err != nil { + osHostname = hostname.Get() + } + + if osHostname == "" { + osHostname = "unknown" // hostname field is not allowed to be empty + } + + return &transport.Body{ + APIVersion: "v2", + RuntimeID: globalconfig.RuntimeID(), + Debug: debugMode, + Application: transport.Application{ + ServiceName: config.Service, + Env: config.Env, + ServiceVersion: config.Version, + TracerVersion: version.Tag, + LanguageName: "go", + LanguageVersion: runtime.Version(), + ProcessTags: processtags.GlobalTags().String(), + }, + Host: transport.Host{ + Hostname: osHostname, + OS: osinfo.OSName(), + OSVersion: osinfo.OSVersion(), + Architecture: osinfo.Architecture(), + KernelName: osinfo.KernelName(), + KernelRelease: osinfo.KernelRelease(), + KernelVersion: osinfo.KernelVersion(), + }, + } +} + +// Writer is an interface that allows sending telemetry data to any endpoint that implements the instrumentation telemetry v2 API. +// The telemetry data is sent as a JSON payload as described in the API documentation. +type Writer interface { + // Flush does a synchronous call to the telemetry endpoint with the given payload. Thread-safe. + // It returns a non-empty [EndpointRequestResult] slice and a nil error if the payload was sent successfully. + // Otherwise, the error is the result of [errors.Join] over all errors that occurred. + Flush(transport.Payload) ([]EndpointRequestResult, error) +} + +// EndpointRequestResult is returned by the Flush method of the Writer interface. +type EndpointRequestResult struct { + // Error is the error that occurred when sending the payload to the endpoint. This is nil if the payload was sent successfully. + Error error + // PayloadByteSize is the number of bytes that were sent to the endpoint, zero if the payload was not sent.
+ PayloadByteSize int + // CallDuration is the duration of the call to the endpoint if the call was successful + CallDuration time.Duration + // StatusCode is the status code of the response from the endpoint. It is set even when the call failed, as long as an actual HTTP response was received. + StatusCode int +} + +type writer struct { + mu sync.Mutex + body *transport.Body + bodyMu sync.Mutex + httpClient *http.Client + endpoints []*http.Request +} + +type WriterConfig struct { + // TracerConfig is the configuration the tracer sent when the telemetry client was created (required) + TracerConfig + // Endpoints is a list of requests that will be used alongside the body of the telemetry data to create the requests to the telemetry endpoint (must not be empty) + // The writer will try each endpoint in order until it gets a 2XX HTTP response from the server + Endpoints []*http.Request + // HTTPClient is the http client that will be used to send the telemetry data (defaults to a copy of [http.DefaultClient]) + HTTPClient *http.Client + // Debug is a flag that indicates whether the telemetry client is in debug mode (defaults to false) + Debug bool +} + +func NewWriter(config WriterConfig) (Writer, error) { + if len(config.Endpoints) == 0 { + return nil, fmt.Errorf("telemetry/writer: no endpoints provided") + } + + if config.HTTPClient == nil { + config.HTTPClient = defaultHTTPClient + } + + // Don't allow the client to have a timeout higher than 5 seconds + // This is to avoid blocking the client for too long in case of network issues + if config.HTTPClient.Timeout > 5*time.Second { + copyClient := *config.HTTPClient + config.HTTPClient = &copyClient + config.HTTPClient.Timeout = 5 * time.Second + } + + body := newBody(config.TracerConfig, config.Debug) + endpoints := make([]*http.Request, len(config.Endpoints)) + for i, endpoint := range config.Endpoints { + endpoints[i] = preBakeRequest(body, endpoint) + } + + return &writer{ + body: body, + httpClient: config.HTTPClient, + endpoints: endpoints, + }, nil +} + +// preBakeRequest adds all the *static* headers that we already know at the time of the creation of the writer. +// This is useful to avoid querying too many things at the time of the request.
+// The necessary headers are described here: +// https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/cf17b41a30fbf31d54e2cfbfc983875d58b02fe1/GeneratedDocumentation/ApiDocs/v2/overview.md#required-http-headers +func preBakeRequest(body *transport.Body, endpoint *http.Request) *http.Request { + clonedEndpoint := endpoint.Clone(context.Background()) + if clonedEndpoint.Header == nil { + clonedEndpoint.Header = make(http.Header, 11) + } + + for key, val := range map[string]string{ + "Content-Type": "application/json", + "DD-Telemetry-API-Version": body.APIVersion, + "DD-Client-Library-Language": body.Application.LanguageName, + "DD-Client-Library-Version": body.Application.TracerVersion, + "DD-Agent-Env": body.Application.Env, + "DD-Agent-Hostname": body.Host.Hostname, + "DD-Agent-Install-Id": globalconfig.InstrumentationInstallID(), + "DD-Agent-Install-Type": globalconfig.InstrumentationInstallType(), + "DD-Agent-Install-Time": globalconfig.InstrumentationInstallTime(), + "Datadog-Container-ID": internal.ContainerID(), + "Datadog-Entity-ID": internal.EntityID(), + // TODO: add support for Cloud provider/resource-type/resource-id headers in another PR and package + // Described here: https://github.com/DataDog/instrumentation-telemetry-api-docs/blob/cf17b41a30fbf31d54e2cfbfc983875d58b02fe1/GeneratedDocumentation/ApiDocs/v2/overview.md#setting-the-serverless-telemetry-headers + } { + if val == "" { + continue + } + clonedEndpoint.Header.Add(key, val) + } + + if body.Debug { + clonedEndpoint.Header.Add("DD-Telemetry-Debug-Enabled", "true") + } + + return clonedEndpoint +} + +// setPayloadToBody sets the payload to the body of the writer and misc fields that are necessary for the payload to be sent. +func (w *writer) setPayloadToBody(payload transport.Payload) { + w.bodyMu.Lock() + defer w.bodyMu.Unlock() + w.body.SeqID++ + w.body.TracerTime = time.Now().Unix() + w.body.RequestType = payload.RequestType() + w.body.Payload = payload +} + +// newRequest creates a new http.Request with the given payload and the necessary headers. +func (w *writer) newRequest(endpoint *http.Request, requestType transport.RequestType) *http.Request { + request := endpoint.Clone(context.Background()) + request.Header.Set("DD-Telemetry-Request-Type", string(requestType)) + + pipeReader, pipeWriter := io.Pipe() + request.Body = pipeReader + go func() { + var err error + defer func() { + // This should normally never happen, but since we are encoding arbitrary data from the client configuration values payload, we need to be careful. + if panicValue := recover(); panicValue != nil { + log.Error("telemetry/writer: panic while encoding payload!") + if err == nil { + panicErr, ok := panicValue.(error) // check if we can use the panic value as an error + if ok { + log.Error("telemetry/writer: panic while encoding payload: %v", panicErr.Error()) + } + pipeWriter.CloseWithError(panicErr) // CloseWithError with nil as parameter is like Close() + } + } + }() + + // If a previous endpoint is still trying to marshal the body, we need to wait for it to realize the pipe is closed and exit. + w.bodyMu.Lock() + defer w.bodyMu.Unlock() + + // No need to wait on this because the http client will close the pipeReader which will close the pipeWriter and finish the goroutine + err = json.NewEncoder(pipeWriter).Encode(w.body) + pipeWriter.CloseWithError(err) + }() + + return request +} + +// SumReaderCloser is a ReadCloser that wraps another ReadCloser and counts the number of bytes read.
+type SumReaderCloser struct { + io.ReadCloser + n int +} + +func (s *SumReaderCloser) Read(p []byte) (n int, err error) { + n, err = s.ReadCloser.Read(p) + s.n += n + return +} + +// WriterStatusCodeError is an error that is returned when the writer receives an unexpected status code from the server. +type WriterStatusCodeError struct { + Status string + Body string +} + +func (w *WriterStatusCodeError) Error() string { + return fmt.Sprintf("unexpected status code: %q (received body: %d bytes)", w.Status, len(w.Body)) +} + +func (w *writer) Flush(payload transport.Payload) ([]EndpointRequestResult, error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.setPayloadToBody(payload) + requestType := payload.RequestType() + + var results []EndpointRequestResult + for _, endpoint := range w.endpoints { + var ( + request = w.newRequest(endpoint, requestType) + sumReaderCloser = &SumReaderCloser{ReadCloser: request.Body} + now = time.Now() + ) + + request.Body = sumReaderCloser + response, err := w.httpClient.Do(request) + if err != nil { + results = append(results, EndpointRequestResult{Error: err}) + continue + } + + // We only have a few endpoints, so we can afford to keep the response body stream open until we are done with it + defer response.Body.Close() + + if response.StatusCode >= 300 || response.StatusCode < 200 { + respBodyBytes, _ := io.ReadAll(io.LimitReader(response.Body, 256)) // maybe we can find an error reason in the response body + results = append(results, EndpointRequestResult{Error: &WriterStatusCodeError{ + Status: response.Status, + Body: string(respBodyBytes), + }, StatusCode: response.StatusCode}) + continue + } + + results = append(results, EndpointRequestResult{ + PayloadByteSize: sumReaderCloser.n, + CallDuration: time.Since(now), + StatusCode: response.StatusCode, + }) + + // We succeeded, no need to try the other endpoints + break + } + + var err error + if results[len(results)-1].Error != nil { + var errs []error + for _, result := range results { + errs = append(errs, result.Error) + } + err = errors.Join(errs...) + } + + return results, err +} + +// RecordWriter is a Writer that stores the payloads in memory. Used for testing purposes +type RecordWriter struct { + mu sync.Mutex + payloads []transport.Payload +} + +func (w *RecordWriter) Flush(payload transport.Payload) ([]EndpointRequestResult, error) { + w.mu.Lock() + defer w.mu.Unlock() + w.payloads = append(w.payloads, payload) + return []EndpointRequestResult{ + { + PayloadByteSize: 1, + CallDuration: time.Nanosecond, + }, + }, nil +} + +func (w *RecordWriter) Payloads() []transport.Payload { + w.mu.Lock() + defer w.mu.Unlock() + copyPayloads := make([]transport.Payload, len(w.payloads)) + copy(copyPayloads, w.payloads) + return copyPayloads +} + +var _ Writer = (*RecordWriter)(nil) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/log/log.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/log/log.go new file mode 100644 index 00000000..cc23c2e8 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/log/log.go @@ -0,0 +1,54 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. 
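Flush above tries each endpoint in order, stops at the first success, and only when every endpoint fails joins the accumulated errors into a single one. A reduced sketch of that fallback-and-join control flow (send is a placeholder for the real HTTP call):

package main

import (
	"errors"
	"fmt"
)

func send(endpoint string) error {
	// Stand-in for the HTTP call; pretend every endpoint is unreachable.
	return fmt.Errorf("%s: connection refused", endpoint)
}

func flush(endpoints []string) error {
	var errs []error
	for _, ep := range endpoints {
		if err := send(ep); err != nil {
			errs = append(errs, err)
			continue // try the next endpoint
		}
		return nil // one success is enough, skip the remaining endpoints
	}
	return errors.Join(errs...) // nil when errs is empty
}

func main() {
	fmt.Println(flush([]string{"agent:8126", "intake.example.com"}))
}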
+ +package log + +import ( + "fmt" + + internallog "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry" +) + +func divideArgs(args []any) ([]telemetry.LogOption, []any) { + if len(args) == 0 { + return nil, nil + } + + var options []telemetry.LogOption + var fmtArgs []any + for _, arg := range args { + if opt, ok := arg.(telemetry.LogOption); ok { + options = append(options, opt) + } else { + fmtArgs = append(fmtArgs, arg) + } + } + return options, fmtArgs +} + +// Debug sends a telemetry payload with a debug log message to the backend. +func Debug(format string, args ...any) { + log(telemetry.LogDebug, format, args) +} + +// Warn sends a telemetry payload with a warning log message to the backend and the console as a debug log. +func Warn(format string, args ...any) { + log(telemetry.LogWarn, format, args) +} + +// Error sends a telemetry payload with an error log message to the backend and the console as a debug log. +func Error(format string, args ...any) { + log(telemetry.LogError, format, args) +} + +func log(lvl telemetry.LogLevel, format string, args []any) { + opts, fmtArgs := divideArgs(args) + telemetry.Log(lvl, fmt.Sprintf(format, fmtArgs...), opts...) + + if lvl != telemetry.LogDebug { + internallog.Debug(format, fmtArgs...) //nolint:gocritic // Telemetry log plumbing needs to pass through variable format strings + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/logger.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/logger.go new file mode 100644 index 00000000..c3db3c02 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/logger.go @@ -0,0 +1,122 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package telemetry + +import ( + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/puzpuzpuz/xsync/v3" + + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport" +) + +// WithTags returns a LogOption that sets the tags for the telemetry log message. Tags are key-value pairs that are then +// serialized into a simple "key:value,key2:value2" format. No quoting or escaping is performed. +func WithTags(tags []string) LogOption { + compiledTags := strings.Join(tags, ",") + return func(key *loggerKey, _ *loggerValue) { + if key == nil { + return + } + key.tags = compiledTags + } +} + +// WithStacktrace returns a LogOption that sets the stacktrace for the telemetry log message. The stacktrace is a string +// that is captured when WithStacktrace is called. Log deduplication does not take the stacktrace into account, +// so a log that has been deduplicated only carries the stacktrace of its first occurrence.
+func WithStacktrace() LogOption { + buf := make([]byte, 4_096) + buf = buf[:runtime.Stack(buf, false)] + return func(_ *loggerKey, value *loggerValue) { + if value == nil { + return + } + value.stacktrace = string(buf) + } +} + +type loggerKey struct { + tags string + message string + level LogLevel +} + +type loggerValue struct { + count atomic.Uint32 + stacktrace string + time int64 // Unix timestamp +} + +type logger struct { + store *xsync.MapOf[loggerKey, *loggerValue] + + distinctLogs atomic.Int32 + maxDistinctLogs int32 + onceMaxLogsReached sync.Once +} + +func (logger *logger) Add(level LogLevel, text string, opts ...LogOption) { + if logger.distinctLogs.Load() >= logger.maxDistinctLogs { + logger.onceMaxLogsReached.Do(func() { + logger.add(LogError, "telemetry: log count exceeded maximum, dropping log", WithStacktrace()) + }) + return + } + + logger.add(level, text, opts...) +} + +func (logger *logger) add(level LogLevel, text string, opts ...LogOption) { + key := loggerKey{ + message: text, + level: level, + } + + for _, opt := range opts { + opt(&key, nil) + } + + value, _ := logger.store.LoadOrCompute(key, func() *loggerValue { + value := &loggerValue{ + time: time.Now().Unix(), + } + for _, opt := range opts { + opt(nil, value) + } + logger.distinctLogs.Add(1) + return value + }) + + value.count.Add(1) +} + +func (logger *logger) Payload() transport.Payload { + logs := make([]transport.LogMessage, 0, logger.store.Size()+1) + logger.store.Range(func(key loggerKey, value *loggerValue) bool { + logger.store.Delete(key) + logger.distinctLogs.Add(-1) + logs = append(logs, transport.LogMessage{ + Message: key.message, + Level: key.level, + Tags: key.tags, + Count: value.count.Load(), + StackTrace: value.stacktrace, + TracerTime: value.time, + }) + return true + }) + + if len(logs) == 0 { + return nil + } + + return transport.Logs{Logs: logs} +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/metrichandle.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/metrichandle.go new file mode 100644 index 00000000..65e5644d --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/metrichandle.go @@ -0,0 +1,72 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package telemetry + +import ( + "math" + "sync" + "sync/atomic" + + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal" +) + +// noopMetricHandle is a no-op implementation of a metric handle. +type noopMetricHandle struct{} + +func (noopMetricHandle) Submit(_ float64) {} + +func (noopMetricHandle) Get() float64 { + return math.NaN() +} + +var metricLogLossOnce sync.Once + +// swappableMetricHandle is a MetricHandle that holds a pointer to another MetricHandle and a recorder to replay actions done before the actual MetricHandle is set. 
+type swappableMetricHandle struct { + ptr atomic.Pointer[MetricHandle] + recorder internal.Recorder[MetricHandle] + maker func(client Client) MetricHandle +} + +func (t *swappableMetricHandle) Submit(value float64) { + if Disabled() { + return + } + + inner := t.ptr.Load() + if inner == nil || *inner == nil { + if !t.recorder.Record(func(handle MetricHandle) { + handle.Submit(value) + }) { + metricLogLossOnce.Do(func() { + msg := "telemetry: metric is losing values because the telemetry client has not been started yet, dropping telemetry data, please start the telemetry client earlier to avoid data loss" + log.Debug("%s\n", msg) + Log(LogError, msg, WithStacktrace()) + }) + } + return + } + + (*inner).Submit(value) +} + +func (t *swappableMetricHandle) Get() float64 { + inner := t.ptr.Load() + if inner == nil || *inner == nil { + return 0 + } + + return (*inner).Get() +} + +func (t *swappableMetricHandle) swap(handle MetricHandle) { + if t.ptr.Swap(&handle) == nil { + t.recorder.Replay(handle) + } +} + +var _ MetricHandle = (*swappableMetricHandle)(nil) diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/metrics.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/metrics.go new file mode 100644 index 00000000..8715cd7b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/metrics.go @@ -0,0 +1,256 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package telemetry + +import ( + "fmt" + "math" + "sort" + "strings" + "sync/atomic" + "time" + + "github.com/puzpuzpuz/xsync/v3" + + "github.com/DataDog/dd-trace-go/v2/internal/log" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/knownmetrics" + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport" +) + +// metricKey is used as a key in the metrics store hash map. +type metricKey struct { + namespace Namespace + kind transport.MetricType + name string + tags string +} + +func (k metricKey) SplitTags() []string { + if k.tags == "" { + return nil + } + return strings.Split(k.tags, ",") +} + +func (k metricKey) String() string { + return fmt.Sprintf("%s.%s.%s.%s", k.namespace, k.kind, k.name, k.tags) +} + +func validateMetricKey(namespace Namespace, kind transport.MetricType, name string, tags []string) error { + if len(name) == 0 { + return fmt.Errorf("metric name with tags %v must not be empty", tags) + } + + if !knownmetrics.IsKnownMetric(namespace, kind, name) { + return fmt.Errorf("metric name %q of kind %q in namespace %q is not a known metric, please update the list of metric names by running ./scripts/gen_known_metrics.sh or check that you wrote the name correctly. "+ + "The metric will still be sent", name, string(kind), namespace) + } + + for _, tag := range tags { + if len(tag) == 0 { + return fmt.Errorf("metric %q should not have empty tags", name) + } + + if strings.Contains(tag, ",") { + return fmt.Errorf("metric %q tag %q should not contain commas", name, tag) + } + } + + return nil +} + +// newMetricKey returns a new metricKey with the given parameters with the tags sorted and joined by commas.
+func newMetricKey(namespace Namespace, kind transport.MetricType, name string, tags []string) metricKey { + sort.Strings(tags) + return metricKey{namespace: namespace, kind: kind, name: name, tags: strings.Join(tags, ",")} +} + +// metricHandle is the internal equivalent of MetricHandle for count/rate/gauge metrics that are sent via the payload [transport.GenerateMetrics]. +type metricHandle interface { + MetricHandle + Payload() transport.MetricData +} + +type metrics struct { + store *xsync.MapOf[metricKey, metricHandle] + skipAllowlist bool // Debugging feature to skip the allowlist of known metrics +} + +// LoadOrStore returns a MetricHandle for the given metric key. If the metric key does not exist, it will be created. +func (m *metrics) LoadOrStore(namespace Namespace, kind transport.MetricType, name string, tags []string) MetricHandle { + + var ( + key = newMetricKey(namespace, kind, name, tags) + handle MetricHandle + loaded bool + ) + switch kind { + case transport.CountMetric: + handle, loaded = m.store.LoadOrCompute(key, func() metricHandle { return &count{metric: metric{key: key}} }) + case transport.GaugeMetric: + handle, loaded = m.store.LoadOrCompute(key, func() metricHandle { return &gauge{metric: metric{key: key}} }) + case transport.RateMetric: + handle, loaded = m.store.LoadOrCompute(key, func() metricHandle { + rate := &rate{count: count{metric: metric{key: key}}} + now := time.Now() + rate.intervalStart.Store(&now) + return rate + }) + default: + log.Warn("telemetry: unknown metric type %q", kind) + return nil + } + + if !loaded && !m.skipAllowlist { // The metric is new: validate and log issues about it + if err := validateMetricKey(namespace, kind, name, tags); err != nil { + log.Warn("telemetry: %s", err.Error()) + } + } + + return handle +} + +func (m *metrics) Payload() transport.Payload { + series := make([]transport.MetricData, 0, m.store.Size()) + m.store.Range(func(_ metricKey, handle metricHandle) bool { + if payload := handle.Payload(); payload.Type != "" { + series = append(series, payload) + } + return true + }) + + if len(series) == 0 { + return nil + } + + return transport.GenerateMetrics{Series: series, SkipAllowlist: m.skipAllowlist} +} + +type metricPoint struct { + value float64 + time time.Time +} + +// metric is the shared base of the concrete metric handles: it stores the metric key and the most recent point. +type metric struct { + key metricKey + ptr atomic.Pointer[metricPoint] +} + +func (m *metric) Get() float64 { + if ptr := m.ptr.Load(); ptr != nil { + return ptr.value + } + + return math.NaN() +} + +func (m *metric) Payload() transport.MetricData { + point := m.ptr.Swap(nil) + if point == nil { + return transport.MetricData{} + } + return m.payload(point) +} + +func (m *metric) payload(point *metricPoint) transport.MetricData { + if point == nil { + return transport.MetricData{} + } + + return transport.MetricData{ + Metric: m.key.name, + Namespace: m.key.namespace, + Tags: m.key.SplitTags(), + Type: m.key.kind, + Common: knownmetrics.IsCommonMetric(m.key.namespace, m.key.kind, m.key.name), + Points: [][2]any{ + {point.time.Unix(), point.value}, + }, + } +} + +// count is a metric that represents a single value that is incremented over time and is reset to zero when flushed +type count struct { + metric +} + +func (m *count) Submit(newValue float64) { + newPoint := new(metricPoint) + newPoint.time = time.Now() + for { + oldPoint := m.ptr.Load() + var oldValue float64 + if oldPoint != nil { + oldValue = oldPoint.value + } + newPoint.value = oldValue + newValue + if m.ptr.CompareAndSwap(oldPoint, newPoint) { + return + } + 
} +} + +// gauge is a metric that represents a single value at a point in time that is not incremental +type gauge struct { + metric +} + +func (g *gauge) Submit(value float64) { + newPoint := new(metricPoint) + newPoint.time = time.Now() + newPoint.value = value + for { + oldPoint := g.ptr.Load() + if g.ptr.CompareAndSwap(oldPoint, newPoint) { + return + } + } +} + +// rate is like a count metric but the value sent is divided by an interval of time that is also sent. +type rate struct { + count + intervalStart atomic.Pointer[time.Time] +} + +func (r *rate) Get() float64 { + sum := r.count.Get() + intervalStart := r.intervalStart.Load() + if intervalStart == nil { + return math.NaN() + } + + intervalSeconds := time.Since(*intervalStart).Seconds() + if int64(intervalSeconds) == 0 { // Interval for rate is too small, we prefer not sending data over sending something wrong + return math.NaN() + } + + return sum / intervalSeconds +} + +func (r *rate) Payload() transport.MetricData { + now := time.Now() + intervalStart := r.intervalStart.Swap(&now) + if intervalStart == nil { + return transport.MetricData{} + } + + intervalSeconds := time.Since(*intervalStart).Seconds() + if int64(intervalSeconds) == 0 { // Interval for rate is too small, we prefer not sending data over sending something wrong + return transport.MetricData{} + } + + point := r.ptr.Swap(nil) + if point == nil { + return transport.MetricData{} + } + + point.value /= intervalSeconds + payload := r.metric.payload(point) + payload.Interval = int64(intervalSeconds) + return payload +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/product.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/product.go new file mode 100644 index 00000000..16642874 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/product.go @@ -0,0 +1,51 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc.
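count.Submit above accumulates values with a lock-free compare-and-swap loop over an atomic.Pointer. The same pattern as a runnable standalone sketch (the point type mirrors the vendored metricPoint for illustration):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type point struct {
	value float64
	time  time.Time
}

// submit adds delta to the current point with a compare-and-swap loop,
// the same lock-free accumulation count.Submit uses above.
func submit(ptr *atomic.Pointer[point], delta float64) {
	newPoint := &point{time: time.Now()}
	for {
		old := ptr.Load()
		var oldValue float64
		if old != nil {
			oldValue = old.value
		}
		newPoint.value = oldValue + delta
		if ptr.CompareAndSwap(old, newPoint) {
			return
		}
		// Lost the race with a concurrent submit; reload and retry.
	}
}

func main() {
	var ptr atomic.Pointer[point]
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); submit(&ptr, 1) }()
	}
	wg.Wait()
	fmt.Println(ptr.Load().value) // 100
}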
+ +package telemetry + +import ( + "sync" + + "github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/transport" +) + +type products struct { + mu sync.Mutex + products map[Namespace]transport.Product +} + +func (p *products) Add(namespace Namespace, enabled bool, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.products == nil { + p.products = make(map[Namespace]transport.Product) + } + + product := transport.Product{ + Enabled: enabled, + } + + if err != nil { + product.Error = transport.Error{ + Message: err.Error(), + } + } + + p.products[namespace] = product +} + +func (p *products) Payload() transport.Payload { + p.mu.Lock() + defer p.mu.Unlock() + if len(p.products) == 0 { + return nil + } + + res := transport.AppProductChange{ + Products: p.products, + } + p.products = nil + return res +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/trace_context.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/trace_context.go similarity index 97% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/trace_context.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/trace_context.go index 78620b69..57551220 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/trace_context.go +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/trace_context.go @@ -8,7 +8,7 @@ package internal import ( "context" - "gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion" + "github.com/DataDog/dd-trace-go/v2/internal/orchestrion" ) type executionTracedKey struct{} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/trace_source.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/trace_source.go new file mode 100644 index 00000000..3c92bf31 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/trace_source.go @@ -0,0 +1,73 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package internal + +import ( + "fmt" + "strconv" + + "github.com/DataDog/dd-trace-go/v2/internal/log" +) + +// TraceSource represents the 8-bit bitmask for the _dd.p.ts tag +type TraceSource uint8 + +const ( + APMTraceSource TraceSource = 0x01 + ASMTraceSource TraceSource = 0x02 + DSMTraceSource TraceSource = 0x04 + DJMTraceSource TraceSource = 0x08 + DBMTraceSource TraceSource = 0x10 +) + +// String converts the bitmask to a two-character hexadecimal string +func (ts TraceSource) String() string { + return fmt.Sprintf("%02X", uint8(ts)) +} + +// ParseTraceSource parses a hexadecimal string into a TraceSource bitmask +func ParseTraceSource(hexStr string) (TraceSource, error) { + // Ensure at least 2 chars, allowing up to 8 for forward compatibility (32-bit) + if len(hexStr) < 2 || len(hexStr) > 8 { + return 0, fmt.Errorf("invalid length for TraceSource mask, expected 2 to 8 characters") + } + + // Parse the full mask as a 32-bit unsigned integer + value, err := strconv.ParseUint(hexStr, 16, 32) + if err != nil { + return 0, fmt.Errorf("invalid hexadecimal format: %w", err) + } + + // Extract only the least significant 8 bits (ensuring compliance with 8-bit mask) + return TraceSource(value & 0xFF), nil +} + +func VerifyTraceSourceEnabled(hexStr string, target TraceSource) bool { + ts, err := ParseTraceSource(hexStr) + if err != nil { + if len(hexStr) != 0 { // Empty trace source should not trigger an error log. 
+ log.Error("invalid trace-source hex string given for source verification: %s", err.Error()) + } + return false + } + + return ts.IsSet(target) +} + +// Set enables specific TraceSource (bit) in the bitmask +func (ts *TraceSource) Set(src TraceSource) { + *ts |= src +} + +// Unset disables specific TraceSource (bit) in the bitmask +func (ts *TraceSource) Unset(src TraceSource) { + *ts &^= src +} + +// IsSet checks if a specific TraceSource (bit) is enabled +func (ts TraceSource) IsSet(src TraceSource) bool { + return ts&src != 0 +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/endpoint_counter.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/traceprof/endpoint_counter.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/endpoint_counter.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/traceprof/endpoint_counter.go diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/profiler.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/traceprof/profiler.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/profiler.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/traceprof/profiler.go diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/traceprof.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/traceprof/traceprof.go similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof/traceprof.go rename to vendor/github.com/DataDog/dd-trace-go/v2/internal/traceprof/traceprof.go diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/tracer.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/tracer.go new file mode 100644 index 00000000..da7a4c5f --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/tracer.go @@ -0,0 +1,20 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package internal + +import "sync/atomic" + +var ( + tracerInit atomic.Bool +) + +func SetTracerInitialized(val bool) { + tracerInit.Store(val) +} + +func TracerInitialized() bool { + return tracerInit.Load() +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/uds.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/uds.go new file mode 100644 index 00000000..4f549490 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/uds.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025 Datadog, Inc. + +package internal + +import ( + "fmt" + "net/url" + "strings" +) + +func UnixDataSocketURL(path string) *url.URL { + return &url.URL{ + Scheme: "http", + Host: fmt.Sprintf("UDS_%s", strings.NewReplacer(":", "_", "/", "_", `\`, "_").Replace(path)), + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/urlsanitizer/sanitizer.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/urlsanitizer/sanitizer.go new file mode 100644 index 00000000..11cf8a64 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/urlsanitizer/sanitizer.go @@ -0,0 +1,70 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
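The `_dd.p.ts` bitmask above composes by OR-ing per-product bits and serializes as a two-character hex string. A hedged, standalone sketch of that round trip follows; the type and two constants are copied here only so the example compiles on its own.

```go
package main

import "fmt"

// TraceSource reproduces the 8-bit bitmask from trace_source.go above,
// reduced to two products for brevity.
type TraceSource uint8

const (
	APMTraceSource TraceSource = 0x01
	ASMTraceSource TraceSource = 0x02
)

func (ts *TraceSource) Set(src TraceSource)       { *ts |= src }
func (ts TraceSource) IsSet(src TraceSource) bool { return ts&src != 0 }
func (ts TraceSource) String() string             { return fmt.Sprintf("%02X", uint8(ts)) }

func main() {
	var ts TraceSource
	ts.Set(APMTraceSource)
	ts.Set(ASMTraceSource)
	fmt.Println(ts)                       // 03
	fmt.Println(ts.IsSet(ASMTraceSource)) // true
}
```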
+// This product includes software developed at Datadog (https://www.datadoghq.com/) +// Copyright 2016 Datadog, Inc. + +// Package urlsanitizer provides utilities for sanitizing URLs and DSNs by removing sensitive information. +package urlsanitizer + +import ( + "net/url" + "strings" +) + +// SanitizeURL removes user credentials from URLs for safe logging. +// It uses Go's built-in url.Redacted() when possible, which preserves usernames but redacts passwords. +// If the URL can't be parsed but appears to contain credentials, it's fully redacted for security. +func SanitizeURL(rawURL string) string { + if rawURL == "" { + return rawURL + } + + parsedURL, err := url.Parse(rawURL) + if err != nil { + // If parsing fails but we suspect credentials, + // redact entirely for safety. + if containsCredentials(rawURL) { + return "[REDACTED_URL_WITH_CREDENTIALS]" + } + // If no credentials suspected, + // return as-is (might just be a malformed URL without sensitive data) + return rawURL + } + + // If URL has user info, + // use Go's built-in redaction (preserves username, redacts password). + if parsedURL.User != nil { + return parsedURL.Redacted() + } + + // No credentials detected, return as-is + return rawURL +} + +// containsCredentials checks if a URL string might contain credentials +func containsCredentials(rawURL string) bool { + // Look for patterns that suggest credentials: username:password@host + // This is a simple heuristic - if we see :...@ we assume credentials. + if !strings.Contains(rawURL, "://") { + return false + } + + // Find the scheme part. + schemeEnd := strings.Index(rawURL, "://") + if schemeEnd == -1 { + return false + } + + // Look in the part after the scheme for credentials. + rest := rawURL[schemeEnd+3:] + + // If we see a colon followed by an @ sign, likely credentials. + colonIndex := strings.Index(rest, ":") + if colonIndex == -1 { + return false + } + + // Check if there's an @ after the colon + atIndex := strings.Index(rest[colonIndex:], "@") + return atIndex != -1 +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/utils.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/utils.go new file mode 100644 index 00000000..9c18108b --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/utils.go @@ -0,0 +1,152 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package internal + +import ( + "sync" + "sync/atomic" + + "github.com/DataDog/dd-trace-go/v2/internal/samplernames" + xsync "github.com/puzpuzpuz/xsync/v3" +) + +// OtelTagsDelimeter is the separator between key-val pairs for OTEL env vars +const OtelTagsDelimeter = "=" + +// DDTagsDelimiter is the separator between key-val pairs for DD env vars +const DDTagsDelimiter = ":" + +// LockMap uses an RWMutex to synchronize map access to allow for concurrent access. +// This should not be used for cases with heavy write load and performance concerns. +type LockMap struct { + sync.RWMutex + c uint32 + m map[string]string +} + +func NewLockMap(m map[string]string) *LockMap { + return &LockMap{m: m, c: uint32(len(m))} +} + +// Iter iterates over all the map entries passing in keys and values to provided func f. Note this is READ ONLY. 
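`SanitizeURL` above leans on the standard library's `url.Redacted`, which keeps the username and masks only the password. A quick demonstration of that behavior (the DSN below is made up):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Redacted keeps the username but replaces the password with "xxxxx".
	u, err := url.Parse("postgres://app:hunter2@db.internal:5432/prod")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Redacted()) // postgres://app:xxxxx@db.internal:5432/prod
}
```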
+func (l *LockMap) Iter(f func(key string, val string)) { + c := atomic.LoadUint32(&l.c) + if c == 0 { //Fast exit to avoid the cost of RLock/RUnlock for empty maps + return + } + l.RLock() + defer l.RUnlock() + for k, v := range l.m { + f(k, v) + } +} + +func (l *LockMap) Len() int { + l.RLock() + defer l.RUnlock() + return len(l.m) +} + +func (l *LockMap) Clear() { + l.Lock() + defer l.Unlock() + l.m = map[string]string{} + atomic.StoreUint32(&l.c, 0) +} + +func (l *LockMap) Set(k, v string) { + l.Lock() + defer l.Unlock() + if _, ok := l.m[k]; !ok { + atomic.AddUint32(&l.c, 1) + } + l.m[k] = v +} + +func (l *LockMap) Get(k string) string { + l.RLock() + defer l.RUnlock() + return l.m[k] +} + +// XSyncMapCounterMap uses xsync to protect counter increments and reads during +// concurrent access. +// Implementation and related tests were taken/inspired by felixge/countermap +// https://github.com/felixge/countermap/pull/2 +type XSyncMapCounterMap struct { + counts *xsync.MapOf[string, *xsync.Counter] +} + +func NewXSyncMapCounterMap() *XSyncMapCounterMap { + return &XSyncMapCounterMap{counts: xsync.NewMapOf[string, *xsync.Counter]()} +} + +func (cm *XSyncMapCounterMap) Inc(key string) { + val, ok := cm.counts.Load(key) + if !ok { + val, _ = cm.counts.LoadOrStore(key, xsync.NewCounter()) + } + val.Inc() +} + +func (cm *XSyncMapCounterMap) GetAndReset() map[string]int64 { + ret := map[string]int64{} + cm.counts.Range(func(key string, _ *xsync.Counter) bool { + v, ok := cm.counts.LoadAndDelete(key) + if ok { + ret[key] = v.Value() + } + return true + }) + return ret +} + +// ToFloat64 attempts to convert value into a float64. If the value is an integer +// greater than or equal to 2^53 or less than or equal to -2^53, it will not be converted +// into a float64 to avoid losing precision. If it succeeds in converting, ToFloat64 +// returns the value and true, otherwise 0 and false. +func ToFloat64(value any) (f float64, ok bool) { + const maxFloat = (int64(1) << 53) - 1 + const minFloat = -maxFloat + // If any other type is added here, remember to add it to the type switch in + // the `span.SetTag` function to handle pointers to these supported types. + switch i := value.(type) { + case byte: + return float64(i), true + case float32: + return float64(i), true + case float64: + return i, true + case int: + return float64(i), true + case int8: + return float64(i), true + case int16: + return float64(i), true + case int32: + return float64(i), true + case int64: + if i > maxFloat || i < minFloat { + return 0, false + } + return float64(i), true + case uint: + return float64(i), true + case uint16: + return float64(i), true + case uint32: + return float64(i), true + case uint64: + if i > uint64(maxFloat) { + return 0, false + } + return float64(i), true + case samplernames.SamplerName: + return float64(i), true + default: + return 0, false + } +} diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/version/version.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/version/version.go new file mode 100644 index 00000000..b8a01031 --- /dev/null +++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/version/version.go @@ -0,0 +1,117 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc.
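`ToFloat64` above refuses integers at or beyond 2^53 because float64 has a 53-bit significand; past that point consecutive integers collide. A short demonstration of the boundary:

```go
package main

import "fmt"

func main() {
	const limit = int64(1) << 53 // above this, float64 can no longer represent every integer

	fmt.Println(float64(limit-1) == float64(limit-2)) // false: both still exact
	fmt.Println(float64(limit) == float64(limit+1))   // true: 2^53+1 rounds to 2^53
}
```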
+ +package version + +import ( + "runtime/debug" + "strconv" + "strings" + "sync" + + "github.com/Masterminds/semver/v3" +) + +// Tag specifies the current release tag. It needs to be manually +// updated. A test checks that the value of Tag never points to a +// git tag that is older than HEAD. +var Tag = "v2.3.0" + +type v1version struct { + Transitional bool + Version string +} + +var v1Tag *v1version + +// Dissected version number. Filled during init() +var ( + // Major is the current major version number + Major int + // Minor is the current minor version number + Minor int + // Patch is the current patch version number + Patch int + // RC is the current release candidate version number + RC int + // once is used to ensure that the v1 version is only found once + once sync.Once +) + +func FindV1Version() (string, bool, bool) { + once.Do(func() { + info, ok := debug.ReadBuildInfo() + if !ok { + return + } + v1Tag = findV1Version(info.Deps) + }) + if v1Tag == nil { + return "", false, false + } + return v1Tag.Version, v1Tag.Transitional, true +} + +func init() { + // Check if we are using a transitional v1.74.x or later version + vt, _, found := FindV1Version() + if found { + Tag = vt + } + v := parseVersion(Tag) + Major, Minor, Patch, RC = v.Major, v.Minor, v.Patch, v.RC +} + +func findV1Version(deps []*debug.Module) *v1version { + var version string + for _, dep := range deps { + if dep.Path != "gopkg.in/DataDog/dd-trace-go.v1" { + continue + } + version = dep.Version + break + } + if version == "" { + return nil + } + vt := &v1version{ + Version: version, + } + v := parseVersion(vt.Version) + if v.Major == 1 && v.Minor >= 74 { + vt.Transitional = true + } + return vt +} + +type version struct { + Major int + Minor int + Patch int + RC int +} + +func parseVersion(value string) version { + var ( + parsedVersion = semver.MustParse(value) + v = version{ + Major: int(parsedVersion.Major()), + Minor: int(parsedVersion.Minor()), + Patch: int(parsedVersion.Patch()), + } + ) + + pr := parsedVersion.Prerelease() + if pr == "" || pr == "dev" { + return v + } + + split := strings.Split(pr, ".") + if len(split) > 1 { + v.RC, _ = strconv.Atoi(split[1]) + } + + return v +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/LICENSE b/vendor/github.com/DataDog/go-libddwaf/v3/LICENSE deleted file mode 100644 index 9301dd7a..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/LICENSE +++ /dev/null @@ -1,200 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-present Datadog, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/README.md b/vendor/github.com/DataDog/go-libddwaf/v3/README.md deleted file mode 100644 index 56607916..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# go-libddwaf - -This project's goal is to produce a higher-level API for the Go bindings to [libddwaf](https://github.com/DataDog/libddwaf): DataDog in-app WAF. -It consists of 2 separate entities: the bindings for the calls to libddwaf, and the encoder whose job is to convert _any_ Go value to its libddwaf object representation. - -An example usage would be: - -```go -import waf "github.com/DataDog/go-libddwaf/v3" - -//go:embed -var ruleset []byte - -func main() { - var parsedRuleset any - - if err := json.Unmarshal(ruleset, &parsedRuleset); err != nil { - panic(err) - } - - wafHandle, err := waf.NewHandle(parsedRuleset, "", "") - if err != nil { - panic(err) - } - - defer wafHandle.Close() - - wafCtx := wafHandle.NewContext() - defer wafCtx.Close() - - matches, actions := wafCtx.Run(RunAddressData{ - Persistent: map[string]any{ - "server.request.path_params": "/rfiinc.txt", - }, - }) -} -``` - -The API documentation details can be found on [pkg.go.dev](https://pkg.go.dev/github.com/DataDog/go-libddwaf/v3). - -Originally this project only provided CGO wrappers for the calls to libddwaf. -But with the appearance of the `ddwaf_object` tree-like structure, -and with the intention to build CGO-less bindings, the project has grown into a fully integrated brick in the DataDog tracer structure, -which in turn made it necessary to document the project and to maintain it in an orderly fashion. - -## Supported platforms - -This library currently supports the following platform doublets: - -| OS | Arch | -| ----- | ------- | -| Linux | amd64 | -| Linux | aarch64 | -| OSX | amd64 | -| OSX | arm64 | - -This means that when the platform is not supported, top-level functions will return a `WafDisabledError` error explaining why the WAF is disabled. - -Note that: -* Linux support includes glibc and musl variants -* OSX under 10.9 is not supported -* A build tag named `datadog.no_waf` can be manually added to force the WAF to be disabled. - -## Design - -The WAF bindings have multiple moving parts that are necessary to understand: - -- Handle: an object wrapper over the pointer to the C WAF Handle -- Context: an object wrapper over a pointer to the C WAF Context -- Encoder: its goal is to construct a tree of Waf Objects to send to the WAF -- CGORefPool: Does all allocation operations for the construction of Waf Objects and keeps track of the equivalent Go pointers -- Decoder: Transforms Waf Objects returned from the WAF to usual Go objects (e.g. maps, arrays, ...) -- Library: The low-level Go bindings to the C library, providing improved typing - -```mermaid -flowchart LR - - START:::hidden -->|NewHandle| Handle -->|NewContext| Context - - Context -->|Encode Inputs| Encoder - - Handle -->|Encode Ruleset| Encoder - Handle -->|Init WAF| Library - Context -->|Decode Result| Decoder - - Handle -->|Decode Init Errors| Decoder - - Context -->|Run| Library - Context -->|Store Go References| CGORefPool - - Encoder -->|Allocate Waf Objects| TempCGORefPool - - TempCGORefPool -->|Copy after each encoding| CGORefPool - - Library -->|Call C code| libddwaf - - classDef hidden display: none; -``` - -### CGO Reference Pool - -The cgoRefPool type is a pure Go pointer pool of `ddwaf_object` C values on the Go memory heap.
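The paragraph below spells out why `uintptr` storage hides data from the garbage collector. As a hedged, standalone illustration of that hazard (the names are mine, not the library's):

```go
package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

func main() {
	buf := []byte("hello")
	raw := uintptr(unsafe.Pointer(&buf[0])) // just an integer: invisible to the GC

	// ... the C side would read from raw here ...
	fmt.Printf("raw=%#x\n", raw)

	runtime.KeepAlive(buf) // buf must stay reachable until every C read of raw is done
}
```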
-The `cgoRefPool` Go type is how we make sure we can safely send Go-allocated data to the C side of the WAF. -The main issue is the following: the `WafObject` uses a C union to store the tree structure of the full object; -the union equivalent in Go is the interface, which is not compatible with C unions. The only way to be 100% sure -that the Go `WafObject` struct has the same layout as the C one is to only use primitive types. So the only way to -store a raw pointer is to use the `uintptr` type. But since `uintptr` does not have pointer semantics (it is -basically just an integer), we need another method to store the value as a Go pointer, because the GC will delete our data if it -is not referenced by Go pointers. - -That's where the `cgoRefPool` object comes into play: all new `WafObject` elements are created via this API which is especially -built to make sure there is no gap for the Garbage Collector to exploit. From there, since underlying values of the -`wafObject` are either arrays of WafObjects (for maps, structs and arrays) or strings (for all ints, booleans and strings), -we can store 2 slices of arrays and use `unsafe.KeepAlive` in each code path to protect them from the GC. - -All these objects stored in the reference pool need to live throughout the use of the associated Waf Context. - -### Typical call to Run() - -Here is an example of the flow of operations on a simple call to Run(): - -- Encode input data into WAF Objects and store references in the temporary pool -- Lock the context mutex until the end of the call -- Store references from the temporary pool into the context level pool -- Call `ddwaf_run` -- Decode the matches and actions - -### CGO-less C Bindings - -This library uses [purego](https://github.com/ebitengine/purego) to implement C bindings without requiring use of CGO at compilation time. The high-level workflow -is to embed the C shared library using `go:embed`, dump it into a file, open the library using `dlopen`, load the -symbols using `dlsym`, and finally call them. - -> :warning: Keep in mind that **purego only works on linux/darwin for amd64/arm64 and so does go-libddwaf.** - -Another requirement of `libddwaf` is to have a FHS filesystem on your machine and, for linux, to provide `libc.so.6`, -`libpthread.so.0`, and `libdl.so.2` as dynamic libraries. - -## Contributing pitfalls - -- Cannot dlopen twice in the app lifetime on OSX. It messes with Thread Local Storage and usually finishes with a `std::bad_alloc()` -- `keepAlive()` calls are here to prevent the GC from destroying objects too early -- Since there is a stack switch between the Go code and the C code, usually the only C stacktrace you will ever get is from GDB -- If a segfault happens during a call to the C code, the goroutine stacktrace that made the call is the one annotated with `[syscall]` -- [GoLand](https://www.jetbrains.com/go/) does not support `CGO_ENABLED=0` (as of June 2023) -- Keep in mind that we fully escape the type system. If you send the wrong data it will segfault in the best cases but not always! -- The structs in `ctypes.go` are here to reproduce the memory layout of the structs in `include/ddwaf.h` because pointers to these structs will be passed directly -- Do not use `uintptr` as function arguments or result types, coming from `unsafe.Pointer` casts of Go values, because they escape the pointer analysis which can create wrongly optimized code and crash. Pointer arithmetic is of course necessary in such a library but must be kept in the same function scope.
-- GDB is available on arm64 but is not officially supported so it usually crashes pretty fast (as of June 2023) -- No pointer to variables on the stack shall be sent to the C code because Go stacks can be moved during the C call. More on this [here](https://medium.com/@trinad536/escape-analysis-in-golang-fc81b78f3550) diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/cgo_ref_pool.go b/vendor/github.com/DataDog/go-libddwaf/v3/cgo_ref_pool.go deleted file mode 100644 index 9de4c2fb..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/cgo_ref_pool.go +++ /dev/null @@ -1,106 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package waf - -import ( - "strconv" - - "github.com/DataDog/go-libddwaf/v3/internal/bindings" - "github.com/DataDog/go-libddwaf/v3/internal/unsafe" -) - -// cgoRefPool is a way to make sure we can safely send Go-allocated data on the C side of the WAF -// The main issue is the following: the wafObject uses a C union to store the tree structure of the full object; -// the union equivalent in Go is the interface, which is not compatible with C unions. The only way to be 100% sure -// that the Go wafObject struct has the same layout as the C one is to only use primitive types. So the only way to -// store a raw pointer is to use the uintptr type. But since uintptr does not have pointer semantics (and is just -// basically an integer), we need another structure to store the value as a Go pointer because the GC is lurking. That's -// where the cgoRefPool object comes into play: All new wafObject elements are created via this API which is especially -// built to make sure there is no gap for the Garbage Collector to exploit. From there, since underlying values of the -// wafObject are either arrays (for maps, structs and arrays) or string (for all ints, booleans and strings), -// we can store 2 slices of arrays and use runtime.KeepAlive in each code path to protect them from the GC. -type cgoRefPool struct { - stringRefs []string - arrayRefs [][]bindings.WafObject -} - -// This is used when passing empty Go strings to the WAF in order to avoid passing null string pointers, -// which are not handled by the WAF in all cases. -// FIXME: to be removed when the WAF handles null ptr strings in all expected places -var emptyWAFStringValue = unsafe.NativeStringUnwrap("\x00").Data - -func (refPool *cgoRefPool) append(newRefs cgoRefPool) { - refPool.stringRefs = append(refPool.stringRefs, newRefs.stringRefs...) - refPool.arrayRefs = append(refPool.arrayRefs, newRefs.arrayRefs...) -} - -// AllocCString is used in the rare cases where we need the WAF to receive standard null-terminated strings. -// All cases where strings are wrapped in wafObject are handled by AllocWafString -func (refPool *cgoRefPool) AllocCString(str string) uintptr { - if len(str) > 0 && str[len(str)-1] != '\x00' { - str = str + "\x00" - } - - refPool.stringRefs = append(refPool.stringRefs, str) - return unsafe.NativeStringUnwrap(str).Data -} - -// AllocWafString fills the obj parameter wafObject with all parameters needed for the WAF to interpret it as a string. -// We take full advantage of the fact that the WAF can receive non-null-terminated strings by directly retrieving the -underlying array in the string value using the nativeStringUnwrap function.
Hence, removing any copy in the process -func (refPool *cgoRefPool) AllocWafString(obj *bindings.WafObject, str string) { - obj.Type = bindings.WafStringType - - if len(str) == 0 { - obj.NbEntries = 0 - // FIXME: use obj.Value = 0 when the WAF handles null string ptr in all expected places - obj.Value = emptyWAFStringValue - return - } - - refPool.stringRefs = append(refPool.stringRefs, str) - stringHeader := unsafe.NativeStringUnwrap(str) - obj.Value = stringHeader.Data - obj.NbEntries = uint64(stringHeader.Len) -} - -// AllocWafArray is used to create a tree-like structure since we allocate a wafObject array inside another wafObject. -// wafObject can also represent a map, in that case we use the AllocWafMapKey function to make the wafObject key-value-pair -like objects. -func (refPool *cgoRefPool) AllocWafArray(obj *bindings.WafObject, typ bindings.WafObjectType, size uint64) []bindings.WafObject { - if typ != bindings.WafMapType && typ != bindings.WafArrayType { - panic("Cannot allocate this waf object data type as an array: " + strconv.Itoa(int(typ))) - } - - obj.Type = typ - obj.NbEntries = size - - // If the array size is zero no need to allocate anything - if size == 0 { - obj.Value = 0 - return nil - } - - goArray := make([]bindings.WafObject, size) - refPool.arrayRefs = append(refPool.arrayRefs, goArray) - - obj.Value = unsafe.SliceToUintptr(goArray) - return goArray -} - -// AllocWafMapKey is used to store a string map key in a wafObject. -// We take full advantage of the fact that the WAF can receive non-null-terminated strings by directly retrieving the -// underlying array in the string value using the nativeStringUnwrap function. Hence, removing any copy in the process -func (refPool *cgoRefPool) AllocWafMapKey(obj *bindings.WafObject, str string) { - if len(str) == 0 { - return - } - - refPool.stringRefs = append(refPool.stringRefs, str) - stringHeader := unsafe.NativeStringUnwrap(str) - obj.ParameterName = stringHeader.Data - obj.ParameterNameLength = uint64(stringHeader.Len) -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/context.go b/vendor/github.com/DataDog/go-libddwaf/v3/context.go deleted file mode 100644 index a6dc7677..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/context.go +++ /dev/null @@ -1,299 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package waf - -import ( - "sync" - "time" - - "github.com/DataDog/go-libddwaf/v3/errors" - "github.com/DataDog/go-libddwaf/v3/internal/bindings" - "github.com/DataDog/go-libddwaf/v3/internal/unsafe" - "github.com/DataDog/go-libddwaf/v3/timer" - - "sync/atomic" -) - -// Context is a WAF execution context. It allows running the WAF incrementally -// when calling it multiple times to run its rules every time new addresses -// become available. Each request must have its own Context. -type Context struct { - handle *Handle // Instance of the WAF - - cgoRefs cgoRefPool // Used to retain go data referenced by WAF Objects the context holds - cContext bindings.WafContext // The C ddwaf_context pointer - - timeoutCount atomic.Uint64 // Cumulative timeout count for this context. - - // Mutex protecting the use of cContext which is not thread-safe and cgoRefs.
- mutex sync.Mutex - - // timer registers the time spent in the WAF and go-libddwaf - timer timer.NodeTimer - - // metrics stores the cumulative time spent in various parts of the WAF - metrics metricsStore - - // truncations provides details about truncations that occurred while - // encoding address data for WAF execution. - truncations map[TruncationReason][]int -} - -// RunAddressData provides address data to the Context.Run method. If a given key is present in both -// RunAddressData.Persistent and RunAddressData.Ephemeral, the value from RunAddressData.Persistent will take precedence. -type RunAddressData struct { - // Persistent address data is scoped to the lifetime of a given Context, and subsequent calls to Context.Run with the - // same address name will be silently ignored. - Persistent map[string]any - // Ephemeral address data is scoped to a given Context.Run call and is not persisted across calls. This is used for - // protocols such as gRPC client/server streaming or GraphQL, where a single request can incur multiple subrequests. - Ephemeral map[string]any -} - -func (d RunAddressData) isEmpty() bool { - return len(d.Persistent) == 0 && len(d.Ephemeral) == 0 -} - -// Run encodes the given addressData values and runs them against the WAF rules within the given timeout value. If a -// given address is present both as persistent and ephemeral, the persistent value takes precedence. It returns the -// matches as a JSON string (usually opaquely used) along with the corresponding actions, if any. In case of an error, -// matches and actions can still be returned, for instance in the case of a timeout error. Errors can be tested against -// the RunError type. -// Struct fields having the tag `ddwaf:"ignore"` will not be encoded and sent to the WAF. -// If the output of TotalTime() exceeds the value of Timeout, the function will immediately return with errors.ErrTimeout. -// The second parameter is deprecated and should be passed to NewContextWithBudget instead. -func (context *Context) Run(addressData RunAddressData) (res Result, err error) { - if addressData.isEmpty() { - return - } - - defer func() { - if err == errors.ErrTimeout { - context.timeoutCount.Add(1) - } - }() - - // If the context has already timed out, we don't need to run the WAF again - if context.timer.SumExhausted() { - return Result{}, errors.ErrTimeout - } - - runTimer, err := context.timer.NewNode(wafRunTag, - timer.WithComponents( - wafEncodeTag, - wafDecodeTag, - wafDurationTag, - ), - ) - if err != nil { - return Result{}, err - } - - runTimer.Start() - defer func() { - context.metrics.add(wafRunTag, runTimer.Stop()) - context.metrics.merge(runTimer.Stats()) - }() - - wafEncodeTimer := runTimer.MustLeaf(wafEncodeTag) - wafEncodeTimer.Start() - persistentData, persistentEncoder, err := context.encodeOneAddressType(addressData.Persistent, wafEncodeTimer) - if err != nil { - wafEncodeTimer.Stop() - return res, err - } - - // The WAF releases ephemeral address data at the end of each run call, so we need not keep the Go values live beyond - that in the same way we need for persistent data. We hence use a separate encoder.
- ephemeralData, ephemeralEncoder, err := context.encodeOneAddressType(addressData.Ephemeral, wafEncodeTimer) - if err != nil { - wafEncodeTimer.Stop() - return res, err - } - - wafEncodeTimer.Stop() - - // ddwaf_run cannot run concurrently and we are going to mutate the context.cgoRefs, so we need to lock the context - context.mutex.Lock() - defer context.mutex.Unlock() - - if runTimer.SumExhausted() { - return res, errors.ErrTimeout - } - - // Save the Go pointer references to addressesToData that were referenced by the encoder - // into C ddwaf_objects. libddwaf's API requires to keep this data for the lifetime of the ddwaf_context. - defer context.cgoRefs.append(persistentEncoder.cgoRefs) - - wafDecodeTimer := runTimer.MustLeaf(wafDecodeTag) - res, err = context.run(persistentData, ephemeralData, wafDecodeTimer, runTimer.SumRemaining()) - - runTimer.AddTime(wafDurationTag, res.TimeSpent) - - // Ensure the ephemerals don't get optimized away by the compiler before the WAF had a chance to use them. - unsafe.KeepAlive(ephemeralEncoder.cgoRefs) - unsafe.KeepAlive(persistentEncoder.cgoRefs) - - return -} - -// merge merges two maps of slices into a single map of slices. The resulting map will contain all -// keys from both a and b, with the corresponding value from a and b concatenated (in this order) in -// a single slice. The implementation tries to minimize reallocations. -func merge[K comparable, V any](a, b map[K][]V) (merged map[K][]V) { - count := len(a) + len(b) - if count == 0 { - return - } - - keys := make(map[K]struct{}, count) - nothing := struct{}{} - totalCount := 0 - for _, m := range [2]map[K][]V{a, b} { - for k, v := range m { - keys[k] = nothing - totalCount += len(v) - } - } - - merged = make(map[K][]V, count) - values := make([]V, 0, totalCount) - - for k := range keys { - idxS := len(values) // Start index - values = append(values, a[k]...) - values = append(values, b[k]...) - idxE := len(values) // End index - - merged[k] = values[idxS:idxE] - } - - return -} - -// encodeOneAddressType encodes the given addressData values and returns the corresponding WAF object and its refs. -// If the addressData is empty, it returns nil for the WAF object and an empty ref pool. -// At this point, if the encoder does not timeout, the only error we can get is an error in case the top level object -// is a nil map, but this behaviour is expected since either persistent or ephemeral addresses are allowed to be null -// one at a time. In this case, the Encode error is ignored and a nil wafObject is returned, -// which is what we need to send to ddwaf_run to signal that the address data is empty. -func (context *Context) encodeOneAddressType(addressData map[string]any, timer timer.Timer) (*bindings.WafObject, encoder, error) { - encoder := newLimitedEncoder(timer) - if addressData == nil { - return nil, encoder, nil - } - - data, _ := encoder.Encode(addressData) - if len(encoder.truncations) > 0 { - context.mutex.Lock() - defer context.mutex.Unlock() - - context.truncations = merge(context.truncations, encoder.truncations) - } - - if timer.Exhausted() { - return nil, encoder, errors.ErrTimeout - } - - return data, encoder, nil -} - -// run executes the ddwaf_run call with the provided data on this context. The caller is responsible for locking the -context appropriately around this call.
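As a hedged miniature of the `merge` helper above, without the single-backing-slice optimization the vendored version uses to minimize reallocations:

```go
package main

import "fmt"

// mergeSketch keeps merge's semantics: for each key, values from a come
// first, then values from b, concatenated into one slice.
func mergeSketch[K comparable, V any](a, b map[K][]V) map[K][]V {
	out := make(map[K][]V, len(a)+len(b))
	for k, v := range a {
		out[k] = append(out[k], v...)
	}
	for k, v := range b {
		out[k] = append(out[k], v...)
	}
	return out
}

func main() {
	a := map[string][]int{"k": {1, 2}}
	b := map[string][]int{"k": {3}, "j": {4}}
	fmt.Println(mergeSketch(a, b)) // map[j:[4] k:[1 2 3]]
}
```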
-func (context *Context) run(persistentData, ephemeralData *bindings.WafObject, wafDecodeTimer timer.Timer, timeBudget time.Duration) (Result, error) { - result := new(bindings.WafResult) - defer wafLib.WafResultFree(result) - - // The value of the timeout cannot exceed 2^55 - // cf. https://en.cppreference.com/w/cpp/chrono/duration - timeout := uint64(timeBudget.Microseconds()) & 0x008FFFFFFFFFFFFF - ret := wafLib.WafRun(context.cContext, persistentData, ephemeralData, result, timeout) - - wafDecodeTimer.Start() - defer wafDecodeTimer.Stop() - - return unwrapWafResult(ret, result) -} - -func unwrapWafResult(ret bindings.WafReturnCode, result *bindings.WafResult) (res Result, err error) { - if result.Timeout > 0 { - err = errors.ErrTimeout - } else { - // Derivatives can be generated even if no security event gets detected, so we decode them as long as the WAF - didn't timeout - res.Derivatives, err = decodeMap(&result.Derivatives) - } - - res.TimeSpent = time.Duration(result.TotalRuntime) * time.Nanosecond - - if ret == bindings.WafOK { - return res, err - } - - if ret != bindings.WafMatch { - return res, goRunError(ret) - } - - res.Events, err = decodeArray(&result.Events) - if err != nil { - return res, err - } - if size := result.Actions.NbEntries; size > 0 { - res.Actions, err = decodeMap(&result.Actions) - if err != nil { - return res, err - } - } - - return res, err -} - -// Close the underlying `ddwaf_context` and releases the associated internal -// data. Also decreases the reference count of the `ddwaf_handle` which created -// this context, possibly releasing it completely (if this was the last context -// created from this handle & it was released by its creator). -func (context *Context) Close() { - context.mutex.Lock() - defer context.mutex.Unlock() - - wafLib.WafContextDestroy(context.cContext) - unsafe.KeepAlive(context.cgoRefs) // Keep the Go pointer references until the end of the context - defer context.handle.release() // Reduce the reference counter of the Handle. - - context.cgoRefs = cgoRefPool{} // The data in context.cgoRefs is no longer needed, explicitly release - context.cContext = 0 // Makes it easy to spot use-after-free/double-free issues -} - -// TotalRuntime returns the cumulated WAF runtime across various run calls within the same WAF context. -// Returned time is in nanoseconds. -// Deprecated: use Timings instead -func (context *Context) TotalRuntime() (uint64, uint64) { - return uint64(context.metrics.get(wafRunTag)), uint64(context.metrics.get(wafDurationTag)) -} - -// TotalTimeouts returns the cumulated amount of WAF timeouts across various run calls within the same WAF context.
-func (context *Context) TotalTimeouts() uint64 { - return context.timeoutCount.Load() -} - -// Stats returns the cumulative time spent in various parts of the WAF, all in nanoseconds -// and the timeout value used -func (context *Context) Stats() Stats { - context.mutex.Lock() - defer context.mutex.Unlock() - - truncations := make(map[TruncationReason][]int, len(context.truncations)) - for reason, counts := range context.truncations { - truncations[reason] = make([]int, len(counts)) - copy(truncations[reason], counts) - } - - return Stats{ - Timers: context.metrics.copy(), - TimeoutCount: context.timeoutCount.Load(), - Truncations: truncations, - } -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/decoder.go b/vendor/github.com/DataDog/go-libddwaf/v3/decoder.go deleted file mode 100644 index 2e68f772..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/decoder.go +++ /dev/null @@ -1,247 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package waf - -import ( - "github.com/DataDog/go-libddwaf/v3/errors" - "github.com/DataDog/go-libddwaf/v3/internal/bindings" - "github.com/DataDog/go-libddwaf/v3/internal/unsafe" -) - -// decodeErrors transforms the wafObject received by the wafRulesetInfo after the call to wafDl.wafInit to a map where -// keys are the error message and the value is an array of all the rule ids which triggered this specific error -func decodeErrors(obj *bindings.WafObject) (map[string][]string, error) { - if !obj.IsMap() { - return nil, errors.ErrInvalidObjectType - } - - if obj.Value == 0 && obj.NbEntries > 0 { - return nil, errors.ErrNilObjectPtr - } - - wafErrors := map[string][]string{} - for i := uint64(0); i < obj.NbEntries; i++ { - objElem := unsafe.CastWithOffset[bindings.WafObject](obj.Value, i) - - errorMessage := unsafe.GostringSized(unsafe.Cast[byte](objElem.ParameterName), objElem.ParameterNameLength) - ruleIds, err := decodeStringArray(objElem) - if err != nil { - return nil, err - } - - wafErrors[errorMessage] = ruleIds - } - - return wafErrors, nil -} - -func decodeDiagnostics(obj *bindings.WafObject) (*Diagnostics, error) { - if !obj.IsMap() { - return nil, errors.ErrInvalidObjectType - } - if obj.Value == 0 && obj.NbEntries > 0 { - return nil, errors.ErrNilObjectPtr - } - - var diags Diagnostics - var err error - for i := uint64(0); i < obj.NbEntries; i++ { - objElem := unsafe.CastWithOffset[bindings.WafObject](obj.Value, i) - key := unsafe.GostringSized(unsafe.Cast[byte](objElem.ParameterName), objElem.ParameterNameLength) - switch key { - case "actions": - diags.Actions, err = decodeDiagnosticsEntry(objElem) - case "custom_rules": - diags.CustomRules, err = decodeDiagnosticsEntry(objElem) - case "exclusions": - diags.Exclusions, err = decodeDiagnosticsEntry(objElem) - case "rules": - diags.Rules, err = decodeDiagnosticsEntry(objElem) - case "rules_data": - diags.RulesData, err = decodeDiagnosticsEntry(objElem) - case "rules_override": - diags.RulesOverrides, err = decodeDiagnosticsEntry(objElem) - case "processors": - diags.Processors, err = decodeDiagnosticsEntry(objElem) - case "scanners": - diags.Scanners, err = decodeDiagnosticsEntry(objElem) - case "ruleset_version": - diags.Version = unsafe.GostringSized(unsafe.Cast[byte](objElem.Value), objElem.NbEntries) - default: - // ignore?
- } - if err != nil { - return nil, err - } - } - - return &diags, nil -} - -func decodeDiagnosticsEntry(obj *bindings.WafObject) (*DiagnosticEntry, error) { - if !obj.IsMap() { - return nil, errors.ErrInvalidObjectType - } - if obj.Value == 0 && obj.NbEntries > 0 { - return nil, errors.ErrNilObjectPtr - } - var entry DiagnosticEntry - var err error - - for i := uint64(0); i < obj.NbEntries; i++ { - objElem := unsafe.CastWithOffset[bindings.WafObject](obj.Value, i) - key := unsafe.GostringSized(unsafe.Cast[byte](objElem.ParameterName), objElem.ParameterNameLength) - switch key { - case "addresses": - entry.Addresses, err = decodeDiagnosticAddresses(objElem) - case "error": - entry.Error = unsafe.GostringSized(unsafe.Cast[byte](objElem.Value), objElem.NbEntries) - case "errors": - entry.Errors, err = decodeErrors(objElem) - case "failed": - entry.Failed, err = decodeStringArray(objElem) - case "loaded": - entry.Loaded, err = decodeStringArray(objElem) - default: - return nil, errors.ErrUnsupportedValue - } - - if err != nil { - return nil, err - } - } - - return &entry, nil -} - -func decodeDiagnosticAddresses(obj *bindings.WafObject) (*DiagnosticAddresses, error) { - if !obj.IsMap() { - return nil, errors.ErrInvalidObjectType - } - if obj.Value == 0 && obj.NbEntries > 0 { - return nil, errors.ErrNilObjectPtr - } - - addrs := &DiagnosticAddresses{} - - var err error - for i := uint64(0); i < obj.NbEntries; i++ { - objElem := unsafe.CastWithOffset[bindings.WafObject](obj.Value, i) - key := unsafe.GostringSized(unsafe.Cast[byte](objElem.ParameterName), objElem.ParameterNameLength) - switch key { - case "required": - addrs.Required, err = decodeStringArray(objElem) - if err != nil { - return nil, err - } - case "optional": - addrs.Optional, err = decodeStringArray(objElem) - if err != nil { - return nil, err - } - default: - return nil, errors.ErrUnsupportedValue - } - } - - return addrs, nil -} - -func decodeStringArray(obj *bindings.WafObject) ([]string, error) { - // We consider that nil is an empty array - if obj.IsNil() { - return nil, nil - } - - if !obj.IsArray() { - return nil, errors.ErrInvalidObjectType - } - - if obj.Value == 0 && obj.NbEntries > 0 { - return nil, errors.ErrNilObjectPtr - } - - var strArr []string - for i := uint64(0); i < obj.NbEntries; i++ { - objElem := unsafe.CastWithOffset[bindings.WafObject](obj.Value, i) - if objElem.Type != bindings.WafStringType { - return nil, errors.ErrInvalidObjectType - } - - strArr = append(strArr, unsafe.GostringSized(unsafe.Cast[byte](objElem.Value), objElem.NbEntries)) - } - - return strArr, nil -} - -func decodeObject(obj *bindings.WafObject) (any, error) { - switch obj.Type { - case bindings.WafMapType: - return decodeMap(obj) - case bindings.WafArrayType: - return decodeArray(obj) - case bindings.WafStringType: - return unsafe.GostringSized(unsafe.Cast[byte](obj.Value), obj.NbEntries), nil - case bindings.WafIntType: - return int64(obj.Value), nil - case bindings.WafUintType: - return uint64(obj.Value), nil - case bindings.WafFloatType: - return unsafe.UintptrToNative[float64](obj.Value), nil - case bindings.WafBoolType: - return unsafe.UintptrToNative[bool](obj.Value), nil - case bindings.WafNilType: - return nil, nil - default: - return nil, errors.ErrUnsupportedValue - } -} - -func decodeArray(obj *bindings.WafObject) ([]any, error) { - if obj.IsNil() { - return nil, nil - } - - if !obj.IsArray() { - return nil, errors.ErrInvalidObjectType - } - - events := make([]any, obj.NbEntries) - - for i := uint64(0); i < obj.NbEntries; 
i++ { - objElem := unsafe.CastWithOffset[bindings.WafObject](obj.Value, i) - val, err := decodeObject(objElem) - if err != nil { - return nil, err - } - events[i] = val - } - - return events, nil -} - -func decodeMap(obj *bindings.WafObject) (map[string]any, error) { - if obj.IsNil() { - return nil, nil - } - - if !obj.IsMap() { - return nil, errors.ErrInvalidObjectType - } - - result := make(map[string]any, obj.NbEntries) - for i := uint64(0); i < obj.NbEntries; i++ { - objElem := unsafe.CastWithOffset[bindings.WafObject](obj.Value, i) - key := unsafe.GostringSized(unsafe.Cast[byte](objElem.ParameterName), objElem.ParameterNameLength) - val, err := decodeObject(objElem) - if err != nil { - return nil, err - } - result[key] = val - } - - return result, nil -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/encoder.go b/vendor/github.com/DataDog/go-libddwaf/v3/encoder.go deleted file mode 100644 index dcf6b488..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/encoder.go +++ /dev/null @@ -1,507 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package waf - -import ( - "context" - "fmt" - "math" - "reflect" - "strings" - "time" - "unicode" - - "github.com/DataDog/go-libddwaf/v3/errors" - "github.com/DataDog/go-libddwaf/v3/internal/bindings" - "github.com/DataDog/go-libddwaf/v3/internal/unsafe" - "github.com/DataDog/go-libddwaf/v3/timer" -) - -// Encode Go values into wafObjects. Only the subset of Go types representable into wafObjects -will be encoded while ignoring the rest of it. -The encoder allocates the memory required for new wafObjects into the Go memory, which must be kept -referenced for their lifetime in the C world. This lifetime depends on the ddwaf function being used with -the encoded result. The Go references of the allocated wafObjects, along with every Go pointer they may -reference now or in the future, are stored and referenced in the `cgoRefs` field. The user MUST leverage -`keepAlive()` with it according to its ddwaf use-case. -type encoder struct { - // timer makes sure the encoder doesn't spend too much time doing its job. - timer timer.Timer - - // For each TruncationReason, holds the size that is required to avoid truncation for each truncation that happened. - truncations map[TruncationReason][]int - - cgoRefs cgoRefPool - containerMaxSize int - stringMaxSize int - objectMaxDepth int -} - -// TruncationReason is a flag representing reasons why some input was not encoded in full. -type TruncationReason uint8 - -const ( - // StringTooLong indicates a string exceeded the maximum string length configured. The truncation - // values indicate the actual length of truncated strings. - StringTooLong TruncationReason = 1 << iota - // ContainerTooLarge indicates a container (list, map, struct) exceeded the maximum number of - // elements configured. The truncation values indicate the actual number of elements in the - // truncated container. - ContainerTooLarge - // ObjectTooDeep indicates an overall object exceeded the maximum encoding depth configured. The - // truncation values indicate an estimated actual depth of the truncated object. The value is - // guaranteed to be less than or equal to the actual depth (it may not be more).
- ObjectTooDeep -) - -func (reason TruncationReason) String() string { - switch reason { - case ObjectTooDeep: - return "depth" - case ContainerTooLarge: - return "container-size" - case StringTooLong: - return "string-size" - default: - return fmt.Sprintf("TruncationReason(%v)", int(reason)) - } -} - -const ( - AppsecFieldTag = "ddwaf" - AppsecFieldTagValueIgnore = "ignore" -) - -type native interface { - int64 | uint64 | uintptr -} - -func newLimitedEncoder(timer timer.Timer) encoder { - return encoder{ - timer: timer, - containerMaxSize: bindings.WafMaxContainerSize, - stringMaxSize: bindings.WafMaxStringLength, - objectMaxDepth: bindings.WafMaxContainerDepth, - } -} - -func newMaxEncoder() encoder { - timer, _ := timer.NewTimer(timer.WithUnlimitedBudget()) - return encoder{ - timer: timer, - containerMaxSize: math.MaxInt, - stringMaxSize: math.MaxInt, - objectMaxDepth: math.MaxInt, - } -} - -// Encode takes a Go value and returns a wafObject pointer and an error. -// The returned wafObject is the root of the tree of nested wafObjects representing the Go value. -// The only error case is if the top-level object is "Unusable" which means that the data is nil or a non-data type -// like a function or a channel. -func (encoder *encoder) Encode(data any) (wo *bindings.WafObject, err error) { - value := reflect.ValueOf(data) - wo = &bindings.WafObject{} - - err = encoder.encode(value, wo, encoder.objectMaxDepth) - - if len(encoder.truncations[ObjectTooDeep]) != 0 && !encoder.timer.Exhausted() { - encoder.measureObjectDepth(value, encoder.timer.Remaining()) - } - - return -} - -// Truncations returns all truncations that happened since the last call to `Truncations()`, and clears the internal -// list. This is a map from truncation reason to the list of un-truncated value sizes. -func (encoder *encoder) Truncations() map[TruncationReason][]int { - result := encoder.truncations - encoder.truncations = nil - return result -} - -func encodeNative[T native](val T, t bindings.WafObjectType, obj *bindings.WafObject) { - obj.Type = t - obj.Value = (uintptr)(val) -} - -var nullableTypeKinds = map[reflect.Kind]struct{}{ - reflect.Interface: {}, - reflect.Pointer: {}, - reflect.UnsafePointer: {}, - reflect.Map: {}, - reflect.Slice: {}, - reflect.Func: {}, - reflect.Chan: {}, -} - -// isValueNil check if the value is nullable and if it is actually nil -// we cannot directly use value.IsNil() because it panics on non-pointer values -func isValueNil(value reflect.Value) bool { - _, nullable := nullableTypeKinds[value.Kind()] - return nullable && value.IsNil() -} - -func (encoder *encoder) encode(value reflect.Value, obj *bindings.WafObject, depth int) error { - if encoder.timer.Exhausted() { - return errors.ErrTimeout - } - - value, kind := resolvePointer(value) - if (kind == reflect.Interface || kind == reflect.Pointer) && !value.IsNil() { - // resolvePointer failed to resolve to something that's not a pointer, it - // has indirected too many times... - return errors.ErrTooManyIndirections - } - - // Measure-only runs for leaves - if obj == nil && kind != reflect.Array && kind != reflect.Slice && kind != reflect.Map && kind != reflect.Struct { - // Nothing to do, we were only here to measure object depth! 
- return nil - } - - switch { - // Terminal cases (leaves of the tree) - // Is invalid type: nil interfaces, for example, cannot be used to run any reflect method without risking a panic - case !value.IsValid() || kind == reflect.Invalid: - return errors.ErrUnsupportedValue - // Is nullable type: nil pointers, channels, maps or functions - case isValueNil(value): - encodeNative[uintptr](0, bindings.WafNilType, obj) - - // Booleans - case kind == reflect.Bool: - encodeNative(unsafe.NativeToUintptr(value.Bool()), bindings.WafBoolType, obj) - - // Numbers - case value.CanInt(): // any int type or alias - encodeNative(value.Int(), bindings.WafIntType, obj) - case value.CanUint(): // any Uint type or alias - encodeNative(value.Uint(), bindings.WafUintType, obj) - case value.CanFloat(): // any float type or alias - encodeNative(unsafe.NativeToUintptr(value.Float()), bindings.WafFloatType, obj) - - // Strings - case kind == reflect.String: // string type - encoder.encodeString(value.String(), obj) - - case (kind == reflect.Array || kind == reflect.Slice) && value.Type().Elem().Kind() == reflect.Uint8: - // Byte arrays are skipped voluntarily because they are often used - // to do partial parsing, which leads to false positives - return nil - - // Containers (internal nodes of the tree) - - // All recursive cases can only execute if the depth is greater than 0. - case depth <= 0: - // Record that there was a truncation; we will try to measure the actual depth of the object afterwards. - encoder.addTruncation(ObjectTooDeep, -1) - return errors.ErrMaxDepthExceeded - - // Either an array or a slice - case kind == reflect.Array || kind == reflect.Slice: - encoder.encodeArray(value, obj, depth-1) - case kind == reflect.Map: - encoder.encodeMap(value, obj, depth-1) - case kind == reflect.Struct: - encoder.encodeStruct(value, obj, depth-1) - - default: - return errors.ErrUnsupportedValue - } - - return nil -} - -func (encoder *encoder) encodeString(str string, obj *bindings.WafObject) { - size := len(str) - if size > encoder.stringMaxSize { - str = str[:encoder.stringMaxSize] - encoder.addTruncation(StringTooLong, size) - } - encoder.cgoRefs.AllocWafString(obj, str) -} - -func getFieldNameFromType(field reflect.StructField) (string, bool) { - fieldName := field.Name - - // Private and synthetic fields - if len(fieldName) < 1 || unicode.IsLower(rune(fieldName[0])) { - return "", false - } - - // Use the json tag name as field name if present - if tag, ok := field.Tag.Lookup("json"); ok { - if i := strings.IndexByte(tag, byte(',')); i > 0 { - tag = tag[:i] - } - if len(tag) > 0 { - fieldName = tag - } - } - - return fieldName, true -} - -// encodeStruct takes a reflect.Value and a wafObject pointer and iterates over the struct fields to build -// a wafObject map of type wafMapType.
The specificities are the following: -// - It will only take the first encoder.containerMaxSize elements of the struct -// - If the field has a json tag it will become the field name -// - Private fields and also values producing an error at encoding will be skipped -// - Even if the element values are invalid or null we still keep them to report the field name -func (encoder *encoder) encodeStruct(value reflect.Value, obj *bindings.WafObject, depth int) { - if encoder.timer.Exhausted() { - return - } - - typ := value.Type() - nbFields := typ.NumField() - - capacity := nbFields - length := 0 - if capacity > encoder.containerMaxSize { - capacity = encoder.containerMaxSize - } - - objArray := encoder.cgoRefs.AllocWafArray(obj, bindings.WafMapType, uint64(capacity)) - for i := 0; i < nbFields; i++ { - if encoder.timer.Exhausted() { - return - } - - if length == capacity { - encoder.addTruncation(ContainerTooLarge, nbFields) - break - } - - fieldType := typ.Field(i) - fieldName, usable := getFieldNameFromType(fieldType) - if tag, ok := fieldType.Tag.Lookup(AppsecFieldTag); !usable || ok && tag == AppsecFieldTagValueIgnore { - // Either the struct field is ignored by json marshaling so can we, - // or the field was explicitly set with `ddwaf:ignore` - continue - } - - objElem := &objArray[length] - // If the Map key is of unsupported type, skip it - encoder.encodeMapKeyFromString(fieldName, objElem) - - if err := encoder.encode(value.Field(i), objElem, depth); err != nil { - // We still need to keep the map key, so we can't discard the full object, instead, we make the value a noop - encodeNative[uintptr](0, bindings.WafInvalidType, objElem) - } - - length++ - } - - // Set the length to the final number of successfully encoded elements - obj.NbEntries = uint64(length) -} - -// encodeMap takes a reflect.Value and a wafObject pointer and iterates on the map elements and returns -// a wafObject map of type wafMapType. The specificities are the following: -// - It will only take the first encoder.containerMaxSize elements of the map -// - Even if the element values are invalid or null we still keep them to report the map key -func (encoder *encoder) encodeMap(value reflect.Value, obj *bindings.WafObject, depth int) { - capacity := value.Len() - if capacity > encoder.containerMaxSize { - capacity = encoder.containerMaxSize - } - - objArray := encoder.cgoRefs.AllocWafArray(obj, bindings.WafMapType, uint64(capacity)) - - length := 0 - for iter := value.MapRange(); iter.Next(); { - if encoder.timer.Exhausted() { - return - } - - if length == capacity { - encoder.addTruncation(ContainerTooLarge, value.Len()) - break - } - - objElem := &objArray[length] - if err := encoder.encodeMapKey(iter.Key(), objElem); err != nil { - continue - } - - if err := encoder.encode(iter.Value(), objElem, depth); err != nil { - // We still need to keep the map key, so we can't discard the full object, instead, we make the value a noop - encodeNative[uintptr](0, bindings.WafInvalidType, objElem) - } - - length++ - } - - // Fix the size because we skipped map entries - obj.NbEntries = uint64(length) -} - -// encodeMapKey takes a reflect.Value and a wafObject and returns a wafObject ready to be considered a map entry. We use -// the function cgoRefPool.AllocWafMapKey to store the key in the wafObject. But first we need to grab the real -// underlying value by recursing through the pointer and interface values. 
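// Editor's note: the encoding rules documented above (cap container entries at a
// maximum size, keep map keys even when their value fails to encode, and record a
// truncation with the untruncated size) generalize beyond this library. A minimal,
// self-contained sketch of that bookkeeping follows; the types are hypothetical,
// not the vendored bindings.
package main

import "fmt"

type entry struct {
	key     string
	invalid bool // value kept as a no-op so the key is still reported
}

// encodeMapSketch caps the output at maxSize entries and reports the real size
// when a truncation happens, mirroring the ContainerTooLarge convention above.
func encodeMapSketch(in map[string]any, maxSize int) ([]entry, []int) {
	var truncations []int
	if len(in) > maxSize {
		truncations = append(truncations, len(in))
	}
	out := make([]entry, 0, min(len(in), maxSize))
	for k := range in {
		if len(out) == maxSize {
			break
		}
		// A real encoder would recurse into the value here and, on error,
		// mark the entry invalid instead of dropping the key.
		out = append(out, entry{key: k})
	}
	return out, truncations
}

func main() {
	entries, truncations := encodeMapSketch(map[string]any{"a": 1, "b": 2, "c": 3}, 2)
	fmt.Println(len(entries), truncations) // 2 [3]
}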
-func (encoder *encoder) encodeMapKey(value reflect.Value, obj *bindings.WafObject) error { - value, kind := resolvePointer(value) - - var keyStr string - switch { - case kind == reflect.Invalid: - return errors.ErrInvalidMapKey - case kind == reflect.String: - keyStr = value.String() - case value.Type() == reflect.TypeOf([]byte(nil)): - keyStr = string(value.Bytes()) - default: - return errors.ErrInvalidMapKey - } - - encoder.encodeMapKeyFromString(keyStr, obj) - return nil -} - -// encodeMapKeyFromString takes a string and a wafObject and sets the map key attribute on the wafObject to the supplied -// string. The key may be truncated if it exceeds the maximum string size allowed by the encoder. -func (encoder *encoder) encodeMapKeyFromString(keyStr string, obj *bindings.WafObject) { - size := len(keyStr) - if size > encoder.stringMaxSize { - keyStr = keyStr[:encoder.stringMaxSize] - encoder.addTruncation(StringTooLong, size) - } - - encoder.cgoRefs.AllocWafMapKey(obj, keyStr) -} - -// encodeArray takes a reflect.Value and a wafObject pointer and iterates on the elements and returns -// a wafObject array of type wafArrayType. The specificities are the following: -// - It will only take the first encoder.containerMaxSize elements of the array -// - Elements producing an error at encoding or null values will be skipped -func (encoder *encoder) encodeArray(value reflect.Value, obj *bindings.WafObject, depth int) { - length := value.Len() - - capacity := length - if capacity > encoder.containerMaxSize { - capacity = encoder.containerMaxSize - } - - currIndex := 0 - - objArray := encoder.cgoRefs.AllocWafArray(obj, bindings.WafArrayType, uint64(capacity)) - - for i := 0; i < length; i++ { - if encoder.timer.Exhausted() { - return - } - if currIndex == capacity { - encoder.addTruncation(ContainerTooLarge, length) - break - } - - objElem := &objArray[currIndex] - if err := encoder.encode(value.Index(i), objElem, depth); err != nil { - continue - } - - // If the element is null or invalid it has no impact on the waf execution, therefore we can skip its - // encoding. In this specific case we just overwrite it at the next loop iteration. - if objElem == nil || objElem.IsUnusable() { - continue - } - - currIndex++ - } - - // Fix the size because we skipped some entries - obj.NbEntries = uint64(currIndex) -} - -func (encoder *encoder) addTruncation(reason TruncationReason, size int) { - if encoder.truncations == nil { - encoder.truncations = make(map[TruncationReason][]int, 3) - } - encoder.truncations[reason] = append(encoder.truncations[reason], size) -} - -// measureObjectDepth traverses the provided object recursively to try and obtain -// the real object depth, but limits itself to about 1ms of time budget, past -// which it'll stop and return whatever it has gotten to so far. -func (encoder *encoder) measureObjectDepth(obj reflect.Value, timeout time.Duration) { - ctx, cancelCtx := context.WithTimeout(context.Background(), timeout) - defer cancelCtx() - - depth, _ := depthOf(ctx, obj) - encoder.truncations[ObjectTooDeep] = []int{depth} -} - -// depthOf returns the depth of the provided object. This is 0 for scalar values, -// such as strings.
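// Editor's note: a standalone sketch of the best-effort depth measurement the
// comment above describes: recurse with reflect, stop as soon as the context
// expires, and report whatever depth was reached. Plain context and reflect
// only; this illustrates the idea and is not the vendored code.
package main

import (
	"context"
	"fmt"
	"reflect"
	"time"
)

func depthOfSketch(ctx context.Context, v reflect.Value) int {
	if ctx.Err() != nil {
		return 0 // budget exhausted: report what we have so far
	}
	// Unwrap pointers and interfaces, as resolvePointer does in the original.
	for v.Kind() == reflect.Pointer || v.Kind() == reflect.Interface {
		if v.IsNil() {
			return 0
		}
		v = v.Elem()
	}
	d := 0
	switch v.Kind() {
	case reflect.Slice, reflect.Array:
		for i := 0; i < v.Len(); i++ {
			d = max(d, depthOfSketch(ctx, v.Index(i)))
		}
		return d + 1
	case reflect.Map:
		for it := v.MapRange(); it.Next(); {
			d = max(d, depthOfSketch(ctx, it.Value()))
		}
		return d + 1
	default:
		return 0 // scalars have depth 0, matching the convention above
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	fmt.Println(depthOfSketch(ctx, reflect.ValueOf([]any{[]any{[]any{1}}}))) // 3
}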
-func depthOf(ctx context.Context, obj reflect.Value) (depth int, err error) { - if err = ctx.Err(); err != nil { - // Timed out, won't go any deeper - return 0, err - } - - obj, kind := resolvePointer(obj) - - var itemDepth int - switch kind { - case reflect.Array, reflect.Slice: - if obj.Type() == reflect.TypeOf([]byte(nil)) { - // We treat byte slices as strings - return 0, nil - } - for i := 0; i < obj.Len(); i++ { - itemDepth, err = depthOf(ctx, obj.Index(i)) - depth = max(depth, itemDepth) - if err != nil { - break - } - } - return depth + 1, err - case reflect.Map: - for iter := obj.MapRange(); iter.Next(); { - itemDepth, err = depthOf(ctx, iter.Value()) - depth = max(depth, itemDepth) - if err != nil { - break - } - } - return depth + 1, err - case reflect.Struct: - typ := obj.Type() - for i := 0; i < obj.NumField(); i++ { - fieldType := typ.Field(i) - _, usable := getFieldNameFromType(fieldType) - if !usable { - continue - } - - itemDepth, err = depthOf(ctx, obj.Field(i)) - depth = max(depth, itemDepth) - if err != nil { - break - } - } - return depth + 1, err - default: - return 0, nil - } -} - -// resolvePointer attempts to resolve a pointer while limiting the pointer depth -// to be traversed, so that this is not susceptible to an infinite loop when -// provided a self-referencing pointer. -func resolvePointer(obj reflect.Value) (reflect.Value, reflect.Kind) { - kind := obj.Kind() - for limit := 8; limit > 0 && (kind == reflect.Pointer || kind == reflect.Interface); limit-- { - if obj.IsNil() { - return obj, kind - } - obj = obj.Elem() - kind = obj.Kind() - } - return obj, kind -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/errors/waf.go b/vendor/github.com/DataDog/go-libddwaf/v3/errors/waf.go deleted file mode 100644 index a991bcb9..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/errors/waf.go +++ /dev/null @@ -1,77 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package errors - -import ( - "errors" - "fmt" -) - -// Encoder/Decoder errors -var ( - ErrMaxDepthExceeded = errors.New("max depth exceeded") - ErrUnsupportedValue = errors.New("unsupported Go value") - ErrInvalidMapKey = errors.New("invalid WAF object map key") - ErrNilObjectPtr = errors.New("nil WAF object pointer") - ErrInvalidObjectType = errors.New("invalid type encountered when decoding") - ErrTooManyIndirections = errors.New("too many indirections") -) - -// RunError is an error type the WAF can return when running it. -type RunError int - -// Errors the WAF can return when running it. -const ( - ErrInternal RunError = iota + 1 - ErrInvalidObject - ErrInvalidArgument - ErrTimeout - ErrOutOfMemory - ErrEmptyRuleAddresses -) - -// Error returns the string representation of the RunError. -func (e RunError) Error() string { - switch e { - case ErrInternal: - return "internal waf error" - case ErrTimeout: - return "waf timeout" - case ErrInvalidObject: - return "invalid waf object" - case ErrInvalidArgument: - return "invalid waf argument" - case ErrOutOfMemory: - return "out of memory" - case ErrEmptyRuleAddresses: - return "empty rule addresses" - default: - return fmt.Sprintf("unknown waf error %d", e) - } -} - -// PanicError is an error type wrapping a recovered panic value that happened -// during a function call. Such an error must be considered unrecoverable and be -// used to try to abort gracefully.
Continuing to use this package after such an -// error is unreliable, and the caller should stop using the library. -// Examples include safety-check errors. -type PanicError struct { - // The recovered panic error while executing the function `in`. - Err error - // The function symbol name that was given to `tryCall()`. - In string -} - -// Unwrap the error and return it. -// Required by errors.Is and errors.As functions. -func (e *PanicError) Unwrap() error { - return e.Err -} - -// Error returns the error string representation. -func (e *PanicError) Error() string { - return fmt.Sprintf("panic while executing %s: %#+v", e.In, e.Err) -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/handle.go b/vendor/github.com/DataDog/go-libddwaf/v3/handle.go deleted file mode 100644 index daa6cb61..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/handle.go +++ /dev/null @@ -1,263 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package waf - -import ( - "errors" - "fmt" - "time" - - wafErrors "github.com/DataDog/go-libddwaf/v3/errors" - "github.com/DataDog/go-libddwaf/v3/internal/bindings" - "github.com/DataDog/go-libddwaf/v3/internal/unsafe" - "github.com/DataDog/go-libddwaf/v3/timer" - - "sync/atomic" -) - -// Handle represents an instance of the WAF for a given ruleset. -type Handle struct { - // diagnostics holds information about rules initialization - diagnostics Diagnostics - - // Lock-less reference counter avoiding blocking calls to the Close() method - // while WAF contexts are still using the WAF handle. Instead, we let the - // release actually happen only when the reference counter reaches 0. - // This can happen either from a request handler calling its WAF context's - // Close() method, or from the appsec instance calling the WAF - // handle's Close() method when creating a new WAF handle with new rules. - // Note that this means several instances of the WAF can exist at the same - // time with their own set of rules. This choice was made to be able to - // efficiently update the security rules concurrently, without having to - // block the request handlers for the time of the security rules update. - refCounter atomic.Int32 - - // Instance of the WAF - cHandle bindings.WafHandle -} - -// NewHandle creates and returns a new instance of the WAF with the given security rules and configuration -// of the sensitive data obfuscator. The returned handle is nil in case of an error. -// Rules-related metrics, including errors, are accessible with the `Diagnostics()` method.
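// Editor's note: a usage sketch for the constructor documented above, using only
// names that appear in these (now deleted) v3 sources: NewHandle, Addresses,
// NewContext, and the Close methods (Close on the context is referenced by the
// handle's comments). The ruleset literal is a placeholder, not a valid WAF
// ruleset, so a real run would surface diagnostics errors for it.
package main

import (
	"fmt"

	waf "github.com/DataDog/go-libddwaf/v3"
)

func main() {
	rules := map[string]any{"version": "2.2", "rules": []any{}} // placeholder
	handle, err := waf.NewHandle(rules, `(?i)password`, `(?i)secret`)
	if err != nil {
		fmt.Println("init failed:", err)
		return
	}
	defer handle.Close() // drops the initial reference counted below

	fmt.Println("known addresses:", handle.Addresses())

	ctx, err := handle.NewContext()
	if err != nil {
		fmt.Println("context failed:", err)
		return
	}
	defer ctx.Close()
}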
-func NewHandle(rules any, keyObfuscatorRegex string, valueObfuscatorRegex string) (*Handle, error) { - // The order of action is the following: - // - Open the ddwaf C library - // - Encode the security rules as a ddwaf_object - // - Create a ddwaf_config object and fill the values - // - Run ddwaf_init to create a new handle based on the given rules and config - // - Check for errors and streamline the ddwaf_ruleset_info returned - - if ok, err := Load(); !ok { - return nil, err - // The case where ok == true && err != nil is ignored on purpose, as - // this is out of the scope of NewHandle which only requires a properly - // loaded libddwaf in order to use it - } - - encoder := newMaxEncoder() - obj, err := encoder.Encode(rules) - if err != nil { - return nil, fmt.Errorf("could not encode the WAF ruleset into a WAF object: %w", err) - } - - config := newConfig(&encoder.cgoRefs, keyObfuscatorRegex, valueObfuscatorRegex) - diagnosticsWafObj := new(bindings.WafObject) - defer wafLib.WafObjectFree(diagnosticsWafObj) - - cHandle := wafLib.WafInit(obj, config, diagnosticsWafObj) - // Upon failure, the WAF may have produced some diagnostics to help signal what went wrong... - var ( - diags *Diagnostics - diagsErr error - ) - if !diagnosticsWafObj.IsInvalid() { - diags, diagsErr = decodeDiagnostics(diagnosticsWafObj) - } - - if cHandle == 0 { - // WAF Failed initialization, report the best possible error... - if diags != nil && diagsErr == nil { - // We were able to parse out some diagnostics from the WAF! - err = diags.TopLevelError() - if err != nil { - return nil, fmt.Errorf("could not instantiate the WAF: %w", err) - } - } - return nil, errors.New("could not instantiate the WAF") - } - - // The WAF successfully initialized at this stage... - if diagsErr != nil { - wafLib.WafDestroy(cHandle) - return nil, fmt.Errorf("could not decode the WAF diagnostics: %w", diagsErr) - } - - unsafe.KeepAlive(encoder.cgoRefs) - - handle := &Handle{ - cHandle: cHandle, - diagnostics: *diags, - } - - handle.refCounter.Store(1) // We count the handle itself in the counter - return handle, nil -} - -// NewContext returns a new WAF context for the given WAF handle. -// A nil value is returned when the WAF handle was released or when the -// WAF context couldn't be created. -func (handle *Handle) NewContext() (*Context, error) { - return handle.NewContextWithBudget(timer.UnlimitedBudget) -} - -// NewContextWithBudget returns a new WAF context for the given WAF handle. -// A nil value is returned when the WAF handle was released or when the -// WAF context couldn't be created. -func (handle *Handle) NewContextWithBudget(budget time.Duration) (*Context, error) { - // Handle has been released - if !handle.retain() { - return nil, fmt.Errorf("handle was released") - } - - cContext := wafLib.WafContextInit(handle.cHandle) - if cContext == 0 { - handle.release() // We couldn't get a context, so we no longer have an implicit reference to the Handle in it... 
- return nil, fmt.Errorf("could not get C context") - } - - timer, err := timer.NewTreeTimer(timer.WithBudget(budget), timer.WithComponents(wafRunTag)) - if err != nil { - return nil, err - } - - return &Context{handle: handle, cContext: cContext, timer: timer, metrics: metricsStore{data: make(map[string]time.Duration, 5)}}, nil -} - -// Diagnostics returns the rules initialization metrics for the current WAF handle -func (handle *Handle) Diagnostics() Diagnostics { - return handle.diagnostics -} - -// Addresses returns the list of addresses the WAF rule is expecting. -func (handle *Handle) Addresses() []string { - return wafLib.WafKnownAddresses(handle.cHandle) -} - -// Update the ruleset of a WAF instance into a new handle on its own -// the previous handle still needs to be closed manually -func (handle *Handle) Update(newRules any) (*Handle, error) { - encoder := newMaxEncoder() - obj, err := encoder.Encode(newRules) - if err != nil { - return nil, fmt.Errorf("could not encode the WAF ruleset into a WAF object: %w", err) - } - - diagnosticsWafObj := new(bindings.WafObject) - - cHandle := wafLib.WafUpdate(handle.cHandle, obj, diagnosticsWafObj) - unsafe.KeepAlive(encoder.cgoRefs) - if cHandle == 0 { - return nil, errors.New("could not update the WAF instance") - } - - defer wafLib.WafObjectFree(diagnosticsWafObj) - - if err != nil { // Something is very wrong - return nil, fmt.Errorf("could not decode the WAF ruleset errors: %w", err) - } - - newHandle := &Handle{ - cHandle: cHandle, - } - - newHandle.refCounter.Store(1) // We count the handle itself in the counter - return newHandle, nil -} - -// Close puts the handle in termination state, when all the contexts are closed the handle will be destroyed -func (handle *Handle) Close() { - if handle.addRefCounter(-1) != 0 { - // Either the counter is still positive (this Handle is still referenced), or it had previously - // reached 0 and some other call has done the cleanup already. - return - } - - wafLib.WafDestroy(handle.cHandle) - handle.diagnostics = Diagnostics{} // Data in diagnostics may no longer be valid (e.g: strings from libddwaf) - handle.cHandle = 0 // Makes it easy to spot use-after-free/double-free issues -} - -// retain increments the reference counter of this Handle. Returns true if the -// Handle is still valid, false if it is no longer usable. Calls to retain() -// must be balanced with calls to release() in order to avoid leaking Handles. -func (handle *Handle) retain() bool { - return handle.addRefCounter(1) > 0 -} - -// release decrements the reference counter of this Handle, possibly causing it -// to be completely closed if no other reference to it exist. -func (handle *Handle) release() { - handle.Close() -} - -// addRefCounter adds x to Handle.refCounter. The return valid indicates whether the refCounter reached 0 as part of -// this call or not, which can be used to perform "only-once" activities: -// - result > 0 => the Handle is still usable -// - result == 0 => the handle is no longer usable, ref counter reached 0 as part of this call -// - result == -1 => the handle is no longer usable, ref counter was already 0 previously -func (handle *Handle) addRefCounter(x int32) int32 { - // We use a CAS loop to avoid setting the refCounter to a negative value. 
- for { - current := handle.refCounter.Load() - if current <= 0 { - // The object had already been released - return -1 - } - - next := current + x - if swapped := handle.refCounter.CompareAndSwap(current, next); swapped { - if next < 0 { - // TODO(romain.marcadier): somehow signal unexpected behavior to the - // caller (panic? error?). We currently clamp to 0 in order to avoid - // causing a customer program crash, but this is the symptom of a bug - // and should be investigated (however this clamping hides the issue). - return 0 - } - return next - } - } -} - -func newConfig(cgoRefs *cgoRefPool, keyObfuscatorRegex string, valueObfuscatorRegex string) *bindings.WafConfig { - config := new(bindings.WafConfig) - *config = bindings.WafConfig{ - Limits: bindings.WafConfigLimits{ - MaxContainerDepth: bindings.WafMaxContainerDepth, - MaxContainerSize: bindings.WafMaxContainerSize, - MaxStringLength: bindings.WafMaxStringLength, - }, - Obfuscator: bindings.WafConfigObfuscator{ - KeyRegex: cgoRefs.AllocCString(keyObfuscatorRegex), - ValueRegex: cgoRefs.AllocCString(valueObfuscatorRegex), - }, - // Prevent libddwaf from freeing our Go-memory-allocated ddwaf_objects - FreeFn: 0, - } - return config -} - -func goRunError(rc bindings.WafReturnCode) error { - switch rc { - case bindings.WafErrInternal: - return wafErrors.ErrInternal - case bindings.WafErrInvalidObject: - return wafErrors.ErrInvalidObject - case bindings.WafErrInvalidArgument: - return wafErrors.ErrInvalidArgument - default: - return fmt.Errorf("unknown waf return code %d", int(rc)) - } -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/ctypes.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/ctypes.go deleted file mode 100644 index ba21f902..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/ctypes.go +++ /dev/null @@ -1,113 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package bindings - -const ( - WafMaxStringLength = 4096 - WafMaxContainerDepth = 20 - WafMaxContainerSize = 256 - WafRunTimeout = 5000 -) - -type WafReturnCode int32 - -const ( - WafErrInternal WafReturnCode = iota - 3 - WafErrInvalidObject - WafErrInvalidArgument - WafOK - WafMatch -) - -// wafObjectType is an enum in C which has the size of DWORD. -// But DWORD is 4 bytes in amd64 and arm64 so uint32 it is. -type WafObjectType uint32 - -const WafInvalidType WafObjectType = 0 -const ( - WafIntType WafObjectType = 1 << iota - WafUintType - WafStringType - WafArrayType - WafMapType - WafBoolType - WafFloatType - WafNilType -) - -type WafObject struct { - ParameterName uintptr - ParameterNameLength uint64 - Value uintptr - NbEntries uint64 - Type WafObjectType - _ [4]byte - // Forced padding - // We only support 2 archs and cgo generated the same padding to both. - // We don't want the C struct to be packed because actually go will do the same padding itself, - // we just add it explicitly to not take any chance. - // And we cannot pack a struct in go so it will get tricky if the struct is - // packed (apart from breaking all tracers of course) -} - -// isInvalid determines whether this WAF Object has the invalid type (which is the 0-value). -func (w *WafObject) IsInvalid() bool { - return w.Type == WafInvalidType -} - -// isNil determines whether this WAF Object is nil or not. 
-func (w *WafObject) IsNil() bool { - return w.Type == WafNilType -} - -// isArray determines whether this WAF Object is an array or not. -func (w *WafObject) IsArray() bool { - return w.Type == WafArrayType -} - -// isMap determines whether this WAF Object is a map or not. -func (w *WafObject) IsMap() bool { - return w.Type == WafMapType -} - -// IsUnusable returns true if the wafObject has no impact on the WAF execution. -// But we still need this kind of object to forward map keys in case the value of the map is invalid -func (wo *WafObject) IsUnusable() bool { - return wo.Type == WafInvalidType || wo.Type == WafNilType -} - -type WafConfig struct { - Limits WafConfigLimits - Obfuscator WafConfigObfuscator - FreeFn uintptr -} - -type WafConfigLimits struct { - MaxContainerSize uint32 - MaxContainerDepth uint32 - MaxStringLength uint32 -} - -type WafConfigObfuscator struct { - KeyRegex uintptr // char * - ValueRegex uintptr // char * -} - -type WafResult struct { - Timeout byte - Events WafObject - Actions WafObject - Derivatives WafObject - TotalRuntime uint64 -} - -// wafHandle is a forward declaration in the ddwaf.h header. -// We basically don't need to modify it, only to give it to the waf -type WafHandle uintptr - -// wafContext is a forward declaration in the ddwaf.h header. -// We basically don't need to modify it, only to give it to the waf -type WafContext uintptr diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl.go deleted file mode 100644 index 802cd345..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl.go +++ /dev/null @@ -1,230 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build (linux || darwin) && (amd64 || arm64) && !go1.24 && !datadog.no_waf && (cgo || appsec) - -package bindings - -import ( - "errors" - "fmt" - "os" - - "github.com/DataDog/go-libddwaf/v3/internal/lib" - "github.com/DataDog/go-libddwaf/v3/internal/log" - "github.com/DataDog/go-libddwaf/v3/internal/unsafe" - "github.com/ebitengine/purego" -) - -// wafDl is the type wrapper for all C calls to the waf. -// It uses `libddwaf` to make C calls. -// All calls must go through these one-liner wrappers to be type safe, -// since purego calls are not type safe -type WafDl struct { - wafSymbols - handle uintptr -} - -type wafSymbols struct { - init uintptr - update uintptr - destroy uintptr - knownAddresses uintptr - getVersion uintptr - contextInit uintptr - contextDestroy uintptr - objectFree uintptr - resultFree uintptr - run uintptr -} - -// newWafDl loads the libddwaf shared library and resolves all the relevant symbols. -// The caller is responsible for calling wafDl.Close on the returned object once they -// are done with it so that associated resources can be released.
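// Editor's note: the load sequence described in the comment above and
// implemented just below reduces to a reusable purego pattern: Dlopen the
// shared object, Dlsym a symbol, call it through SyscallN, and Dlclose on any
// failure. A distilled sketch; the library path is a placeholder, and
// ddwaf_get_version is the symbol the original probes first.

//go:build (linux || darwin) && (amd64 || arm64)

package main

import (
	"fmt"

	"github.com/ebitengine/purego"
)

func main() {
	handle, err := purego.Dlopen("/tmp/libddwaf.so", purego.RTLD_GLOBAL|purego.RTLD_NOW)
	if err != nil {
		fmt.Println("dlopen:", err)
		return
	}
	defer purego.Dlclose(handle)

	sym, err := purego.Dlsym(handle, "ddwaf_get_version")
	if err != nil {
		fmt.Println("dlsym:", err)
		return
	}

	// purego.SyscallN returns (r1, r2, errno); only r1 matters here, and it
	// holds a char* that would still need converting to a Go string.
	r1, _, _ := purego.SyscallN(sym)
	fmt.Printf("version char* at %#x\n", r1)
}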
-func NewWafDl() (dl *WafDl, err error) { - file, err := lib.DumpEmbeddedWAF() - if err != nil { - return - } - defer func() { - if rmErr := os.Remove(file); rmErr != nil { - err = errors.Join(err, fmt.Errorf("error removing %s: %w", file, rmErr)) - } - }() - - var handle uintptr - if handle, err = purego.Dlopen(file, purego.RTLD_GLOBAL|purego.RTLD_NOW); err != nil { - return - } - - var symbols wafSymbols - if symbols, err = resolveWafSymbols(handle); err != nil { - if closeErr := purego.Dlclose(handle); closeErr != nil { - err = errors.Join(err, fmt.Errorf("error releasing the shared libddwaf library: %w", closeErr)) - } - return - } - - dl = &WafDl{symbols, handle} - - // Try calling the waf to make sure everything is fine - err = tryCall(func() error { - dl.WafGetVersion() - return nil - }) - if err != nil { - if closeErr := purego.Dlclose(handle); closeErr != nil { - err = errors.Join(err, fmt.Errorf("error releasing the shared libddwaf library: %w", closeErr)) - } - return - } - - if val := os.Getenv(log.EnvVarLogLevel); val != "" { - setLogSym, symErr := purego.Dlsym(handle, "ddwaf_set_log_cb") - if symErr != nil { - return - } - logLevel := log.LevelNamed(val) - dl.syscall(setLogSym, log.CallbackFunctionPointer(), uintptr(logLevel)) - } - - return -} - -func (waf *WafDl) Close() error { - return purego.Dlclose(waf.handle) -} - -// wafGetVersion returns a static string, so we do not need to free it -func (waf *WafDl) WafGetVersion() string { - return unsafe.Gostring(unsafe.Cast[byte](waf.syscall(waf.getVersion))) -} - -// wafInit initializes a new WAF with the provided ruleset, configuration and info objects. A -// cgoRefPool ensures that the provided input values are not moved or garbage collected by the Go -// runtime during the WAF call.
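// Editor's note: the unsafe.KeepAlive calls sprinkled through the wrappers
// below follow one rule worth spelling out: once a Go pointer is flattened to
// a uintptr for purego, the GC no longer sees a reference, so the source
// object must stay reachable until the foreign call returns. A distilled
// sketch using the standard runtime.KeepAlive; fakeForeignCall stands in for
// purego.SyscallN.
package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

type cObject struct{ value uint64 }

func fakeForeignCall(p uintptr) uintptr { return p }

func main() {
	obj := &cObject{value: 42}
	ret := fakeForeignCall(uintptr(unsafe.Pointer(obj)))
	// Without this line, obj could in principle be collected before the
	// "foreign" call returns: only an integer copy of its address is live.
	runtime.KeepAlive(obj)
	fmt.Printf("%#x\n", ret)
}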
-func (waf *WafDl) WafInit(ruleset *WafObject, config *WafConfig, info *WafObject) WafHandle { - handle := WafHandle(waf.syscall(waf.init, unsafe.PtrToUintptr(ruleset), unsafe.PtrToUintptr(config), unsafe.PtrToUintptr(info))) - unsafe.KeepAlive(ruleset) - unsafe.KeepAlive(config) - unsafe.KeepAlive(info) - return handle -} - -func (waf *WafDl) WafUpdate(handle WafHandle, ruleset *WafObject, info *WafObject) WafHandle { - newHandle := WafHandle(waf.syscall(waf.update, uintptr(handle), unsafe.PtrToUintptr(ruleset), unsafe.PtrToUintptr(info))) - unsafe.KeepAlive(ruleset) - unsafe.KeepAlive(info) - return newHandle -} - -func (waf *WafDl) WafDestroy(handle WafHandle) { - waf.syscall(waf.destroy, uintptr(handle)) - unsafe.KeepAlive(handle) -} - -// wafKnownAddresses returns static strings so we do not need to free them -func (waf *WafDl) WafKnownAddresses(handle WafHandle) []string { - var nbAddresses uint32 - - arrayVoidC := waf.syscall(waf.knownAddresses, uintptr(handle), unsafe.PtrToUintptr(&nbAddresses)) - if arrayVoidC == 0 { - return nil - } - - addresses := make([]string, int(nbAddresses)) - for i := 0; i < int(nbAddresses); i++ { - addresses[i] = unsafe.Gostring(*unsafe.CastWithOffset[*byte](arrayVoidC, uint64(i))) - } - - unsafe.KeepAlive(&nbAddresses) - unsafe.KeepAlive(handle) - - return addresses -} - -func (waf *WafDl) WafContextInit(handle WafHandle) WafContext { - ctx := WafContext(waf.syscall(waf.contextInit, uintptr(handle))) - unsafe.KeepAlive(handle) - return ctx -} - -func (waf *WafDl) WafContextDestroy(context WafContext) { - waf.syscall(waf.contextDestroy, uintptr(context)) - unsafe.KeepAlive(context) -} - -func (waf *WafDl) WafResultFree(result *WafResult) { - waf.syscall(waf.resultFree, unsafe.PtrToUintptr(result)) - unsafe.KeepAlive(result) -} - -func (waf *WafDl) WafObjectFree(obj *WafObject) { - waf.syscall(waf.objectFree, unsafe.PtrToUintptr(obj)) - unsafe.KeepAlive(obj) -} - -func (waf *WafDl) WafRun(context WafContext, persistentData, ephemeralData *WafObject, result *WafResult, timeout uint64) WafReturnCode { - rc := WafReturnCode(waf.syscall(waf.run, uintptr(context), unsafe.PtrToUintptr(persistentData), unsafe.PtrToUintptr(ephemeralData), unsafe.PtrToUintptr(result), uintptr(timeout))) - unsafe.KeepAlive(context) - unsafe.KeepAlive(persistentData) - unsafe.KeepAlive(ephemeralData) - unsafe.KeepAlive(result) - unsafe.KeepAlive(timeout) - return rc -} - -func (waf *WafDl) Handle() uintptr { - return waf.handle -} - -// syscall is the only way to make C calls with this interface. -// purego implementation limits the number of arguments to 9, it will panic if more are provided -// Note: `purego.SyscallN` has 3 return values: these are the following: -// -// 1st - The return value is a pointer or a int of any type -// 2nd - The return value is a float -// 3rd - The value of `errno` at the end of the call -func (waf *WafDl) syscall(fn uintptr, args ...uintptr) uintptr { - ret, _, _ := purego.SyscallN(fn, args...) - return ret -} - -// resolveWafSymbols resolves relevant symbols from the libddwaf shared library using the provided -// purego.Dlopen handle. 
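// Editor's note: the early-return chain in resolveWafSymbols below is correct
// but repetitive; the same ten symbols can be resolved table-driven. A sketch
// of that alternative, using the same purego.Dlsym call and the symbol list
// copied from the function below (a real handle would come from purego.Dlopen):
package wafsketch

import "github.com/ebitengine/purego"

func resolveAll(handle uintptr) (map[string]uintptr, error) {
	names := []string{
		"ddwaf_init", "ddwaf_update", "ddwaf_destroy", "ddwaf_known_addresses",
		"ddwaf_get_version", "ddwaf_context_init", "ddwaf_context_destroy",
		"ddwaf_result_free", "ddwaf_object_free", "ddwaf_run",
	}
	symbols := make(map[string]uintptr, len(names))
	for _, name := range names {
		addr, err := purego.Dlsym(handle, name)
		if err != nil {
			return nil, err // fail fast, as the original does
		}
		symbols[name] = addr
	}
	return symbols, nil
}

// The struct-of-uintptrs used by the original trades this flexibility for
// direct field access, avoiding map lookups on the hot call path.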
-func resolveWafSymbols(handle uintptr) (symbols wafSymbols, err error) { - if symbols.init, err = purego.Dlsym(handle, "ddwaf_init"); err != nil { - return - } - if symbols.update, err = purego.Dlsym(handle, "ddwaf_update"); err != nil { - return - } - if symbols.destroy, err = purego.Dlsym(handle, "ddwaf_destroy"); err != nil { - return - } - if symbols.knownAddresses, err = purego.Dlsym(handle, "ddwaf_known_addresses"); err != nil { - return - } - if symbols.getVersion, err = purego.Dlsym(handle, "ddwaf_get_version"); err != nil { - return - } - if symbols.contextInit, err = purego.Dlsym(handle, "ddwaf_context_init"); err != nil { - return - } - if symbols.contextDestroy, err = purego.Dlsym(handle, "ddwaf_context_destroy"); err != nil { - return - } - if symbols.resultFree, err = purego.Dlsym(handle, "ddwaf_result_free"); err != nil { - return - } - if symbols.objectFree, err = purego.Dlsym(handle, "ddwaf_object_free"); err != nil { - return - } - if symbols.run, err = purego.Dlsym(handle, "ddwaf_run"); err != nil { - return - } - - return -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl_unsupported.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl_unsupported.go deleted file mode 100644 index 9e370827..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl_unsupported.go +++ /dev/null @@ -1,51 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Build when the target OS or architecture are not supported -//go:build (!linux && !darwin) || (!amd64 && !arm64) || go1.24 || datadog.no_waf || (!cgo && !appsec) - -package bindings - -type WafDl struct{} - -func NewWafDl() (dl *WafDl, err error) { - return nil, nil -} - -func (waf *WafDl) WafGetVersion() string { - return "" -} - -func (waf *WafDl) WafInit(obj *WafObject, config *WafConfig, info *WafObject) WafHandle { - return 0 -} - -func (waf *WafDl) WafUpdate(handle WafHandle, ruleset *WafObject, info *WafObject) WafHandle { - return 0 -} - -func (waf *WafDl) WafDestroy(handle WafHandle) { -} - -func (waf *WafDl) WafKnownAddresses(handle WafHandle) []string { - return nil -} - -func (waf *WafDl) WafContextInit(handle WafHandle) WafContext { - return 0 -} - -func (waf *WafDl) WafContextDestroy(context WafContext) { -} - -func (waf *WafDl) WafResultFree(result *WafResult) { -} - -func (waf *WafDl) WafObjectFree(obj *WafObject) { -} - -func (waf *WafDl) WafRun(context WafContext, persistentData, ephemeralData *WafObject, result *WafResult, timeout uint64) WafReturnCode { - return WafErrInternal -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/.version b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/.version deleted file mode 100644 index 74406836..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/.version +++ /dev/null @@ -1 +0,0 @@ -1.18.0 \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib.go deleted file mode 100644 index f656122a..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib.go +++ /dev/null @@ -1,61 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. 
-// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build ((darwin && (amd64 || arm64)) || (linux && (amd64 || arm64))) && !go1.24 && !datadog.no_waf && (cgo || appsec) - -package lib - -import ( - "bytes" - "compress/gzip" - "errors" - "fmt" - "io" - "os" - - _ "embed" -) - -//go:embed .version -var EmbeddedWAFVersion string - -func DumpEmbeddedWAF() (path string, err error) { - file, err := os.CreateTemp("", embedNamePattern) - if err != nil { - return path, fmt.Errorf("error creating temp file: %w", err) - } - path = file.Name() - - defer func() { - if closeErr := file.Close(); closeErr != nil { - err = errors.Join(err, fmt.Errorf("error closing file: %w", closeErr)) - } - if path != "" && err != nil { - if rmErr := os.Remove(path); rmErr != nil { - err = errors.Join(err, fmt.Errorf("error removing file: %w", rmErr)) - } - } - }() - - gr, err := gzip.NewReader(bytes.NewReader(libddwaf)) - if err != nil { - return path, fmt.Errorf("error creating gzip reader: %w", err) - } - - uncompressedLibddwaf, err := io.ReadAll(gr) - if err != nil { - return path, fmt.Errorf("error reading gzip content: %w", err) - } - - if err := gr.Close(); err != nil { - return path, fmt.Errorf("error closing gzip reader: %w", err) - } - - if err := os.WriteFile(file.Name(), uncompressedLibddwaf, 0400); err != nil { - return path, fmt.Errorf("error writing file: %w", err) - } - - return path, nil -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-amd64.dylib.gz b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-amd64.dylib.gz deleted file mode 100644 index 75ed8874..00000000 Binary files a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-amd64.dylib.gz and /dev/null differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-arm64.dylib.gz b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-arm64.dylib.gz deleted file mode 100644 index 99e0af6a..00000000 Binary files a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-arm64.dylib.gz and /dev/null differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-amd64.so.gz b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-amd64.so.gz deleted file mode 100644 index ddde4e44..00000000 Binary files a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-amd64.so.gz and /dev/null differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-arm64.so.gz b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-arm64.so.gz deleted file mode 100644 index b1e50616..00000000 Binary files a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-arm64.so.gz and /dev/null differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/log_cgo.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/log_cgo.go deleted file mode 100644 index 1d82844a..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/log_cgo.go +++ /dev/null @@ -1,35 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
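// Editor's note: DumpEmbeddedWAF above is a small reusable pattern: ship a
// gzip-compressed blob through go:embed, inflate it into a read-only temp
// file, and hand the path to dlopen. A generic sketch of the same steps;
// blob.gz is a hypothetical embedded asset.
package main

import (
	"bytes"
	"compress/gzip"
	_ "embed"
	"fmt"
	"io"
	"os"
)

//go:embed blob.gz
var blob []byte

func dumpBlob() (string, error) {
	f, err := os.CreateTemp("", "blob-*")
	if err != nil {
		return "", fmt.Errorf("creating temp file: %w", err)
	}
	defer f.Close()

	gr, err := gzip.NewReader(bytes.NewReader(blob))
	if err != nil {
		return "", fmt.Errorf("gzip reader: %w", err)
	}
	defer gr.Close()

	raw, err := io.ReadAll(gr)
	if err != nil {
		return "", fmt.Errorf("inflating blob: %w", err)
	}

	// 0400 mirrors the original: the dumped library is only ever read back.
	if err := os.WriteFile(f.Name(), raw, 0400); err != nil {
		return "", fmt.Errorf("writing blob: %w", err)
	}
	return f.Name(), nil
}

func main() {
	fmt.Println(dumpBlob())
}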
- -//go:build cgo && !datadog.no_waf - -package log - -// #include "./ddwaf.h" -// extern void ddwafLogCallbackFnv3( -// DDWAF_LOG_LEVEL level, -// char* function, -// char* file, -// unsigned line, -// char* message, -// uint64_t message_len -// ); -import "C" -import "github.com/DataDog/go-libddwaf/v3/internal/unsafe" - -// CallbackFunctionPointer returns a pointer to the log callback function which -// can be used with libddwaf. -func CallbackFunctionPointer() uintptr { - return uintptr(C.ddwafLogCallbackFnv3) -} - -//export ddwafLogCallbackFnv3 -func ddwafLogCallbackFnv3(level C.DDWAF_LOG_LEVEL, fnPtr, filePtr *C.char, line C.unsigned, msgPtr *C.char, _ C.uint64_t) { - function := unsafe.Gostring(unsafe.CastNative[C.char, byte](fnPtr)) - file := unsafe.Gostring(unsafe.CastNative[C.char, byte](filePtr)) - message := unsafe.Gostring(unsafe.CastNative[C.char, byte](msgPtr)) - - logMessage(Level(level), function, file, uint(line), message) -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/log_purego.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/log_purego.go deleted file mode 100644 index e48036b5..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/log_purego.go +++ /dev/null @@ -1,37 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build !cgo && (darwin || freebsd) && !datadog.no_waf && !go1.24 - -package log - -import ( - "github.com/DataDog/go-libddwaf/v3/internal/unsafe" - "sync" - - "github.com/ebitengine/purego" -) - -var ( - once sync.Once - functionPointer uintptr -) - -// CallbackFunctionPointer returns a pointer to the log callback function which -// can be used with libddwaf. -func CallbackFunctionPointer() uintptr { - once.Do(func() { - functionPointer = purego.NewCallback(ddwafLogCallbackFn) - }) - return functionPointer -} - -func ddwafLogCallbackFn(level Level, fnPtr, filePtr *byte, line uint, msgPtr *byte, _ uint64) { - function := unsafe.Gostring(fnPtr) - file := unsafe.Gostring(filePtr) - message := unsafe.Gostring(msgPtr) - - logMessage(level, function, file, line, message) -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_unsupported_go.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_unsupported_go.go deleted file mode 100644 index a6b60c23..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_unsupported_go.go +++ /dev/null @@ -1,15 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
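// Editor's note: the purego log bridge above wraps purego.NewCallback in a
// sync.Once. purego callbacks come from a limited pool and cannot be freed,
// so a process should create each one exactly once and cache the pointer. A
// distilled sketch of that pattern; the callback body is a placeholder.
package main

import (
	"fmt"
	"sync"

	"github.com/ebitengine/purego"
)

var (
	once  sync.Once
	cbPtr uintptr
)

// callbackPointer lazily creates the one C-callable pointer for this process.
func callbackPointer() uintptr {
	once.Do(func() {
		cbPtr = purego.NewCallback(func(level uintptr) uintptr {
			fmt.Println("log level:", level)
			return 0
		})
	})
	return cbPtr
}

func main() {
	fmt.Printf("callback at %#x\n", callbackPointer())
}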
- -// Unsupported Go versions (>=) -//go:build go1.24 - -package support - -import "github.com/DataDog/go-libddwaf/v3/errors" - -func init() { - wafSupportErrors = append(wafSupportErrors, errors.UnsupportedGoVersionError{}) -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_unsupported_target.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_unsupported_target.go deleted file mode 100644 index 04546c1f..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_unsupported_target.go +++ /dev/null @@ -1,20 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Unsupported target OS or architecture -// Unsupported OS Unsupported Arch -//go:build (!linux && !darwin) || (!amd64 && !arm64) - -package support - -import ( - "runtime" - - "github.com/DataDog/go-libddwaf/v3/errors" -) - -func init() { - wafSupportErrors = append(wafSupportErrors, errors.UnsupportedOSArchError{runtime.GOOS, runtime.GOARCH}) -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/unsafe/utils.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/unsafe/utils.go deleted file mode 100644 index 44d97282..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/unsafe/utils.go +++ /dev/null @@ -1,107 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package unsafe - -import ( - "reflect" - stdUnsafe "unsafe" -) - -// Gostring copies a char* to a Go string. -func Gostring(ptr *byte) string { - if ptr == nil { - return "" - } - var length int - for { - if *(*byte)(stdUnsafe.Add(stdUnsafe.Pointer(ptr), uintptr(length))) == '\x00' { - break - } - length++ - } - // the string builtin copies the slice - return string(stdUnsafe.Slice(ptr, length)) -} - -// NativeStringUnwrap casts a native string type into its runtime value, exported as the struct reflect.StringHeader -func NativeStringUnwrap(str string) reflect.StringHeader { - return *(*reflect.StringHeader)(stdUnsafe.Pointer(&str)) -} - -func GostringSized(ptr *byte, size uint64) string { - if ptr == nil { - return "" - } - return string(stdUnsafe.Slice(ptr, size)) -} - -// Cstring converts a Go string to a *byte that can be passed to C code. -func Cstring(name string) *byte { - var b = make([]byte, len(name)+1) - copy(b, name) - return &b[0] -} - -// Cast is used to centralize the unsafe use of C-allocated pointers.
-// We take the address and then dereference it to keep go vet from flagging a possible misuse of unsafe.Pointer -func Cast[T any](ptr uintptr) *T { - return (*T)(*(*stdUnsafe.Pointer)(stdUnsafe.Pointer(&ptr))) -} - -type Native interface { - ~byte | ~float64 | ~float32 | ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~bool | ~uintptr -} - -func CastNative[N Native, T Native](ptr *N) *T { - return (*T)(*(*stdUnsafe.Pointer)(stdUnsafe.Pointer(&ptr))) -} - -// NativeToUintptr is a helper used to populate WafObject values -// with Go values -func NativeToUintptr[T any](x T) uintptr { - return *(*uintptr)(stdUnsafe.Pointer(&x)) -} - -// UintptrToNative is a helper used to retrieve Go values from a uintptr-encoded -// value of a WafObject -func UintptrToNative[T any](x uintptr) T { - return *(*T)(stdUnsafe.Pointer(&x)) -} - -// CastWithOffset is the same as Cast but adds an offset to the pointer by a multiple of the size -// of the type pointed to. -func CastWithOffset[T any](ptr uintptr, offset uint64) *T { - return (*T)(stdUnsafe.Add(*(*stdUnsafe.Pointer)(stdUnsafe.Pointer(&ptr)), offset*uint64(stdUnsafe.Sizeof(*new(T))))) -} - -// PtrToUintptr is a helper to centralize the usage of unsafe.Pointer; -// do not use this function to cast interfaces -func PtrToUintptr[T any](arg *T) uintptr { - return uintptr(stdUnsafe.Pointer(arg)) -} - -func SliceToUintptr[T any](arg []T) uintptr { - return (*reflect.SliceHeader)(stdUnsafe.Pointer(&arg)).Data -} - -// KeepAlive() globals -var ( - alwaysFalse bool - escapeSink any -) - -// KeepAlive is a copy of runtime.KeepAlive. -// It has 2 usages: -// - It forces the deallocation of the memory to take place later than expected (just like runtime.KeepAlive) -// - It forces the given argument x to be escaped on the heap by saving it into a global value (Go doesn't provide a standard way to do it as of today) -// It is implemented so that the compiler cannot optimize it. -// -//go:noinline -func KeepAlive[T any](x T) { - if alwaysFalse { - escapeSink = x - } -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/metrics.go b/vendor/github.com/DataDog/go-libddwaf/v3/metrics.go deleted file mode 100644 index 86130995..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/metrics.go +++ /dev/null @@ -1,101 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package waf - -import ( - "fmt" - "sync" - "time" -) - -// Stats stores the metrics collected by the WAF. -type Stats struct { - // Timers is a map of metrics and their durations. - Timers map[string]time.Duration - - // TimeoutCount is the number of times the WAF ran out of its time budget. - TimeoutCount uint64 - - // Truncations provides details about truncations that occurred while - // encoding address data for WAF execution.
- Truncations map[TruncationReason][]int -} - -const ( - wafEncodeTag = "_dd.appsec.waf.encode" - wafRunTag = "_dd.appsec.waf.duration_ext" - wafDurationTag = "_dd.appsec.waf.duration" - wafDecodeTag = "_dd.appsec.waf.decode" - wafTimeoutTag = "_dd.appsec.waf.timeouts" - wafTruncationTag = "_dd.appsec.waf.truncations" -) - -// Metrics transforms the stats returned by the WAF into a map of key-value metrics for the Datadog backend -func (stats Stats) Metrics() map[string]any { - tags := make(map[string]any, len(stats.Timers)+len(stats.Truncations)+1) - for k, v := range stats.Timers { - tags[k] = float64(v.Nanoseconds()) / float64(time.Microsecond) // The metrics should be in microseconds - } - - tags[wafTimeoutTag] = stats.TimeoutCount - for reason, list := range stats.Truncations { - tags[fmt.Sprintf("%s.%s", wafTruncationTag, reason.String())] = list - } - - return tags -} - -type metricsStore struct { - data map[string]time.Duration - mutex sync.RWMutex -} - -func (metrics *metricsStore) add(key string, duration time.Duration) { - metrics.mutex.Lock() - defer metrics.mutex.Unlock() - if metrics.data == nil { - metrics.data = make(map[string]time.Duration, 5) - } - - metrics.data[key] += duration -} - -func (metrics *metricsStore) get(key string) time.Duration { - metrics.mutex.RLock() - defer metrics.mutex.RUnlock() - return metrics.data[key] -} - -func (metrics *metricsStore) copy() map[string]time.Duration { - metrics.mutex.Lock() - defer metrics.mutex.Unlock() - if metrics.data == nil { - return nil - } - - copy := make(map[string]time.Duration, len(metrics.data)) - for k, v := range metrics.data { - copy[k] = v - } - return copy -} - -// merge merges the current metrics with new ones -func (metrics *metricsStore) merge(other map[string]time.Duration) { - metrics.mutex.Lock() - defer metrics.mutex.Unlock() - if metrics.data == nil { - metrics.data = make(map[string]time.Duration, 5) - } - - for key, val := range other { - prev, ok := metrics.data[key] - if !ok { - prev = 0 - } - metrics.data[key] = prev + val - } -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/timer/component.go b/vendor/github.com/DataDog/go-libddwaf/v3/timer/component.go deleted file mode 100644 index 3b754587..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/timer/component.go +++ /dev/null @@ -1,28 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2022 Datadog, Inc. - -package timer - -import ( - "sync/atomic" -) - -// components stores the data shared between child timers of the same component name -type components struct { - lookup map[string]*atomic.Int64 - storage []atomic.Int64 -} - -func newComponents(names []string) components { - lookup := make(map[string]*atomic.Int64, len(names)) - storage := make([]atomic.Int64, len(names)) - for i, name := range names { - lookup[name] = &storage[i] - } - return components{ - lookup: lookup, - storage: storage, - } -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/timer/config.go b/vendor/github.com/DataDog/go-libddwaf/v3/timer/config.go deleted file mode 100644 index ecf70795..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/timer/config.go +++ /dev/null @@ -1,86 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2022 Datadog, Inc. - -package timer - -import ( - "math" - "time" -) - -const ( - // UnlimitedBudget is a special value for the budget that means the timer has no budget - UnlimitedBudget = time.Duration(math.MaxInt64) - - // DynamicBudget is a special value for the budget that means the timer should inherit the budget from its parent - // It is the default value if no options such as WithBudget, WithUnlimitedBudget or WithInheritedBudget are provided - DynamicBudget = ^time.Duration(0) -) - -// DynamicBudgetFunc is a function that is called on all children when a change to the parent happens -type DynamicBudgetFunc func(timer NodeTimer) time.Duration - -// config is the configuration of a timer. It can be created through the use of options -type config struct { - dynamicBudget DynamicBudgetFunc - // components store all the components of the timer - components []string - // budget is the time budget for the timer - budget time.Duration -} - -func newConfig(options ...Option) config { - config := config{} - // Make sure the budget is inherited by default - WithInheritedSumBudget()(&config) - for _, option := range options { - option(&config) - } - return config -} - -// Option are the configuration options for any type of timer. Please read the documentation of said timer to see which options are available -type Option func(*config) - -// WithBudget is an Option that sets the budget value -func WithBudget(budget time.Duration) Option { - return func(c *config) { - c.budget = budget - } -} - -// WithUnlimitedBudget is an Option that sets the UnlimitedBudget flag on config.budget -func WithUnlimitedBudget() Option { - return func(c *config) { - c.budget = UnlimitedBudget - } -} - -// WithInheritedBudget is an Option that sets the DynamicBudget flag on config.budget -func WithInheritedBudget() Option { - return func(c *config) { - c.budget = DynamicBudget - c.dynamicBudget = func(timer NodeTimer) time.Duration { - return timer.Remaining() - } - } -} - -// WithInheritedSumBudget is an Option that sets the DynamicBudget flag on config.budget and sets the DynamicBudgetFunc to sum the remaining time of all children -func WithInheritedSumBudget() Option { - return func(c *config) { - c.budget = DynamicBudget - c.dynamicBudget = func(timer NodeTimer) time.Duration { - return timer.SumRemaining() - } - } -} - -// WithComponents is an Option that adds multiple components to the components list -func WithComponents(components ...string) Option { - return func(c *config) { - c.components = append(c.components, components...) - } -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/timer/timer.go b/vendor/github.com/DataDog/go-libddwaf/v3/timer/timer.go deleted file mode 100644 index 05cfd62f..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/timer/timer.go +++ /dev/null @@ -1,115 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2022 Datadog, Inc. - -package timer - -import ( - "time" -) - -// Timer is the default interface for all timers. NewTimer will provide you with a Timer. -// Keep in mind that they are NOT thread-safe and once Stop() is called, the Timer cannot be restarted. -type Timer interface { - // Start starts the timer and returns the start time. 
- // If the timer was already started, it returns the previous start time. - // If the timer was started without specifying a budget, it will inherit the budget from its parent when calling Start(). - // if the timer has no parent and no budget was specified, the call creating the timer (either NewTreeTimer or NewTimer) will return an error asking to specify a budget (which can be unlimited). - // Start is NOT thread-safe - Start() time.Time - - // Stop ends the timer and returns the time spent on the timer as Spent() would. - // Stop will trigger the computation of sum timers if the timer is part of a tree. See NodeTimer for more information. - // Stop is NOT thread-safe - Stop() time.Duration - - // Spent returns the current time spent between Start() and Stop() or between Start() and now if the timer is still running. - // Spent is thread-safe - Spent() time.Duration - - // Remaining returns the time remaining before the timer reaches its budget. (budget - Spent()) - // It returns 0 if the timer is exhausted. Remaining may never return a value below zero. - // Remaining only makes sense if the timer has a budget. If the timer has no budget, it returns the special value UnlimitedBudget. - // Remaining is thread-safe - Remaining() time.Duration - - // Exhausted returns true if the timer spent in the timer is greater than the budget. (Spent() > budget) - // Exhausted may return true only in case the time has a budget. If the timer has n, it returns false. - // Exhausted is thread-safe - Exhausted() bool - - // Timed is a convenience function that starts the timer, calls the provided function and stops the timer. - // Timed is panic-safe and will stop the timer even if the function panics. - // Timed is NOT thread-safe - Timed(timedFunc func(timer Timer)) time.Duration -} - -// SumTimer is a sub-interface for timers capable of having children and making the sum of their time spent. -// NewTreeTimer will provide you with a timer supporting this interface -type SumTimer interface { - // SumSpent returns the sum of the time spent in each component of the timer. - // SumSpent is thread-safe - SumSpent() time.Duration - - // SumRemaining returns the sum of the time remaining in each component of the timer. - // SumRemaining returns UnlimitedBudget if the timer has no budget. (UnlimitedBudget) - // SumRemaining is thread-safe - SumRemaining() time.Duration - - // SumExhausted returns true if the sum of the time spent in each component of the timer is greater than the budget. - // SumExhausted returns false if the timer has no budget. (UnlimitedBudget) - // SumExhausted is thread-safe - SumExhausted() bool -} - -// NodeTimer is the interface for tree timers. NewTreeTimer will provide you with a NodeTimer. -// NodeTimer can have children (NodeTimer or Timer) and will compute the sum of their spent time each time a children timer calls its Stop() method. -// To add children to a NodeTimer, you have to specify component names when creating the timer with the WithComponent and WithComponents options. -// The component names must be unique and cannot be empty. The component names are used to identify the children timers. -// The returned timer can now create children timers using the NewNode and NewLeaf functions using the names provided when creating the parent timer. -// Multiple timers from the same component can be used in parallel and will be summed together. 
-// In parallel to that, NodeTimer can have their own wall time timer and budget that will apply to the sum of their children and their own timer. -// The following functions are the same as the Timer interface but works using the sum of the children timers: -// - SumSpent() -> Spent() -// - SumRemaining() -> Remaining() -// - SumExhausted() -> Exhausted() -// Keep in mind that the timer itself (only Start and Stop) is NOT thread-safe and once Stop() is called, the NodeTimer cannot be restarted. -type NodeTimer interface { - Timer - SumTimer - - // NewNode creates a new NodeTimer with the given name and options. The given name must match one of the component name of the parent timer. - // A node timer is required to have at least one component. If no component is provided, it will return an error asking you to use NewLeaf instead. - // If no budget is provided, it will inherit the budget from its parent when calling Start(). - // NewNode is thread-safe - NewNode(name string, options ...Option) (NodeTimer, error) - - // NewLeaf creates a new Timer with the given name and options. The given name must match one of the component name of the parent timer. - // A leaf timer is forbidden to have components. If a component is provided, it will return an error asking you to use NewNode instead. - // If no budget is provided, it will inherit the budget from its parent when calling Start(). - // NewLeaf is thread-safe - NewLeaf(name string, options ...Option) (Timer, error) - - // MustLeaf creates a new Timer with the given name and options. The given name must match one of the component name of the parent timer. - // MustLeaf wraps a call to NewLeaf but will panic if the error is not nil. - // MustLeaf is thread-safe - MustLeaf(name string, options ...Option) Timer - - // AddTime adds the given duration to the component of the timer with the given name. - // AddTime is thread-safe - AddTime(name string, duration time.Duration) - - // Stats returns a map of the time spent in each component of the timer. - // Stats is thread-safe - Stats() map[string]time.Duration - - // childStarted is used to propagate the start of a child timer to the parent timer through the whole tree. - childStarted() - - // childStopped is used to propagate the time spent in a child timer to the parent timer through the whole tree. - childStopped(componentName string, duration time.Duration) - - // now is a convenience wrapper to swap the time.Now() function for testing and performance purposes. - now() time.Time -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/waf.go b/vendor/github.com/DataDog/go-libddwaf/v3/waf.go deleted file mode 100644 index 8b902e63..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/waf.go +++ /dev/null @@ -1,183 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package waf - -import ( - "errors" - "fmt" - "sync" - "time" - - wafErrors "github.com/DataDog/go-libddwaf/v3/errors" - "github.com/DataDog/go-libddwaf/v3/internal/bindings" - "github.com/DataDog/go-libddwaf/v3/internal/support" -) - -// ErrTimeout is the error returned when the WAF times out while processing a request. -// Deprecated: use github.com/DataDog/go-libddwaf/errors.ErrTimeout instead. 
-const ErrTimeout = wafErrors.ErrTimeout - -// Diagnostics stores the information - provided by the WAF - about WAF rules initialization. -type Diagnostics struct { - Rules *DiagnosticEntry - CustomRules *DiagnosticEntry - Actions *DiagnosticEntry - Exclusions *DiagnosticEntry - RulesOverrides *DiagnosticEntry - RulesData *DiagnosticEntry - Processors *DiagnosticEntry - Scanners *DiagnosticEntry - Version string -} - -// TopLevelError returns the list of top-level errors reported by the WAF on any of the Diagnostics -// entries, rolled up into a single error value. Returns nil if no top-level errors were reported. -// Individual, item-level errors might still exist. -func (d *Diagnostics) TopLevelError() error { - fields := map[string]*DiagnosticEntry{ - "rules": d.Rules, - "actions": d.Actions, - "custom_rules": d.CustomRules, - "exclusions": d.Exclusions, - "rules_override": d.RulesOverrides, - "rules_data": d.RulesData, - "processors": d.Processors, - "scanners": d.Scanners, - } - - var errs []error - for field, entry := range fields { - if entry == nil || entry.Error == "" { - // No entry or no error => we're all good. - continue - } - errs = append(errs, fmt.Errorf("in %#v: %s", field, entry.Error)) - } - - return errors.Join(errs...) -} - -// DiagnosticEntry stores the information - provided by the WAF - about loaded and failed rules -// for a specific entry in the WAF ruleset -type DiagnosticEntry struct { - Addresses *DiagnosticAddresses - Errors map[string][]string // Item-level errors (map of error message to entity identifiers or index:#) - Error string // If the entire entry was in error (e.g: invalid format) - Loaded []string // Successfully loaded entity identifiers (or index:#) - Failed []string // Failed entity identifiers (or index:#) -} - -// DiagnosticAddresses stores the information - provided by the WAF - about the known addresses and -// whether they are required or optional. Addresses used by WAF rules are always required. Addresses -// used by WAF exclusion filters may be required or (rarely) optional. Addresses used by WAF -// processors may be required or optional. -type DiagnosticAddresses struct { - Required []string - Optional []string -} - -// Result stores the multiple values returned by a call to ddwaf_run -type Result struct { - // Events is the list of events the WAF detected, together with any relevant - // details. - Events []any - - // Derivatives is the set of key-value pairs generated by the WAF, and which - // need to be reported on the trace to provide additional data to the backend. - Derivatives map[string]any - - // Actions is the set of actions the WAF decided on when evaluating rules - // against the provided address data. It maps action types to their dynamic parameter values - Actions map[string]any - - // TimeSpent is the time the WAF self-reported as spent processing the call to ddwaf_run - TimeSpent time.Duration -} - -// Globally dlopen() libddwaf only once because several dlopens (eg. in tests) -// aren't supported by macOS. -var ( - // libddwaf's dynamic library handle and entrypoints - wafLib *bindings.WafDl - // libddwaf's dlopen error if any - wafLoadErr error - openWafOnce sync.Once -) - -// Load loads libddwaf's dynamic library. The dynamic library is opened only -// once by the first call to this function and internally stored globally, and -// no function is currently provided in this API to close the opened handle. 
-// Calling this function is not mandatory and is automatically performed by -// calls to NewHandle, the entrypoint of libddwaf, but Load is useful in order -// to explicitly check libddwaf's general health where calling NewHandle doesn't -// necessarily apply nor is doable. -// The function returns ok when libddwaf was successfully loaded, along with a -// non-nil error if any. Note that both ok and err can be set, meaning that -// libddwaf is usable but some non-critical errors happened, such as failures -// to remove temporary files. It is safe to continue using libddwaf in such -// case. -func Load() (ok bool, err error) { - if ok, err = Health(); !ok { - return false, err - } - - openWafOnce.Do(func() { - wafLib, wafLoadErr = bindings.NewWafDl() - if wafLoadErr != nil { - return - } - wafVersion = wafLib.WafGetVersion() - }) - - return wafLib != nil, wafLoadErr -} - -var wafVersion string - -// Version returns the version returned by libddwaf. -// It relies on the dynamic loading of the library, which can fail and return -// an empty string or the previously loaded version, if any. -func Version() string { - Load() - return wafVersion -} - -// HasEvents return true if the result holds at least 1 event -func (r *Result) HasEvents() bool { - return len(r.Events) > 0 -} - -// HasDerivatives return true if the result holds at least 1 derivative -func (r *Result) HasDerivatives() bool { - return len(r.Derivatives) > 0 -} - -// HasActions return true if the result holds at least 1 action -func (r *Result) HasActions() bool { - return len(r.Actions) > 0 -} - -// SupportsTarget returns true and a nil error when the target host environment -// is supported by this package and can be further used. -// Otherwise, it returns false along with an error detailing why. -func SupportsTarget() (bool, error) { - wafSupportErrors := support.WafSupportErrors() - return wafSupportErrors == nil, errors.Join(wafSupportErrors...) -} - -// Health returns true if the waf is usable, false otherwise. At the same time it can return an error -// if the waf is not usable, but the error is not blocking if true is returned, otherwise it is. -// The following conditions are checked: -// - The Waf library has been loaded successfully (you need to call `Load()` first for this case to be taken into account) -// - The Waf library has not been manually disabled with the `datadog.no_waf` go build tag -// - The Waf library is not in an unsupported OS/Arch -// - The Waf library is not in an unsupported Go version -func Health() (bool, error) { - wafSupportErrors := errors.Join(support.WafSupportErrors()...) 
-	wafManuallyDisabledErr := support.WafManuallyDisabledError()
-
-	return (wafLib != nil || wafLoadErr == nil) && wafSupportErrors == nil && wafManuallyDisabledErr == nil, errors.Join(wafLoadErr, wafSupportErrors, wafManuallyDisabledErr)
-}
diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/.gitattributes b/vendor/github.com/DataDog/go-libddwaf/v4/.gitattributes
similarity index 100%
rename from vendor/github.com/DataDog/go-libddwaf/v3/.gitattributes
rename to vendor/github.com/DataDog/go-libddwaf/v4/.gitattributes
diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/.gitignore b/vendor/github.com/DataDog/go-libddwaf/v4/.gitignore
similarity index 100%
rename from vendor/github.com/DataDog/go-libddwaf/v3/.gitignore
rename to vendor/github.com/DataDog/go-libddwaf/v4/.gitignore
diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/CODEOWNERS b/vendor/github.com/DataDog/go-libddwaf/v4/CODEOWNERS
similarity index 100%
rename from vendor/github.com/DataDog/go-libddwaf/v3/CODEOWNERS
rename to vendor/github.com/DataDog/go-libddwaf/v4/CODEOWNERS
diff --git a/vendor/github.com/DataDog/appsec-internal-go/LICENSE b/vendor/github.com/DataDog/go-libddwaf/v4/LICENSE
similarity index 100%
rename from vendor/github.com/DataDog/appsec-internal-go/LICENSE
rename to vendor/github.com/DataDog/go-libddwaf/v4/LICENSE
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/README.md b/vendor/github.com/DataDog/go-libddwaf/v4/README.md
new file mode 100644
index 00000000..2432adc9
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/README.md
@@ -0,0 +1,155 @@
+# go-libddwaf
+
+This project's goal is to produce a higher level API for the go bindings to [libddwaf](https://github.com/DataDog/libddwaf): DataDog in-app WAF.
+It consists of 2 separate entities: the bindings for the calls to libddwaf, and the encoder, whose job is to convert _any_ go value to its libddwaf object representation.
+
+An example usage would be:
+
+```go
+import (
+	_ "embed"
+	"encoding/json"
+	"fmt"
+
+	waf "github.com/DataDog/go-libddwaf/v4"
+	"github.com/DataDog/go-libddwaf/v4/timer"
+)
+
+//go:embed ruleset.json
+var ruleset []byte
+
+func main() {
+	var parsedRuleset any
+
+	if err := json.Unmarshal(ruleset, &parsedRuleset); err != nil {
+		panic(err)
+	}
+
+	builder, err := waf.NewBuilder("", "")
+	if err != nil {
+		panic(err)
+	}
+	defer builder.Close()
+
+	if _, err = builder.AddOrUpdateConfig("my/config", parsedRuleset); err != nil {
+		panic(err)
+	}
+
+	wafHandle := builder.Build()
+	if wafHandle == nil {
+		panic("WAF handle is nil")
+	}
+	defer wafHandle.Close()
+
+	wafCtx := wafHandle.NewContext(timer.WithUnlimitedBudget(), timer.WithComponents("waf", "rasp"))
+	defer wafCtx.Close()
+
+	result, err := wafCtx.Run(waf.RunAddressData{
+		Persistent: map[string]any{
+			"server.request.path_params": "/rfiinc.txt",
+		},
+		TimerKey: "waf",
+	})
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(result.Events, result.Actions)
+}
+```
+
+The API documentation details can be found on [pkg.go.dev](https://pkg.go.dev/github.com/DataDog/go-libddwaf/v4).
+
+Originally this project was only here to provide CGO wrappers to the calls to libddwaf.
+But with the appearance of the `ddwaf_object` tree-like structure, and with the intention to build CGO-less bindings,
+the project has grown into a fully integrated brick of the DataDog tracer structure,
+which in turn made it necessary to document and maintain the project in an orderly fashion.
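+
+Before creating a `Builder`, you can verify that libddwaf can actually be loaded in the current
+environment. A minimal sketch, assuming the v4 `Load` function keeps its documented v3 semantics
+(`ok` reports usability, and a non-nil error alongside `ok == true` is only a non-fatal warning):
+
+```go
+import (
+	"log"
+
+	waf "github.com/DataDog/go-libddwaf/v4"
+)
+
+func ensureWAF() {
+	// Load eagerly dlopens libddwaf once and reports whether it is usable.
+	ok, err := waf.Load()
+	if !ok {
+		log.Fatalf("WAF cannot be used on this platform: %v", err)
+	}
+	if err != nil {
+		// Non-fatal setup issue (e.g. temporary file cleanup); safe to continue.
+		log.Printf("WAF loaded with warnings: %v", err)
+	}
+}
+```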
+
+## Supported platforms
+
+This library currently supports the following platform doublets:
+
+| OS    | Arch    |
+| ----- | ------- |
+| Linux | amd64   |
+| Linux | aarch64 |
+| OSX   | amd64   |
+| OSX   | arm64   |
+
+On unsupported platforms, top-level functions will return a `WafDisabledError` explaining why the WAF cannot be used.
+
+Note that:
+* Linux support includes both glibc and musl variants
+* OSX under 10.9 is not supported
+* A build tag named `datadog.no_waf` can be manually added to force the WAF to be disabled.
+
+## Design
+
+The WAF bindings have multiple moving parts that are necessary to understand:
+
+- `Builder`: an object wrapper over the pointer to the C WAF Builder
+- `Handle`: an object wrapper over the pointer to the C WAF Handle
+- `Context`: an object wrapper over a pointer to the C WAF Context
+- Encoder: its goal is to construct a tree of Waf Objects to send to the WAF
+- Decoder: Transforms Waf Objects returned from the WAF to usual go objects (e.g. maps, arrays, ...)
+- Library: The low-level go bindings to the C library, providing improved typing
+
+```mermaid
+flowchart LR
+    START:::hidden -->|NewBuilder| Builder -->|Build| Handle
+
+    Handle -->|NewContext| Context
+
+    Context -->|Encode Inputs| Encoder
+
+    Handle -->|Encode Ruleset| Encoder
+    Handle -->|Init WAF| Library
+    Context -->|Decode Result| Decoder
+
+    Handle -->|Decode Init Errors| Decoder
+
+    Context -->|Run| Library
+    Encoder -->|Allocate Waf Objects| runtime.Pinner
+
+    Library -->|Call C code| libddwaf
+
+    classDef hidden display: none;
+```
+
+### `runtime.Pinner`
+
+When passing Go values to the WAF, it is necessary to make sure that memory remains valid and does
+not move until the WAF no longer has any pointers to it. We do this by using a `runtime.Pinner`.
+Persistent address data is added to a `Context`-associated `runtime.Pinner`, while ephemeral address
+data is managed by a transient `runtime.Pinner` that only exists for the duration of the call.
+
+### Typical call to Run()
+
+Here is an example of the flow of operations on a simple call to `Run()`:
+
+- Encode input data into WAF Objects and store references in the temporary pool
+- Lock the context mutex until the end of the call
+- Store references from the temporary pool into the context level pool
+- Call `ddwaf_run`
+- Decode the matches and actions
+
+### CGO-less C Bindings
+
+This library uses [purego](https://github.com/ebitengine/purego) to implement C bindings without requiring use of CGO at compilation time. The high-level workflow
+is to embed the C shared library using `go:embed`, dump it into a file, open the library using `dlopen`, load the
+symbols using `dlsym`, and finally call them. On Linux systems, using `memfd_create(2)` enables the library to be loaded without
+writing to the filesystem.
+
+Another requirement of `libddwaf` is to have a FHS filesystem on your machine and, for Linux, to provide `libc.so.6`,
+`libpthread.so.0`, and `libdl.so.2` as dynamic libraries.
+
+> :warning: Keep in mind that **purego only works on linux/darwin for amd64/arm64 and so does go-libddwaf.**
+
+## Contributing pitfalls
+
+- Cannot dlopen twice in the app lifetime on OSX. It messes with Thread Local Storage and usually finishes with a `std::bad_alloc()`
+- `keepAlive()` calls are here to prevent the GC from destroying objects too early
+- Since there is a stack switch between the Go code and the C code, usually the only C stacktrace you will ever get is from GDB
+- If a segfault happens during a call to the C code, the stacktrace of the goroutine that made the call is the one annotated with `[syscall]`
+- [GoLand](https://www.jetbrains.com/go/) does not support `CGO_ENABLED=0` (as of June 2023)
+- Keep in mind that we fully escape the type system. If you send the wrong data, in the best case it will segfault, but not always!
+- The structs in `ctypes.go` are here to reproduce the memory layout of the structs in `include/ddwaf.h` because pointers to these structs will be passed directly
+- Do not use `uintptr` as function argument or result types, coming from `unsafe.Pointer` casts of Go values, because they escape the pointer analysis which can create wrongly optimized code and crash. Pointer arithmetic is of course necessary in such a library but must be kept in the same function scope.
+- GDB is available on arm64 but is not officially supported so it usually crashes pretty fast (as of June 2023)
+- No pointer to variables on the stack shall be sent to the C code because Go stacks can be moved during the C call. More on this [here](https://medium.com/@trinad536/escape-analysis-in-golang-fc81b78f3550)
+
+## Debugging
+
+Debug logging for the underlying C/C++ library can be enabled when building (or testing) by setting the
+`DD_APPSEC_WAF_LOG_LEVEL` environment variable to one of: `trace`, `debug`, `info`, `warn` (or
+`warning`), `error`, `off` (which is the default behavior and logs nothing).
+
+The `DD_APPSEC_WAF_LOG_FILTER` environment variable can be set to a valid (per the `regexp` package)
+regular expression to limit logging to only messages that match the regular expression.
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/builder.go b/vendor/github.com/DataDog/go-libddwaf/v4/builder.go
new file mode 100644
index 00000000..e8ed0b3c
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/builder.go
@@ -0,0 +1,180 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package libddwaf
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+
+	"github.com/DataDog/go-libddwaf/v4/internal/bindings"
+	"github.com/DataDog/go-libddwaf/v4/internal/ruleset"
+)
+
+// Builder manages an evolving WAF configuration over time. Its lifecycle is
+// typically tied to that of a remote configuration client, as its purpose is to
+// keep an up-to-date view of the current configuration with low overhead. This
+// type is not safe for concurrent use, and users should protect it with a mutex
+// or similar when sharing it across multiple goroutines. All methods of this
+// type are safe to call with a nil receiver.
+type Builder struct {
+	handle        bindings.WAFBuilder
+	defaultLoaded bool
+}
+
+// NewBuilder creates a new [Builder] instance. Its lifecycle is typically tied
+// to that of a remote configuration client, as its purpose is to keep an
+// up-to-date view of the current configuration with low overhead. Returns nil if
+// an error occurs when initializing the builder. The caller is responsible for
+// calling [Builder.Close] when the builder is no longer needed.
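+//
+// A minimal usage sketch (illustrative only; the config path and ruleset
+// fragment are placeholders, and error handling is elided):
+//
+//	builder, err := libddwaf.NewBuilder("", "")
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer builder.Close()
+//	diags, err := builder.AddOrUpdateConfig("my/config", rulesetFragment)
+//	handle := builder.Build() // may return nil
+//	if handle != nil {
+//		defer handle.Close()
+//	}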
+func NewBuilder(keyObfuscatorRegex string, valueObfuscatorRegex string) (*Builder, error) { + if ok, err := Load(); !ok { + return nil, err + } + + var pinner runtime.Pinner + defer pinner.Unpin() + hdl := wafLib.BuilderInit(newConfig(&pinner, keyObfuscatorRegex, valueObfuscatorRegex)) + + if hdl == 0 { + return nil, errors.New("failed to initialize the WAF builder") + } + + return &Builder{handle: hdl}, nil +} + +// Close releases all resources associated with this builder. +func (b *Builder) Close() { + if b == nil || b.handle == 0 { + return + } + wafLib.BuilderDestroy(b.handle) + b.handle = 0 +} + +var ( + errUpdateFailed = errors.New("failed to update WAF Builder instance") + errBuilderClosed = errors.New("builder has already been closed") +) + +const defaultRecommendedRulesetPath = "::/go-libddwaf/default/recommended.json" + +// AddDefaultRecommendedRuleset adds the default recommended ruleset to the +// receiving [Builder], and returns the [Diagnostics] produced in the process. +func (b *Builder) AddDefaultRecommendedRuleset() (Diagnostics, error) { + var pinner runtime.Pinner + defer pinner.Unpin() + + ruleset, err := ruleset.DefaultRuleset(&pinner) + if err != nil { + return Diagnostics{}, fmt.Errorf("failed to load default recommended ruleset: %w", err) + } + + diag, err := b.addOrUpdateConfig(defaultRecommendedRulesetPath, &ruleset) + if err == nil { + b.defaultLoaded = true + } + return diag, err +} + +// RemoveDefaultRecommendedRuleset removes the default recommended ruleset from +// the receiving [Builder]. Returns true if the removal occurred (meaning the +// default recommended ruleset was indeed present in the builder). +func (b *Builder) RemoveDefaultRecommendedRuleset() bool { + if b.RemoveConfig(defaultRecommendedRulesetPath) { + b.defaultLoaded = false + return true + } + return false +} + +// AddOrUpdateConfig adds or updates a configuration fragment to this [Builder]. +// Returns the [Diagnostics] produced by adding or updating this configuration. +func (b *Builder) AddOrUpdateConfig(path string, fragment any) (Diagnostics, error) { + if b == nil || b.handle == 0 { + return Diagnostics{}, errBuilderClosed + } + + if path == "" { + return Diagnostics{}, errors.New("path cannot be blank") + } + + var pinner runtime.Pinner + defer pinner.Unpin() + + encoder, err := newEncoder(newUnlimitedEncoderConfig(&pinner)) + if err != nil { + return Diagnostics{}, fmt.Errorf("could not create encoder: %w", err) + } + + frag, err := encoder.Encode(fragment) + if err != nil { + return Diagnostics{}, fmt.Errorf("could not encode the config fragment into a WAF object; %w", err) + } + + return b.addOrUpdateConfig(path, frag) +} + +// addOrUpdateConfig adds or updates a configuration fragment to this [Builder]. +// Returns the [Diagnostics] produced by adding or updating this configuration. +func (b *Builder) addOrUpdateConfig(path string, cfg *bindings.WAFObject) (Diagnostics, error) { + var diagnosticsWafObj bindings.WAFObject + defer wafLib.ObjectFree(&diagnosticsWafObj) + + res := wafLib.BuilderAddOrUpdateConfig(b.handle, path, cfg, &diagnosticsWafObj) + + var diags Diagnostics + if !diagnosticsWafObj.IsInvalid() { + // The Diagnostics object will be invalid if the config was completely + // rejected. 
+		var err error
+		diags, err = decodeDiagnostics(&diagnosticsWafObj)
+		if err != nil {
+			return diags, fmt.Errorf("failed to decode WAF diagnostics: %w", err)
+		}
+	}
+
+	if !res {
+		return diags, errUpdateFailed
+	}
+	return diags, nil
+}
+
+// RemoveConfig removes the configuration associated with the given path from
+// this [Builder]. Returns true if the removal was successful.
+func (b *Builder) RemoveConfig(path string) bool {
+	if b == nil || b.handle == 0 {
+		return false
+	}
+
+	return wafLib.BuilderRemoveConfig(b.handle, path)
+}
+
+// ConfigPaths returns the list of currently loaded configuration paths.
+func (b *Builder) ConfigPaths(filter string) []string {
+	if b == nil || b.handle == 0 {
+		return nil
+	}
+
+	return wafLib.BuilderGetConfigPaths(b.handle, filter)
+}
+
+// Build creates a new [Handle] instance that uses the current configuration.
+// Returns nil if an error occurs when building the handle. The caller is
+// responsible for calling [Handle.Close] when the handle is no longer needed.
+func (b *Builder) Build() *Handle {
+	if b == nil || b.handle == 0 {
+		return nil
+	}
+
+	hdl := wafLib.BuilderBuildInstance(b.handle)
+	if hdl == 0 {
+		return nil
+	}
+
+	return wrapHandle(hdl)
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/context.go b/vendor/github.com/DataDog/go-libddwaf/v4/context.go
new file mode 100644
index 00000000..56fa4acb
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/context.go
@@ -0,0 +1,343 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package libddwaf
+
+import (
+	"fmt"
+	"maps"
+	"runtime"
+	"sync"
+	"time"
+
+	"github.com/DataDog/go-libddwaf/v4/internal/bindings"
+	"github.com/DataDog/go-libddwaf/v4/internal/pin"
+	"github.com/DataDog/go-libddwaf/v4/timer"
+	"github.com/DataDog/go-libddwaf/v4/waferrors"
+)
+
+// Context is a WAF execution context. It allows running the WAF incrementally when calling it
+// multiple times to run its rules every time new addresses become available. Each request must have
+// its own [Context]. New [Context] instances can be created by calling
+// [Handle.NewContext].
+type Context struct {
+	// Timer registers the time spent in the WAF and go-libddwaf. It is created alongside the Context using the options
+	// passed in to NewContext. Once its time budget is exhausted, each new call to Context.Run will return a timeout error.
+	Timer timer.NodeTimer
+
+	handle *Handle // Instance of the WAF
+
+	cContext bindings.WAFContext // The C ddwaf_context pointer
+
+	// mutex protects the use of cContext (which is not thread-safe) and of truncations.
+	mutex sync.Mutex
+
+	// truncations provides details about truncations that occurred while encoding address data for the WAF execution.
+	truncations map[TruncationReason][]int
+
+	// pinner is used to retain Go data that is being passed to the WAF as part of
+	// [RunAddressData.Persistent] until the [Context.Close] method results in the context being
+	// destroyed.
+	pinner pin.ConcurrentPinner
+}
+
+// RunAddressData provides address data to the [Context.Run] method. If a given key is present in
+// both `Persistent` and `Ephemeral`, the value from `Persistent` will take precedence.
+// When encoding Go structs to the WAF-compatible format, fields with the `ddwaf:"ignore"` tag are
+// ignored and will not be visible to the WAF.
+type RunAddressData struct {
+	// Persistent address data is scoped to the lifetime of a given Context, and subsequent calls to
+	// Context.Run with the same address name will be silently ignored.
+	Persistent map[string]any
+	// Ephemeral address data is scoped to a given Context.Run call and is not persisted across
+	// calls. This is used for protocols such as gRPC client/server streaming or GraphQL, where a
+	// single request can incur multiple subrequests.
+	Ephemeral map[string]any
+
+	// TimerKey is the key used to track the time spent in the WAF for this run.
+	// If left empty, a detached timer budgeted with the parent's remaining time is started.
+	TimerKey timer.Key
+}
+
+func (d RunAddressData) isEmpty() bool {
+	return len(d.Persistent) == 0 && len(d.Ephemeral) == 0
+}
+
+// newTimer creates a new timer for this run. If the TimerKey is empty, a detached timer is created:
+// it is not registered as a child of the parent, but its budget is capped at the parent's remaining time.
func (d RunAddressData) newTimer(parent timer.NodeTimer) (timer.NodeTimer, error) {
+	if d.TimerKey == "" {
+		return timer.NewTreeTimer(
+			timer.WithComponents(
+				EncodeTimeKey,
+				DurationTimeKey,
+				DecodeTimeKey,
+			),
+			timer.WithBudget(parent.SumRemaining()),
+		)
+	}
+
+	return parent.NewNode(d.TimerKey,
+		timer.WithComponents(
+			EncodeTimeKey,
+			DurationTimeKey,
+			DecodeTimeKey,
+		),
+		timer.WithInheritedSumBudget(),
+	)
+}
+
+// Run encodes the given [RunAddressData] values and runs them against the WAF rules.
+// Callers must check the returned [Result] object even when an error is returned, as the WAF might
+// have been able to match some rules and generate events or actions before the error was reached;
+// especially when the error is [waferrors.ErrTimeout].
+func (context *Context) Run(addressData RunAddressData) (res Result, err error) {
+	if addressData.isEmpty() {
+		return Result{}, nil
+	}
+
+	// If the context has already timed out, we don't need to run the WAF again
+	if context.Timer.SumExhausted() {
+		return Result{}, waferrors.ErrTimeout
+	}
+
+	runTimer, err := addressData.newTimer(context.Timer)
+	if err != nil {
+		return Result{}, err
+	}
+
+	defer func() {
+		res.TimerStats = runTimer.Stats()
+	}()
+
+	runTimer.Start()
+	defer runTimer.Stop()
+
+	wafEncodeTimer := runTimer.MustLeaf(EncodeTimeKey)
+	wafEncodeTimer.Start()
+	defer wafEncodeTimer.Stop()
+
+	persistentData, err := context.encodeOneAddressType(&context.pinner, addressData.Persistent, wafEncodeTimer)
+	if err != nil {
+		return Result{}, err
+	}
+
+	// The WAF releases ephemeral address data at the end of each run call, so we need not keep the Go
+	// values live beyond that in the way we must for persistent data. We hence use a separate
+	// encoder.
+	var ephemeralPinner runtime.Pinner
+	defer ephemeralPinner.Unpin()
+	ephemeralData, err := context.encodeOneAddressType(&ephemeralPinner, addressData.Ephemeral, wafEncodeTimer)
+	if err != nil {
+		return Result{}, err
+	}
+
+	wafEncodeTimer.Stop()
+
+	// ddwaf_run cannot run concurrently, so we need to lock the context
+	context.mutex.Lock()
+	defer context.mutex.Unlock()
+
+	if context.cContext == 0 {
+		// Context has been closed, returning an empty result...
+		return Result{}, waferrors.ErrContextClosed
+	}
+
+	if runTimer.SumExhausted() {
+		return Result{}, waferrors.ErrTimeout
+	}
+
+	return context.run(persistentData, ephemeralData, runTimer)
+}
+
+// merge merges two maps of slices into a single map of slices.
+// The resulting map will contain all keys from both a and b, with the corresponding values from a and b
+// concatenated (in this order) in a single slice. The implementation tries to minimize reallocations.
+func merge[K comparable, V any](a, b map[K][]V) (merged map[K][]V) {
+	count := len(a) + len(b)
+	if count == 0 {
+		return
+	}
+
+	keys := make(map[K]struct{}, count)
+	nothing := struct{}{}
+	totalCount := 0
+	for _, m := range [2]map[K][]V{a, b} {
+		for k, v := range m {
+			keys[k] = nothing
+			totalCount += len(v)
+		}
+	}
+
+	merged = make(map[K][]V, count)
+	values := make([]V, 0, totalCount)
+
+	for k := range keys {
+		idxS := len(values) // Start index
+		values = append(values, a[k]...)
+		values = append(values, b[k]...)
+		idxE := len(values) // End index
+
+		merged[k] = values[idxS:idxE]
+	}
+
+	return
+}
+
+// encodeOneAddressType encodes the given addressData values and returns the corresponding WAF
+// object, with the referenced Go data pinned via the provided pinner. If the addressData is empty,
+// it returns a nil WAF object.
+// At this point, if the encoder does not time out, the only error we can get is an error in case the
+// top level object is a nil map, but this behaviour is expected since either persistent or
+// ephemeral addresses are allowed to be nil, one at a time. In this case, Encode will return nil,
+// which is what we need to send to ddwaf_run to signal that the address data is empty.
+func (context *Context) encodeOneAddressType(pinner pin.Pinner, addressData map[string]any, timer timer.Timer) (*bindings.WAFObject, error) {
+	if addressData == nil {
+		return nil, nil
+	}
+
+	encoder, err := newEncoder(newEncoderConfig(pinner, timer))
+	if err != nil {
+		return nil, fmt.Errorf("could not create encoder: %w", err)
+	}
+
+	data, _ := encoder.Encode(addressData)
+	if len(encoder.truncations) > 0 {
+		context.mutex.Lock()
+		defer context.mutex.Unlock()
+
+		context.truncations = merge(context.truncations, encoder.truncations)
+	}
+
+	if timer.Exhausted() {
+		return nil, waferrors.ErrTimeout
+	}
+
+	return data, nil
+}
+
+// run executes the ddwaf_run call with the provided data on this context. The caller is responsible for locking the
+// context appropriately around this call.
+func (context *Context) run(persistentData, ephemeralData *bindings.WAFObject, runTimer timer.NodeTimer) (Result, error) {
+	var pinner runtime.Pinner
+	defer pinner.Unpin()
+
+	var result bindings.WAFObject
+	pinner.Pin(&result)
+	defer wafLib.ObjectFree(&result)
+
+	// The value of the timeout cannot exceed 2^55
+	// cf. https://en.cppreference.com/w/cpp/chrono/duration
+	timeout := uint64(runTimer.SumRemaining().Microseconds()) & 0x008FFFFFFFFFFFFF
+	ret := wafLib.Run(context.cContext, persistentData, ephemeralData, &result, timeout)
+
+	decodeTimer := runTimer.MustLeaf(DecodeTimeKey)
+	decodeTimer.Start()
+	defer decodeTimer.Stop()
+
+	res, duration, err := unwrapWafResult(ret, &result)
+	runTimer.AddTime(DurationTimeKey, duration)
+	return res, err
+}
+
+func unwrapWafResult(ret bindings.WAFReturnCode, result *bindings.WAFObject) (Result, time.Duration, error) {
+	if !result.IsMap() {
+		return Result{}, 0, fmt.Errorf("invalid result (expected map, got %s)", result.Type)
+	}
+
+	entries, err := result.Values()
+	if err != nil {
+		return Result{}, 0, err
+	}
+
+	var (
+		res      Result
+		duration time.Duration
+	)
+	for _, entry := range entries {
+		switch key := entry.MapKey(); key {
+		case "timeout":
+			timeout, err := entry.BoolValue()
+			if err != nil {
+				return Result{}, 0, fmt.Errorf("failed to decode timeout: %w", err)
+			}
+			if timeout {
+				err = waferrors.ErrTimeout
+			}
+		case "keep":
+			keep, err := entry.BoolValue()
+			if err != nil {
+				return Result{}, 0, fmt.Errorf("failed to decode keep: %w", err)
+			}
+			res.Keep = keep
+		case "duration":
+			dur, err := entry.UIntValue()
+			if err != nil {
+				return Result{}, 0, fmt.Errorf("failed to decode duration: %w", err)
+			}
+			duration = time.Duration(dur) * time.Nanosecond
+		case "events":
+			if !entry.IsArray() {
+				return Result{}, 0, fmt.Errorf("invalid events (expected array, got %s)", entry.Type)
+			}
+			if entry.NbEntries != 0 {
+				events, err := entry.ArrayValue()
+				if err != nil {
+					return Result{}, 0, fmt.Errorf("failed to decode events: %w", err)
+				}
+				res.Events = events
+			}
+		case "actions":
+			if !entry.IsMap() {
+				return Result{}, 0, fmt.Errorf("invalid actions (expected map, got %s)", entry.Type)
+			}
+			if entry.NbEntries != 0 {
+				actions, err := entry.MapValue()
+				if err != nil {
+					return Result{}, 0, fmt.Errorf("failed to decode actions: %w", err)
+				}
+				res.Actions = actions
+			}
+		case "attributes":
+			if !entry.IsMap() {
+				return Result{}, 0, fmt.Errorf("invalid attributes (expected map, got %s)", entry.Type)
+			}
+			if entry.NbEntries != 0 {
+				derivatives, err := entry.MapValue()
+				if err != nil {
+					return Result{}, 0, fmt.Errorf("failed to decode attributes: %w", err)
+				}
+				res.Derivatives = derivatives
+			}
+		}
+	}
+
+	return res, duration, goRunError(ret)
+}
+
+// Close disposes of the underlying `ddwaf_context` and releases the associated
+// internal data. It also decreases the reference count of the [Handle] which
+// created this [Context], possibly releasing it completely (if this was the
+// last [Context] created from it, and it is no longer in use by its creator).
+func (context *Context) Close() {
+	context.mutex.Lock()
+	defer context.mutex.Unlock()
+
+	wafLib.ContextDestroy(context.cContext)
+	defer context.handle.Close() // Reduce the reference counter of the Handle.
+	context.cContext = 0         // Makes it easy to spot use-after-free/double-free issues
+
+	context.pinner.Unpin() // The pinned data is no longer needed, explicitly release
+}
+
+// Truncations returns the truncations that occurred while encoding address data for WAF execution.
+// The key is the truncation reason: either because the object was too deep, the arrays were too large, or the strings were too long.
+// The value is a slice of integers, each integer being the original size of the object that was truncated.
+// In case of the [ObjectTooDeep] reason, the original size can only be approximated because of recursive objects.
+func (context *Context) Truncations() map[TruncationReason][]int {
+	context.mutex.Lock()
+	defer context.mutex.Unlock()
+
+	return maps.Clone(context.truncations)
+}
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/decoder.go b/vendor/github.com/DataDog/go-libddwaf/v4/decoder.go
new file mode 100644
index 00000000..995e0ee5
--- /dev/null
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/decoder.go
@@ -0,0 +1,167 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package libddwaf
+
+import (
+	"fmt"
+
+	"github.com/DataDog/go-libddwaf/v4/internal/bindings"
+	"github.com/DataDog/go-libddwaf/v4/internal/unsafe"
+	"github.com/DataDog/go-libddwaf/v4/waferrors"
+)
+
+// decodeErrors transforms the wafObject received by the wafRulesetInfo after the call to wafDl.wafInit to a map where
+// the keys are the error messages and the values are arrays of all the rule IDs which triggered that specific error
+func decodeErrors(obj *bindings.WAFObject) (map[string][]string, error) {
+	if !obj.IsMap() {
+		return nil, fmt.Errorf("decodeErrors: %w: expected map, got %s", waferrors.ErrInvalidObjectType, obj.Type)
+	}
+
+	if obj.Value == 0 && obj.NbEntries == 0 {
+		return nil, nil
+	}
+
+	if obj.Value == 0 && obj.NbEntries > 0 {
+		return nil, waferrors.ErrNilObjectPtr
+	}
+
+	wafErrors := map[string][]string{}
+	for i := uint64(0); i < obj.NbEntries; i++ {
+		objElem := unsafe.CastWithOffset[bindings.WAFObject](obj.Value, i)
+
+		errorMessage := unsafe.GostringSized(unsafe.Cast[byte](objElem.ParameterName), objElem.ParameterNameLength)
+		ruleIds, err := decodeStringArray(objElem)
+		if err != nil {
+			return nil, err
+		}
+
+		wafErrors[errorMessage] = ruleIds
+	}
+
+	return wafErrors, nil
+}
+
+func decodeDiagnostics(obj *bindings.WAFObject) (Diagnostics, error) {
+	if !obj.IsMap() {
+		return Diagnostics{}, fmt.Errorf("decodeDiagnostics: %w: expected map, got %s", waferrors.ErrInvalidObjectType, obj.Type)
+	}
+	if obj.Value == 0 && obj.NbEntries > 0 {
+		return Diagnostics{}, waferrors.ErrNilObjectPtr
+	}
+
+	var (
+		diags Diagnostics
+		err   error
+	)
+	for i := uint64(0); i < obj.NbEntries; i++ {
+		objElem := unsafe.CastWithOffset[bindings.WAFObject](obj.Value, i)
+		key := unsafe.GostringSized(unsafe.Cast[byte](objElem.ParameterName), objElem.ParameterNameLength)
+		switch key {
+		case "actions":
+			diags.Actions, err = decodeFeature(objElem)
+		case "custom_rules":
+			diags.CustomRules, err = decodeFeature(objElem)
+		case "exclusions":
+			diags.Exclusions, err = decodeFeature(objElem)
+		case "rules":
+			diags.Rules, err = decodeFeature(objElem)
+		case "rules_data":
+			diags.RulesData, err = decodeFeature(objElem)
+		case "exclusion_data":
+			diags.ExclusionData, err = decodeFeature(objElem)
+		case "rules_override":
+			diags.RulesOverrides, err = decodeFeature(objElem)
+		case "processors":
+			diags.Processors, err = decodeFeature(objElem)
+		case "scanners":
+			diags.Scanners, err = decodeFeature(objElem)
+		case "ruleset_version":
+			diags.Version = unsafe.GostringSized(unsafe.Cast[byte](objElem.Value), objElem.NbEntries)
+		default:
+			// Unknown keys are ignored.
+ } + if err != nil { + return Diagnostics{}, err + } + } + + return diags, nil +} + +func decodeFeature(obj *bindings.WAFObject) (*Feature, error) { + if !obj.IsMap() { + return nil, fmt.Errorf("decodeFeature: %w: expected map, got %s", waferrors.ErrInvalidObjectType, obj.Type) + } + if obj.Value == 0 && obj.NbEntries > 0 { + return nil, waferrors.ErrNilObjectPtr + } + var feature Feature + var err error + + for i := uint64(0); i < obj.NbEntries; i++ { + objElem := unsafe.CastWithOffset[bindings.WAFObject](obj.Value, i) + key := unsafe.GostringSized(unsafe.Cast[byte](objElem.ParameterName), objElem.ParameterNameLength) + switch key { + case "error": + feature.Error = unsafe.GostringSized(unsafe.Cast[byte](objElem.Value), objElem.NbEntries) + case "errors": + feature.Errors, err = decodeErrors(objElem) + case "failed": + feature.Failed, err = decodeStringArray(objElem) + case "loaded": + feature.Loaded, err = decodeStringArray(objElem) + case "skipped": + feature.Skipped, err = decodeStringArray(objElem) + case "warnings": + feature.Warnings, err = decodeErrors(objElem) + default: + return nil, fmt.Errorf("%w: %s", waferrors.ErrUnsupportedValue, key) + } + + if err != nil { + return nil, err + } + } + + return &feature, nil +} + +func decodeStringArray(obj *bindings.WAFObject) ([]string, error) { + // We consider that nil is an empty array + if obj.IsNil() { + return nil, nil + } + + if !obj.IsArray() { + return nil, fmt.Errorf("decodeStringArray: %w: expected array, got %s", waferrors.ErrInvalidObjectType, obj.Type) + } + + if obj.Value == 0 && obj.NbEntries > 0 { + return nil, waferrors.ErrNilObjectPtr + } + + if obj.NbEntries == 0 { + return nil, nil + } + + strArr := make([]string, 0, obj.NbEntries) + for i := uint64(0); i < obj.NbEntries; i++ { + objElem := unsafe.CastWithOffset[bindings.WAFObject](obj.Value, i) + if objElem.Type != bindings.WAFStringType { + return nil, fmt.Errorf("decodeStringArray: %w: expected string, got %s", waferrors.ErrInvalidObjectType, objElem.Type) + } + + strArr = append(strArr, unsafe.GostringSized(unsafe.Cast[byte](objElem.Value), objElem.NbEntries)) + } + + return strArr, nil +} + +// Deprecated: This is merely wrapping [bindings.WAFObject.AnyValue], which should be used directly +// instead. +func DecodeObject(obj *WAFObject) (any, error) { + return obj.AnyValue() +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/diagnostics.go b/vendor/github.com/DataDog/go-libddwaf/v4/diagnostics.go new file mode 100644 index 00000000..c1675af7 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/diagnostics.go @@ -0,0 +1,89 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package libddwaf + +import ( + "errors" + "fmt" +) + +// Diagnostics stores the information as provided by the WAF about WAF rules parsing and loading. It +// is returned by [Builder.AddOrUpdateConfig]. +type Diagnostics struct { + // Rules contains information about the loaded rules. + Rules *Feature + // CustomRules contains information about the loaded custom rules. + CustomRules *Feature + // Actions contains information about the loaded actions. + Actions *Feature + // Exclusions contains information about the loaded exclusions. + Exclusions *Feature + // RulesOverrides contains information about the loaded rules overrides. 
+ RulesOverrides *Feature + // RulesData contains information about the loaded rules data. + RulesData *Feature + // ExclusionData contains information about the loaded exclusion data. + ExclusionData *Feature + // Processors contains information about the loaded processors. + Processors *Feature + // Scanners contains information about the loaded scanners. + Scanners *Feature + // Version is the version of the parsed ruleset if available. + Version string +} + +// EachFeature calls the provided callback for each (non-nil) feature in this diagnostics object. +func (d *Diagnostics) EachFeature(cb func(string, *Feature)) { + byName := map[string]*Feature{ + "rules": d.Rules, + "custom_rules": d.CustomRules, + "actions": d.Actions, + "exclusions": d.Exclusions, + "rules_overrides": d.RulesOverrides, + "rules_data": d.RulesData, + "exclusion_data": d.ExclusionData, + "processors": d.Processors, + "scanners": d.Scanners, + } + + for name, feat := range byName { + if feat != nil { + cb(name, feat) + } + } +} + +// TopLevelError returns the list of top-level errors reported by the WAF on any of the Diagnostics +// entries, rolled up into a single error value. Returns nil if no top-level errors were reported. +// Individual, item-level errors might still exist. +func (d *Diagnostics) TopLevelError() error { + var err error + d.EachFeature(func(name string, feat *Feature) { + if feat.Error != "" { + err = errors.Join(err, fmt.Errorf("%q: %s", name, feat.Error)) + } + }) + return err +} + +// Feature stores the information as provided by the WAF about loaded and failed +// rules for a specific feature of the WAF ruleset. +type Feature struct { + // Errors is a map of parsing errors to a list of unique identifiers from the elements which + // failed loading due to this specific error. + Errors map[string][]string + // Warnings is a map of parsing warnings to a list of unique identifiers from the elements which + // resulted in this specific warning. + Warnings map[string][]string + // Error is the single error which prevented parsing this feature. + Error string + // Loaded is a list of the unique identifiers from successfully loaded elements. + Loaded []string + // Failed is a list of the unique identifiers from the elements which couldn't be loaded. + Failed []string + // Skipped is a list of the unique identifiers from the elements which were skipped. + Skipped []string +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/encoder.go b/vendor/github.com/DataDog/go-libddwaf/v4/encoder.go new file mode 100644 index 00000000..4fde6e1e --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/encoder.go @@ -0,0 +1,598 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package libddwaf + +import ( + "context" + "encoding/json" + "encoding/xml" + "fmt" + "math" + "reflect" + "strings" + + "github.com/DataDog/go-libddwaf/v4/internal/bindings" + "github.com/DataDog/go-libddwaf/v4/internal/pin" + "github.com/DataDog/go-libddwaf/v4/timer" + "github.com/DataDog/go-libddwaf/v4/waferrors" +) + +type EncoderConfig struct { + // Pinner is used to pin the data referenced by the encoded wafObjects. + Pinner pin.Pinner + // Timer makes sure the encoder doesn't spend too much time doing its job. 
+	Timer timer.Timer
+	// MaxContainerSize is the maximum number of elements in a container (list, map, struct) that will be encoded.
+	MaxContainerSize int
+	// MaxStringSize is the maximum length of a string that will be encoded.
+	MaxStringSize int
+	// MaxObjectDepth is the maximum depth of the object that will be encoded.
+	MaxObjectDepth int
+}
+
+// encoder encodes Go values into wafObjects. Only the subset of Go types representable as wafObjects
+// is encoded; the rest is ignored.
+// The encoder allocates the memory required for new wafObjects in Go memory, which must remain valid
+// for their lifetime in the C world. This lifetime depends on the ddwaf function the encoded result
+// is used with. The Go data referenced by the encoded wafObjects is pinned via the [pin.Pinner]
+// provided in the [EncoderConfig], and MUST remain pinned for as long as the C world may reference it.
+type encoder struct {
+	config EncoderConfig
+
+	// For each TruncationReason, holds the size that is required to avoid truncation for each truncation that happened.
+	truncations map[TruncationReason][]int
+}
+
+// TruncationReason is a flag representing reasons why some input was not encoded in full.
+type TruncationReason uint8
+
+const (
+	// StringTooLong indicates a string exceeded the maximum string length configured. The truncation
+	// values indicate the actual length of truncated strings.
+	StringTooLong TruncationReason = 1 << iota
+	// ContainerTooLarge indicates a container (list, map, struct) exceeded the maximum number of
+	// elements configured. The truncation values indicate the actual number of elements in the
+	// truncated container.
+	ContainerTooLarge
+	// ObjectTooDeep indicates an overall object exceeded the maximum encoding depths configured. The
+	// truncation values indicate an estimated actual depth of the truncated object. The value is
+	// guaranteed to be less than or equal to the actual depth (it may not be more).
+	ObjectTooDeep
+)
+
+func (reason TruncationReason) String() string {
+	switch reason {
+	case ObjectTooDeep:
+		return "container_depth"
+	case ContainerTooLarge:
+		return "container_size"
+	case StringTooLong:
+		return "string_length"
+	default:
+		return fmt.Sprintf("TruncationReason(%v)", int(reason))
+	}
+}
+
+const (
+	AppsecFieldTag            = "ddwaf"
+	AppsecFieldTagValueIgnore = "ignore"
+)
+
+// WAFObject is the C struct that represents a WAF object. It is passed as-is to the C-world.
+// It is highly advised to use the methods on the object to manipulate it and not set the fields manually.
+type WAFObject = bindings.WAFObject
+
+// Encodable represents a type that can encode itself into a WAFObject.
+// The encodable is responsible for using the [pin.Pinner]
+// object passed in the [EncoderConfig] to pin the data referenced by the encoded [bindings.WAFObject].
+// The encodable must also use the [timer.Timer] passed in the [EncoderConfig] to
+// make sure it doesn't spend too much time doing its job.
+// The encodable must also respect the [EncoderConfig] limits and report truncations.
+type Encodable interface {
+	// Encode encodes the receiver as the WAFObject obj using the provided EncoderConfig and remaining depth allowed.
+	// It returns a map of truncation reasons and their respective actual sizes. When returning a non-nil error,
+	// it is greatly advised to use errors from the waferrors package where applicable.
+	// Outside of encoding the value, it is expected to check for truncation sizes as advised in the EncoderConfig
+	// and to regularly call the EncoderConfig.Timer.Exhausted() method to check if the encoding is still allowed
+	// and return waferrors.ErrTimeout if it is not.
+	// This method is not expected or required to be safe to concurrently call from multiple goroutines.
+	Encode(config EncoderConfig, obj *bindings.WAFObject, depth int) (map[TruncationReason][]int, error)
+}
+
+func newEncoder(config EncoderConfig) (*encoder, error) {
+	if config.Pinner == nil {
+		return nil, fmt.Errorf("pinner cannot be nil")
+	}
+	if config.Timer == nil {
+		config.Timer, _ = timer.NewTimer(timer.WithUnlimitedBudget())
+	}
+	if config.MaxContainerSize < 0 {
+		return nil, fmt.Errorf("container max size must not be negative")
+	}
+	if config.MaxStringSize < 0 {
+		return nil, fmt.Errorf("string max size must not be negative")
+	}
+	if config.MaxObjectDepth < 0 {
+		return nil, fmt.Errorf("object max depth must not be negative")
+	}
+
+	return &encoder{config: config}, nil
+}
+
+func newEncoderConfig(pinner pin.Pinner, timer timer.Timer) EncoderConfig {
+	return EncoderConfig{
+		Pinner:           pinner,
+		Timer:            timer,
+		MaxContainerSize: bindings.MaxContainerSize,
+		MaxStringSize:    bindings.MaxStringLength,
+		MaxObjectDepth:   bindings.MaxContainerDepth,
+	}
+}
+
+func newUnlimitedEncoderConfig(pinner pin.Pinner) EncoderConfig {
+	return EncoderConfig{
+		Pinner:           pinner,
+		MaxContainerSize: math.MaxInt,
+		MaxStringSize:    math.MaxInt,
+		MaxObjectDepth:   math.MaxInt,
+	}
+}
+
+// Encode takes a Go value and returns a wafObject pointer and an error.
+// The returned wafObject is the root of the tree of nested wafObjects representing the Go value.
+// The only error case is if the top-level object is "Unusable" which means that the data is nil or a non-data type
+// like a function or a channel.
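+//
+// A minimal internal usage sketch (illustrative only), mirroring how builder.go
+// drives the encoder; the input map is a placeholder:
+//
+//	var pinner runtime.Pinner
+//	defer pinner.Unpin()
+//	enc, err := newEncoder(newUnlimitedEncoderConfig(&pinner))
+//	if err != nil {
+//		panic(err)
+//	}
+//	obj, err := enc.Encode(map[string]any{"key": "value"})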
+func (encoder *encoder) Encode(data any) (*bindings.WAFObject, error) {
+	value := reflect.ValueOf(data)
+	wo := &bindings.WAFObject{}
+
+	err := encoder.encode(value, wo, encoder.config.MaxObjectDepth)
+
+	if _, ok := encoder.truncations[ObjectTooDeep]; ok && !encoder.config.Timer.Exhausted() {
+		ctx, cancelCtx := context.WithTimeout(context.Background(), encoder.config.Timer.Remaining())
+		defer cancelCtx()
+
+		depth, _ := depthOf(ctx, value)
+		encoder.truncations[ObjectTooDeep] = []int{depth}
+	}
+
+	return wo, err
+}
+
+var nullableTypeKinds = map[reflect.Kind]struct{}{
+	reflect.Interface:     {},
+	reflect.Pointer:       {},
+	reflect.UnsafePointer: {},
+	reflect.Map:           {},
+	reflect.Slice:         {},
+	reflect.Func:          {},
+	reflect.Chan:          {},
+}
+
+var (
+	jsonNumberType = reflect.TypeFor[json.Number]()
+	byteArrayType  = reflect.TypeFor[[]byte]()
+)
+
+// isValueNil checks if the value is nullable and if it is actually nil.
+// We cannot directly use value.IsNil() because it panics on non-pointer values.
+func isValueNil(value reflect.Value) bool {
+	_, nullable := nullableTypeKinds[value.Kind()]
+	return nullable && value.IsNil()
+}
+
+func (encoder *encoder) encode(value reflect.Value, obj *bindings.WAFObject, depth int) error {
+	if encoder.config.Timer.Exhausted() {
+		return waferrors.ErrTimeout
+	}
+
+	if value.IsValid() && value.CanInterface() {
+		if encodable, ok := value.Interface().(Encodable); ok {
+			truncations, err := encodable.Encode(encoder.config, obj, depth)
+			encoder.truncations = merge(encoder.truncations, truncations)
+			return err
+		}
+	}
+
+	value, kind := resolvePointer(value)
+	if (kind == reflect.Interface || kind == reflect.Pointer) && !value.IsNil() {
+		// resolvePointer failed to resolve to something that's not a pointer, it
+		// has indirected too many times...
+		return waferrors.ErrTooManyIndirections
+	}
+
+	// Measure-only runs for leaves
+	if obj == nil && kind != reflect.Array && kind != reflect.Slice && kind != reflect.Map && kind != reflect.Struct {
+		// Nothing to do, we were only here to measure object depth!
+		return nil
+	}
+
+	switch {
+	// Terminal cases (leaves of the tree)
+	// Is an invalid type: nil interfaces, for example, cannot be used with most reflect methods without panicking
+	case !value.IsValid() || kind == reflect.Invalid:
+		return waferrors.ErrUnsupportedValue
+	// Is a nullable type: nil pointers, channels, maps or functions
+	case isValueNil(value):
+		obj.SetNil()
+
+	// Booleans
+	case kind == reflect.Bool:
+		obj.SetBool(value.Bool())
+
+	// Numbers
+	case value.CanInt(): // any int type or alias
+		obj.SetInt(value.Int())
+	case value.CanUint(): // any uint type or alias
+		obj.SetUint(value.Uint())
+	case value.CanFloat(): // any float type or alias
+		obj.SetFloat(value.Float())
+
+	// json.Number -- string-represented arbitrary precision numbers
+	case value.Type() == jsonNumberType:
+		encoder.encodeJSONNumber(value.Interface().(json.Number), obj)
+
+	// Strings
+	case kind == reflect.String: // string type
+		encoder.encodeString(value.String(), obj)
+
+	case (kind == reflect.Array || kind == reflect.Slice) && value.Type().Elem().Kind() == reflect.Uint8:
+		// Byte arrays and slices are deliberately skipped because they are often used
+		// for partial parsing, which leads to false positives
+		return nil
+
+	// Containers (internal nodes of the tree)
+
+	// All recursive cases can only execute if the depth is greater than 0.
+	case depth <= 0:
+		// Record that there was a truncation; we will try to measure the actual depth of the object afterwards.
+ encoder.addTruncation(ObjectTooDeep, -1) + return waferrors.ErrMaxDepthExceeded + + // Either an array or a slice + case kind == reflect.Array || kind == reflect.Slice: + encoder.encodeArray(value, obj, depth-1) + case kind == reflect.Map: + encoder.encodeMap(value, obj, depth-1) + case kind == reflect.Struct: + encoder.encodeStruct(value, obj, depth-1) + + default: + return waferrors.ErrUnsupportedValue + } + + return nil +} + +func (encoder *encoder) encodeJSONNumber(num json.Number, obj *bindings.WAFObject) { + // Important to attempt int64 first, as this is lossless. Values that are either too small or too + // large to be represented as int64 can be represented as float64, but this can be lossy. + if i, err := num.Int64(); err == nil { + obj.SetInt(i) + return + } + + if f, err := num.Float64(); err == nil { + obj.SetFloat(f) + return + } + + // Could not store as int64 nor float, so we'll store it as a string... + encoder.encodeString(num.String(), obj) +} + +func (encoder *encoder) encodeString(str string, obj *bindings.WAFObject) { + size := len(str) + if size > encoder.config.MaxStringSize { + str = str[:encoder.config.MaxStringSize] + encoder.addTruncation(StringTooLong, size) + } + obj.SetString(encoder.config.Pinner, str) +} + +var xmlNameType = reflect.TypeFor[xml.Name]() + +func getFieldNameFromType(field reflect.StructField) (string, bool) { + fieldName := field.Name + + // Private and synthetic fields + if !field.IsExported() { + return "", false + } + + // This is the XML namespace/name pair, this isn't technically part of the data. + if field.Type == xmlNameType { + return "", false + } + + // Use the encoding tag name as field name if present + var contentTypeTag bool + for _, tagName := range []string{"json", "yaml", "xml", "toml"} { + tag, ok := field.Tag.Lookup(tagName) + if !ok { + continue + } + if tag == "-" { + // Explicitly ignored, note that only "-" causes the field to be ignored, + // any qualifier ("-,omitempty" or even "-,") will cause the field to be + // actually named "-" instead of being ignored. + return "", false + } + contentTypeTag = true + tag, _, _ = strings.Cut(tag, ",") + switch tag { + case "": + // Nothing to do + continue + default: + return tag, true + } + } + + // If none of the content-type tags are set, the field name is used; but we + // specifically exclude those fields tagged as coming from a header, path + // parameter or query parameter (this is used by labstack/echo.v4, see + // https://echo.labstack.com/docs/binding). + if !contentTypeTag { + for _, tagName := range []string{"header", "path", "query"} { + if _, ok := field.Tag.Lookup(tagName); ok { + return "", false + } + } + } + + return fieldName, true +} + +// encodeStruct takes a reflect.Value and a wafObject pointer and iterates on the struct fields to build +// a wafObject map of type wafMapType.
The specificities are the following: +// - It will only take the first encoder.config.MaxContainerSize elements of the struct +// - If the field has a json tag it will become the field name +// - Private fields and also values producing an error at encoding will be skipped +// - Even if the element values are invalid or null we still keep them to report the field name +func (encoder *encoder) encodeStruct(value reflect.Value, obj *bindings.WAFObject, depth int) { + if encoder.config.Timer.Exhausted() { + return + } + + typ := value.Type() + nbFields := typ.NumField() + + capacity := nbFields + length := 0 + if capacity > encoder.config.MaxContainerSize { + capacity = encoder.config.MaxContainerSize + } + + objArray := obj.SetMap(encoder.config.Pinner, uint64(capacity)) + for i := 0; i < nbFields; i++ { + if encoder.config.Timer.Exhausted() { + return + } + + if length == capacity { + encoder.addTruncation(ContainerTooLarge, nbFields) + break + } + + fieldType := typ.Field(i) + fieldName, usable := getFieldNameFromType(fieldType) + if tag, ok := fieldType.Tag.Lookup(AppsecFieldTag); !usable || ok && tag == AppsecFieldTagValueIgnore { + // Either the struct field is ignored by json marshaling, so we skip it too, + // or the field was explicitly set with `ddwaf:ignore` + continue + } + + objElem := &objArray[length] + // If the Map key is of unsupported type, skip it + encoder.encodeMapKeyFromString(fieldName, objElem) + + if err := encoder.encode(value.Field(i), objElem, depth); err != nil { + // We still need to keep the map key, so we can't discard the full object, instead, we make the value a noop + objElem.SetInvalid() + } + + length++ + } + + // Set the length to the final number of successfully encoded elements + obj.NbEntries = uint64(length) +} + +// encodeMap takes a reflect.Value and a wafObject pointer and iterates on the map elements and returns +// a wafObject map of type wafMapType. The specificities are the following: +// - It will only take the first encoder.config.MaxContainerSize elements of the map +// - Even if the element values are invalid or null we still keep them to report the map key +func (encoder *encoder) encodeMap(value reflect.Value, obj *bindings.WAFObject, depth int) { + capacity := value.Len() + if capacity > encoder.config.MaxContainerSize { + capacity = encoder.config.MaxContainerSize + } + + objArray := obj.SetMap(encoder.config.Pinner, uint64(capacity)) + + length := 0 + for iter := value.MapRange(); iter.Next(); { + if encoder.config.Timer.Exhausted() { + return + } + + if length == capacity { + encoder.addTruncation(ContainerTooLarge, value.Len()) + break + } + + objElem := &objArray[length] + if err := encoder.encodeMapKey(iter.Key(), objElem); err != nil { + continue + } + + if err := encoder.encode(iter.Value(), objElem, depth); err != nil { + // We still need to keep the map key, so we can't discard the full object, instead, we make the value a noop + objElem.SetInvalid() + } + + length++ + } + + // Fix the size because we skipped map entries + obj.NbEntries = uint64(length) +} + +// encodeMapKey takes a reflect.Value and a wafObject and returns a wafObject ready to be considered a map entry. We use +// encodeMapKeyFromString to store the key in the wafObject. But first we need to grab the real +// underlying value by recursing through the pointer and interface values.
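The field-naming rules implemented by getFieldNameFromType above are easy to check in isolation. The following self-contained sketch re-implements just the tag-precedence part for illustration; it is not the library's code, only a distilled version of the same rules.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// fieldKey mirrors the tag-precedence rules: the first json/yaml/xml/toml
// tag wins, a bare "-" drops the field, and unexported fields are skipped.
func fieldKey(f reflect.StructField) (string, bool) {
	if !f.IsExported() {
		return "", false
	}
	for _, tagName := range []string{"json", "yaml", "xml", "toml"} {
		tag, ok := f.Tag.Lookup(tagName)
		if !ok {
			continue
		}
		if tag == "-" {
			return "", false // explicitly ignored
		}
		if name, _, _ := strings.Cut(tag, ","); name != "" {
			return name, true
		}
	}
	return f.Name, true
}

func main() {
	type payload struct {
		UserID string `json:"user_id"`
		Secret string `json:"-"`
		Note   string
	}
	t := reflect.TypeFor[payload]()
	for i := 0; i < t.NumField(); i++ {
		name, ok := fieldKey(t.Field(i))
		fmt.Println(name, ok) // "user_id" true, "" false, "Note" true
	}
}
```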
+func (encoder *encoder) encodeMapKey(value reflect.Value, obj *bindings.WAFObject) error { + value, kind := resolvePointer(value) + + var keyStr string + switch { + case kind == reflect.Invalid: + return waferrors.ErrInvalidMapKey + case kind == reflect.String: + keyStr = value.String() + case value.Type() == byteArrayType: + keyStr = string(value.Bytes()) + default: + return waferrors.ErrInvalidMapKey + } + + encoder.encodeMapKeyFromString(keyStr, obj) + return nil +} + +// encodeMapKeyFromString takes a string and a wafObject and sets the map key attribute on the wafObject to the supplied +// string. The key may be truncated if it exceeds the maximum string size allowed by the encoder. +func (encoder *encoder) encodeMapKeyFromString(keyStr string, obj *bindings.WAFObject) { + size := len(keyStr) + if size > encoder.config.MaxStringSize { + keyStr = keyStr[:encoder.config.MaxStringSize] + encoder.addTruncation(StringTooLong, size) + } + + obj.SetMapKey(encoder.config.Pinner, keyStr) +} + +// encodeArray takes a reflect.Value and a wafObject pointer and iterates on the elements and returns +// a wafObject array of type wafArrayType. The specificities are the following: +// - It will only take the first encoder.config.MaxContainerSize elements of the array +// - Elements producing an error at encoding or null values will be skipped +func (encoder *encoder) encodeArray(value reflect.Value, obj *bindings.WAFObject, depth int) { + length := value.Len() + + capacity := length + if capacity > encoder.config.MaxContainerSize { + capacity = encoder.config.MaxContainerSize + } + + currIndex := 0 + + objArray := obj.SetArray(encoder.config.Pinner, uint64(capacity)) + + for i := 0; i < length; i++ { + if encoder.config.Timer.Exhausted() { + return + } + if currIndex == capacity { + encoder.addTruncation(ContainerTooLarge, length) + break + } + + objElem := &objArray[currIndex] + if err := encoder.encode(value.Index(i), objElem, depth); err != nil { + continue + } + + // If the element is null or invalid it has no impact on the waf execution, therefore we can skip its + // encoding. In this specific case we just overwrite it at the next loop iteration. + if objElem.IsUnusable() { + continue + } + + currIndex++ + } + + // Fix the size because we skipped some entries + obj.NbEntries = uint64(currIndex) +} + +func (encoder *encoder) addTruncation(reason TruncationReason, size int) { + if encoder.truncations == nil { + encoder.truncations = make(map[TruncationReason][]int, 3) + } + encoder.truncations[reason] = append(encoder.truncations[reason], size) +} + +// depthOf returns the depth of the provided object. This is 0 for scalar values, +// such as strings.
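Before the implementation, a simplified, self-contained analogue of the depth measurement for plain map/slice data shows the shape of the recursion, including the context check that bounds the re-measurement time:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// depth is a stripped-down analogue of depthOf for plain map/slice data:
// scalars are depth 0, each container level adds one, and the walk aborts
// once the context's deadline has passed.
func depth(ctx context.Context, v any) (int, error) {
	if err := ctx.Err(); err != nil {
		return 0, err // timed out, won't go any deeper
	}
	walk := func(items []any) (int, error) {
		d := 0
		for _, item := range items {
			id, err := depth(ctx, item)
			if err != nil {
				return 0, err
			}
			d = max(d, id)
		}
		return d + 1, nil
	}
	switch t := v.(type) {
	case []any:
		return walk(t)
	case map[string]any:
		values := make([]any, 0, len(t))
		for _, item := range t {
			values = append(values, item)
		}
		return walk(values)
	default:
		return 0, nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	d, err := depth(ctx, map[string]any{"a": []any{map[string]any{"b": 1}}})
	fmt.Println(d, err) // 3 <nil>
}
```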
+func depthOf(ctx context.Context, obj reflect.Value) (depth int, err error) { + if err = ctx.Err(); err != nil { + // Timed out, won't go any deeper + return 0, err + } + + obj, kind := resolvePointer(obj) + + var itemDepth int + switch kind { + case reflect.Array, reflect.Slice: + if obj.Type() == byteArrayType { + // We treat byte slices as strings + return 0, nil + } + for i := 0; i < obj.Len(); i++ { + itemDepth, err = depthOf(ctx, obj.Index(i)) + depth = max(depth, itemDepth) + if err != nil { + break + } + } + return depth + 1, err + case reflect.Map: + for iter := obj.MapRange(); iter.Next(); { + itemDepth, err = depthOf(ctx, iter.Value()) + depth = max(depth, itemDepth) + if err != nil { + break + } + } + return depth + 1, err + case reflect.Struct: + typ := obj.Type() + for i := 0; i < obj.NumField(); i++ { + fieldType := typ.Field(i) + _, usable := getFieldNameFromType(fieldType) + if !usable { + continue + } + + itemDepth, err = depthOf(ctx, obj.Field(i)) + depth = max(depth, itemDepth) + if err != nil { + break + } + } + return depth + 1, err + default: + return 0, nil + } +} + +// resolvePointer attempts to resolve a pointer while limiting the pointer depth +// to be traversed, so that this is not susceptible to an infinite loop when +// provided a self-referencing pointer. +func resolvePointer(obj reflect.Value) (reflect.Value, reflect.Kind) { + kind := obj.Kind() + for limit := 8; limit > 0 && (kind == reflect.Pointer || kind == reflect.Interface); limit-- { + if obj.IsNil() { + return obj, kind + } + obj = obj.Elem() + kind = obj.Kind() + } + return obj, kind +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/handle.go b/vendor/github.com/DataDog/go-libddwaf/v4/handle.go new file mode 100644 index 00000000..013fefcd --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/handle.go @@ -0,0 +1,169 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package libddwaf + +import ( + "fmt" + "runtime" + "sync/atomic" + + "github.com/DataDog/go-libddwaf/v4/internal/bindings" + "github.com/DataDog/go-libddwaf/v4/internal/unsafe" + "github.com/DataDog/go-libddwaf/v4/timer" + "github.com/DataDog/go-libddwaf/v4/waferrors" +) + +// Handle represents an instance of the WAF for a given ruleset. It is obtained +// from [Builder.Build], and must be disposed of by calling [Handle.Close] +// once no longer in use. +type Handle struct { + // Lock-less reference counter avoiding blocking calls to the [Handle.Close] + // method while WAF [Context]s are still using the WAF handle. Instead, we let + // the release actually happen only when the reference counter reaches 0. + // This can happen either from a request handler calling its WAF context's + // [Context.Close] method, or from the appsec instance calling the WAF + // [Handle.Close] method when creating a new WAF handle with new rules. + // Note that this means several instances of the WAF can exist at the same + // time with their own set of rules. This choice was done to be able to + // efficiently update the security rules concurrently, without having to + // block the request handlers for the time of the security rules update. + refCounter atomic.Int32 + + // Instance of the WAF + cHandle bindings.WAFHandle +} + +// wrapHandle wraps the provided C handle into a [Handle].
The caller is +// responsible for ensuring the cHandle value is not 0 (NULL). The returned +// [Handle] has a reference count of 1, so callers need not call [Handle.retain] +// on it. +func wrapHandle(cHandle bindings.WAFHandle) *Handle { + handle := &Handle{cHandle: cHandle} + handle.refCounter.Store(1) // We count the handle itself in the counter + return handle +} + +// NewContext returns a new WAF context for the given WAF handle. +// An error is returned when the WAF handle was released or when the WAF context +// couldn't be created. +func (handle *Handle) NewContext(timerOptions ...timer.Option) (*Context, error) { + // Handle has been released + if !handle.retain() { + return nil, fmt.Errorf("handle was released") + } + + cContext := wafLib.ContextInit(handle.cHandle) + if cContext == 0 { + handle.Close() // We couldn't get a context, so we no longer have an implicit reference to the Handle in it... + return nil, fmt.Errorf("could not get C context") + } + + rootTimer, err := timer.NewTreeTimer(timerOptions...) + if err != nil { + return nil, err + } + + return &Context{ + handle: handle, + cContext: cContext, + Timer: rootTimer, + truncations: make(map[TruncationReason][]int, 3), + }, nil +} + +// Addresses returns the list of addresses the WAF has been configured to monitor based on the input +// ruleset. +func (handle *Handle) Addresses() []string { + return wafLib.KnownAddresses(handle.cHandle) +} + +// Actions returns the list of actions the WAF has been configured to monitor based on the input +// ruleset. +func (handle *Handle) Actions() []string { + return wafLib.KnownActions(handle.cHandle) +} + +// Close decrements the reference counter of this [Handle], possibly allowing it to be destroyed +// and all the resources associated with it to be released. +func (handle *Handle) Close() { + if handle.addRefCounter(-1) != 0 { + // Either the counter is still positive (this Handle is still referenced), or it had previously + // reached 0 and some other call has done the cleanup already. + return + } + + wafLib.Destroy(handle.cHandle) + handle.cHandle = 0 // Makes it easy to spot use-after-free/double-free issues +} + +// retain increments the reference counter of this [Handle]. Returns true if the +// [Handle] is still valid, false if it is no longer usable. Calls to +// [Handle.retain] must be balanced with calls to [Handle.Close] in order to +// avoid leaking [Handle]s. +func (handle *Handle) retain() bool { + return handle.addRefCounter(1) > 0 +} + +// addRefCounter adds x to Handle.refCounter. The return value indicates whether the refCounter +// reached 0 as part of this call or not, which can be used to perform "only-once" activities: +// +// * result > 0 => the Handle is still usable +// * result == 0 => the handle is no longer usable, ref counter reached 0 as part of this call +// * result == -1 => the handle is no longer usable, ref counter was already 0 previously +func (handle *Handle) addRefCounter(x int32) int32 { + // We use a CAS loop to avoid setting the refCounter to a negative value. + for { + current := handle.refCounter.Load() + if current <= 0 { + // The object had already been released + return -1 + } + + next := current + x + if swapped := handle.refCounter.CompareAndSwap(current, next); swapped { + if next < 0 { + // TODO(romain.marcadier): somehow signal unexpected behavior to the + // caller (panic? error?).
We currently clamp to 0 in order to avoid + // causing a customer program crash, but this is the symptom of a bug + // and should be investigated (however this clamping hides the issue). + return 0 + } + return next + } + } +} + +func newConfig(pinner *runtime.Pinner, keyObfuscatorRegex string, valueObfuscatorRegex string) *bindings.WAFConfig { + return &bindings.WAFConfig{ + Limits: bindings.WAFConfigLimits{ + MaxContainerDepth: bindings.MaxContainerDepth, + MaxContainerSize: bindings.MaxContainerSize, + MaxStringLength: bindings.MaxStringLength, + }, + Obfuscator: bindings.WAFConfigObfuscator{ + KeyRegex: unsafe.PtrToUintptr(unsafe.Cstring(pinner, keyObfuscatorRegex)), + ValueRegex: unsafe.PtrToUintptr(unsafe.Cstring(pinner, valueObfuscatorRegex)), + }, + // Prevent libddwaf from freeing our Go-memory-allocated ddwaf_objects + FreeFn: 0, + } +} + +func goRunError(rc bindings.WAFReturnCode) error { + switch rc { + case bindings.WAFErrInternal: + return waferrors.ErrInternal + case bindings.WAFErrInvalidObject: + return waferrors.ErrInvalidObject + case bindings.WAFErrInvalidArgument: + return waferrors.ErrInvalidArgument + case bindings.WAFOK, bindings.WAFMatch: + // No error... + return nil + default: + return fmt.Errorf("unknown waf return code %d", int(rc)) + } +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/ctypes.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/ctypes.go new file mode 100644 index 00000000..1ae77114 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/ctypes.go @@ -0,0 +1,412 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package bindings + +import ( + "fmt" + "structs" + + "github.com/DataDog/go-libddwaf/v4/internal/pin" + "github.com/DataDog/go-libddwaf/v4/internal/unsafe" + "github.com/DataDog/go-libddwaf/v4/waferrors" + "github.com/pkg/errors" +) + +const ( + MaxStringLength = 4096 + MaxContainerDepth = 20 + MaxContainerSize = 256 +) + +type WAFReturnCode int32 + +const ( + WAFErrInternal WAFReturnCode = iota - 3 + WAFErrInvalidObject + WAFErrInvalidArgument + WAFOK + WAFMatch +) + +// WAFObjectType is an enum in C which has the size of DWORD. +// But DWORD is 4 bytes in amd64 and arm64 so uint32 it is. +type WAFObjectType uint32 + +const WAFInvalidType WAFObjectType = 0 +const ( + WAFIntType WAFObjectType = 1 << iota + WAFUintType + WAFStringType + WAFArrayType + WAFMapType + WAFBoolType + WAFFloatType + WAFNilType +) + +func (w WAFObjectType) String() string { + switch w { + case WAFInvalidType: + return "invalid" + case WAFIntType: + return "int" + case WAFUintType: + return "uint" + case WAFStringType: + return "string" + case WAFArrayType: + return "array" + case WAFMapType: + return "map" + case WAFBoolType: + return "bool" + case WAFFloatType: + return "float" + case WAFNilType: + return "nil" + default: + return fmt.Sprintf("unknown(%d)", w) + } +} + +type WAFObject struct { + _ structs.HostLayout + ParameterName uintptr + ParameterNameLength uint64 + Value uintptr + NbEntries uint64 + Type WAFObjectType + _ [4]byte + // Forced padding + // We only support 2 archs and cgo generated the same padding to both. + // We don't want the C struct to be packed because actually go will do the same padding itself, + // we just add it explicitly to not take any chance. 
+ // And we cannot pack a struct in go so it will get tricky if the struct is + // packed (apart from breaking all tracers of course) +} + +// IsInvalid determines whether this WAF Object has the invalid type (which is the 0-value). +func (w *WAFObject) IsInvalid() bool { + return w.Type == WAFInvalidType +} + +// IsNil determines whether this WAF Object is nil or not. +func (w *WAFObject) IsNil() bool { + return w.Type == WAFNilType +} + +// IsArray determines whether this WAF Object is an array or not. +func (w *WAFObject) IsArray() bool { + return w.Type == WAFArrayType +} + +// IsMap determines whether this WAF Object is a map or not. +func (w *WAFObject) IsMap() bool { + return w.Type == WAFMapType +} + +// IsInt determines whether this WAF Object is an int or not. +func (w *WAFObject) IsInt() bool { + return w.Type == WAFIntType +} + +// IsUint determines whether this WAF Object is a uint or not. +func (w *WAFObject) IsUint() bool { + return w.Type == WAFUintType +} + +// IsBool determines whether this WAF Object is a bool or not. +func (w *WAFObject) IsBool() bool { + return w.Type == WAFBoolType +} + +// IsFloat determines whether this WAF Object is a float or not. +func (w *WAFObject) IsFloat() bool { + return w.Type == WAFFloatType +} + +// IsString determines whether this WAF Object is a string or not. +func (w *WAFObject) IsString() bool { + return w.Type == WAFStringType +} + +// IsUnusable returns true if the wafObject has no impact on the WAF execution. +// But we still need this kind of object to forward map keys in case the value of the map is invalid +func (w *WAFObject) IsUnusable() bool { + return w.Type == WAFInvalidType || w.Type == WAFNilType +} + +// SetArray sets the receiving [WAFObject] to a new array with the given +// capacity. +func (w *WAFObject) SetArray(pinner pin.Pinner, capacity uint64) []WAFObject { + return w.setArrayTyped(pinner, capacity, WAFArrayType) +} + +// SetArrayData sets the receiving [WAFObject] to the provided array items. +func (w *WAFObject) SetArrayData(pinner pin.Pinner, data []WAFObject) { + w.setArrayDataTyped(pinner, data, WAFArrayType) +} + +// SetMap sets the receiving [WAFObject] to a new map with the given capacity. +func (w *WAFObject) SetMap(pinner pin.Pinner, capacity uint64) []WAFObject { + return w.setArrayTyped(pinner, capacity, WAFMapType) +} + +// SetMapData sets the receiving [WAFObject] to the provided map items. +func (w *WAFObject) SetMapData(pinner pin.Pinner, data []WAFObject) { + w.setArrayDataTyped(pinner, data, WAFMapType) +} + +// SetMapKey sets the receiving [WAFObject] to a new map key with the given +// string.
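The predicates above all hinge on one design choice: a single C-layout struct acts as a tagged union, with Type discriminating how the pointer-sized Value word is interpreted. Below is a minimal standalone imitation of that design, using the structs.HostLayout marker from Go 1.23+ and assuming a 64-bit platform so a float64 fits in a uintptr, as on the library's supported amd64/arm64 targets. The miniObject/miniType names are invented for illustration.

```go
package main

import (
	"fmt"
	"structs"
	"unsafe"
)

type miniType uint32

const (
	miniInvalid miniType = iota
	miniFloat
)

// miniObject imitates the WAFObject idea in miniature: structs.HostLayout
// pins the field layout to the platform's C ABI, and the Value word is
// reinterpreted according to the Type tag.
type miniObject struct {
	_     structs.HostLayout
	Value uintptr
	Type  miniType
}

func (o *miniObject) SetFloat(f float64) {
	o.Type = miniFloat
	// Store the float's bit pattern in the pointer-sized Value slot,
	// much like the encoder does with NativeToUintptr.
	o.Value = *(*uintptr)(unsafe.Pointer(&f))
}

func (o *miniObject) Float() (float64, bool) {
	if o.Type != miniFloat {
		return 0, false
	}
	return *(*float64)(unsafe.Pointer(&o.Value)), true
}

func main() {
	var o miniObject
	o.SetFloat(3.5)
	fmt.Println(o.Float()) // 3.5 true
}
```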
+func (w *WAFObject) SetMapKey(pinner pin.Pinner, key string) { + header := unsafe.NativeStringUnwrap(key) + + w.ParameterNameLength = uint64(header.Len) + if w.ParameterNameLength == 0 { + w.ParameterName = 0 + return + } + pinner.Pin(unsafe.Pointer(header.Data)) + w.ParameterName = uintptr(unsafe.Pointer(header.Data)) +} + +func (w *WAFObject) MapKey() string { + return string(unsafe.Slice(*(**byte)(unsafe.Pointer(&w.ParameterName)), w.ParameterNameLength)) +} + +func (w *WAFObject) Values() ([]WAFObject, error) { + if !w.IsArray() && !w.IsMap() { + return nil, errors.New("value is not an array or map") + } + return unsafe.Slice(*(**WAFObject)(unsafe.Pointer(&w.Value)), w.NbEntries), nil +} + +func (w *WAFObject) AnyValue() (any, error) { + switch w.Type { + case WAFArrayType: + return w.ArrayValue() + case WAFBoolType: + return w.BoolValue() + case WAFFloatType: + return w.FloatValue() + case WAFIntType: + return w.IntValue() + case WAFMapType: + return w.MapValue() + case WAFStringType: + return w.StringValue() + case WAFUintType: + return w.UIntValue() + case WAFNilType: + return nil, nil + default: + return nil, fmt.Errorf("%w: %s", waferrors.ErrUnsupportedValue, w.Type) + } +} + +func (w *WAFObject) ArrayValue() ([]any, error) { + if w.IsNil() { + return nil, nil + } + + if !w.IsArray() { + return nil, errors.New("value is not an array") + } + + items, err := w.Values() + if err != nil { + return nil, err + } + + res := make([]any, len(items)) + for i, item := range items { + res[i], err = item.AnyValue() + if err != nil { + return nil, fmt.Errorf("while decoding item at index %d: %w", i, err) + } + } + return res, nil +} + +func (w *WAFObject) MapValue() (map[string]any, error) { + if w.IsNil() { + return nil, nil + } + + if !w.IsMap() { + return nil, errors.New("value is not a map") + } + + items, err := w.Values() + if err != nil { + return nil, err + } + + res := make(map[string]any, len(items)) + for _, item := range items { + key := item.MapKey() + res[key], err = item.AnyValue() + if err != nil { + return nil, fmt.Errorf("while decoding value at %q: %w", key, err) + } + } + return res, nil +} + +func (w *WAFObject) BoolValue() (bool, error) { + if !w.IsBool() { + return false, errors.New("value is not a boolean") + } + return w.Value != 0, nil +} + +func (w *WAFObject) FloatValue() (float64, error) { + if !w.IsFloat() { + return 0, errors.New("value is not a float") + } + return *(*float64)(unsafe.Pointer(&w.Value)), nil +} + +func (w *WAFObject) IntValue() (int64, error) { + if !w.IsInt() { + return 0, errors.New("value is not an int") + } + return int64(w.Value), nil +} + +func (w *WAFObject) StringValue() (string, error) { + if !w.IsString() { + return "", errors.New("value is not a string") + } + return string(unsafe.Slice(*(**byte)(unsafe.Pointer(&w.Value)), w.NbEntries)), nil +} + +func (w *WAFObject) UIntValue() (uint64, error) { + if !w.IsUint() { + return 0, errors.New("value is not a uint") + } + return uint64(w.Value), nil +} + +var blankCStringValue = unsafe.Pointer(unsafe.NativeStringUnwrap("\x00").Data) + +// SetString sets the receiving [WAFObject] value to the given string.
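Putting the setters and getters together, here is a hedged sketch of a round trip through a map object. It assumes *runtime.Pinner satisfies the internal pin.Pinner interface (its method set suggests so), and since bindings is an internal package this only compiles within the go-libddwaf module:

```go
package main

import (
	"fmt"
	"runtime"

	"github.com/DataDog/go-libddwaf/v4/internal/bindings"
)

func main() {
	var pinner runtime.Pinner
	defer pinner.Unpin() // release the pinned Go memory once the C side is done

	var root bindings.WAFObject
	entries := root.SetMap(&pinner, 1)    // map with room for one entry
	entries[0].SetMapKey(&pinner, "user") // keys and strings stay pinned
	entries[0].SetString(&pinner, "alice")

	decoded, err := root.MapValue()
	fmt.Println(decoded, err) // map[user:alice] <nil>
}
```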
+func (w *WAFObject) SetString(pinner pin.Pinner, str string) { + header := unsafe.NativeStringUnwrap(str) + + w.Type = WAFStringType + w.NbEntries = uint64(header.Len) + if w.NbEntries == 0 { + w.Value = uintptr(blankCStringValue) + return + } + pinner.Pin(unsafe.Pointer(header.Data)) + w.Value = uintptr(unsafe.Pointer(header.Data)) +} + +// SetInt sets the receiving [WAFObject] value to the given int. +func (w *WAFObject) SetInt(i int64) { + w.Type = WAFIntType + w.Value = unsafe.NativeToUintptr(i) +} + +// SetUint sets the receiving [WAFObject] value to the given uint. +func (w *WAFObject) SetUint(i uint64) { + w.Type = WAFUintType + w.Value = unsafe.NativeToUintptr(i) +} + +// SetBool sets the receiving [WAFObject] value to the given bool. +func (w *WAFObject) SetBool(b bool) { + w.Type = WAFBoolType + if b { + w.Value = uintptr(1) + } else { + w.Value = uintptr(0) + } +} + +// SetFloat sets the receiving [WAFObject] value to the given float. +func (w *WAFObject) SetFloat(f float64) { + w.Type = WAFFloatType + w.Value = unsafe.NativeToUintptr(f) +} + +// SetNil sets the receiving [WAFObject] to nil. +func (w *WAFObject) SetNil() { + w.Type = WAFNilType + w.Value = 0 +} + +// SetInvalid sets the receiving [WAFObject] to invalid. +func (w *WAFObject) SetInvalid() { + w.Type = WAFInvalidType + w.Value = 0 +} + +func (w *WAFObject) setArrayTyped(pinner pin.Pinner, capacity uint64, t WAFObjectType) []WAFObject { + var arr []WAFObject + if capacity > 0 { + arr = make([]WAFObject, capacity) + } + w.setArrayDataTyped(pinner, arr, t) + return arr +} + +func (w *WAFObject) setArrayDataTyped(pinner pin.Pinner, arr []WAFObject, t WAFObjectType) { + w.Type = t + w.NbEntries = uint64(len(arr)) + if w.NbEntries == 0 { + w.Value = 0 + return + } + + ptr := unsafe.Pointer(unsafe.SliceData(arr)) + pinner.Pin(ptr) + w.Value = uintptr(ptr) +} + +type WAFConfig struct { + _ structs.HostLayout + Limits WAFConfigLimits + Obfuscator WAFConfigObfuscator + FreeFn uintptr +} + +type WAFConfigLimits struct { + _ structs.HostLayout + MaxContainerSize uint32 + MaxContainerDepth uint32 + MaxStringLength uint32 +} + +type WAFConfigObfuscator struct { + _ structs.HostLayout + KeyRegex uintptr // char * + ValueRegex uintptr // char * +} + +type WAFResult struct { + _ structs.HostLayout + Timeout byte + Events WAFObject + Actions WAFObject + Derivatives WAFObject + TotalRuntime uint64 +} + +// WAFBuilder is a forward declaration in ddwaf.h header +// We basically don't need to modify it, only to give it to the waf +type WAFBuilder uintptr + +// WAFHandle is a forward declaration in ddwaf.h header +// We basically don't need to modify it, only to give it to the waf +type WAFHandle uintptr + +// WAFContext is a forward declaration in ddwaf.h header +// We basically don't need to modify it, only to give it to the waf +type WAFContext uintptr diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/libddwaf.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/libddwaf.go new file mode 100644 index 00000000..0bc2f17e --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/libddwaf.go @@ -0,0 +1,79 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build (linux || darwin) && (amd64 || arm64) && !go1.26 && !datadog.no_waf && (cgo || appsec) + +package bindings + +import "github.com/ebitengine/purego" + +type wafSymbols struct { + builderInit uintptr + builderAddOrUpdateConfig uintptr + builderRemoveConfig uintptr + builderBuildInstance uintptr + builderGetConfigPaths uintptr + builderDestroy uintptr + setLogCb uintptr + destroy uintptr + knownAddresses uintptr + knownActions uintptr + getVersion uintptr + contextInit uintptr + contextDestroy uintptr + objectFree uintptr + run uintptr +} + +// newWafSymbols resolves the symbols of [wafSymbols] from the provided +// [purego.Dlopen] handle. +func newWafSymbols(handle uintptr) (syms wafSymbols, err error) { + if syms.builderAddOrUpdateConfig, err = purego.Dlsym(handle, "ddwaf_builder_add_or_update_config"); err != nil { + return syms, err + } + if syms.builderBuildInstance, err = purego.Dlsym(handle, "ddwaf_builder_build_instance"); err != nil { + return syms, err + } + if syms.builderDestroy, err = purego.Dlsym(handle, "ddwaf_builder_destroy"); err != nil { + return syms, err + } + if syms.builderGetConfigPaths, err = purego.Dlsym(handle, "ddwaf_builder_get_config_paths"); err != nil { + return syms, err + } + if syms.builderInit, err = purego.Dlsym(handle, "ddwaf_builder_init"); err != nil { + return syms, err + } + if syms.builderRemoveConfig, err = purego.Dlsym(handle, "ddwaf_builder_remove_config"); err != nil { + return syms, err + } + if syms.contextDestroy, err = purego.Dlsym(handle, "ddwaf_context_destroy"); err != nil { + return syms, err + } + if syms.contextInit, err = purego.Dlsym(handle, "ddwaf_context_init"); err != nil { + return syms, err + } + if syms.destroy, err = purego.Dlsym(handle, "ddwaf_destroy"); err != nil { + return syms, err + } + if syms.getVersion, err = purego.Dlsym(handle, "ddwaf_get_version"); err != nil { + return syms, err + } + if syms.knownActions, err = purego.Dlsym(handle, "ddwaf_known_actions"); err != nil { + return syms, err + } + if syms.knownAddresses, err = purego.Dlsym(handle, "ddwaf_known_addresses"); err != nil { + return syms, err + } + if syms.objectFree, err = purego.Dlsym(handle, "ddwaf_object_free"); err != nil { + return syms, err + } + if syms.run, err = purego.Dlsym(handle, "ddwaf_run"); err != nil { + return syms, err + } + if syms.setLogCb, err = purego.Dlsym(handle, "ddwaf_set_log_cb"); err != nil { + return syms, err + } + return syms, nil +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/safe.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/safe.go similarity index 80% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/safe.go rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/safe.go index 8106913e..a69799f3 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/safe.go +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/safe.go @@ -6,17 +6,16 @@ package bindings import ( - wafErrors "github.com/DataDog/go-libddwaf/v3/errors" - "fmt" "reflect" "runtime" + "github.com/DataDog/go-libddwaf/v4/waferrors" "github.com/pkg/errors" ) -func newPanicError(in func() error, err error) *wafErrors.PanicError { - return &wafErrors.PanicError{ +func newPanicError(in any, err error) *waferrors.PanicError { + return &waferrors.PanicError{ In: runtime.FuncForPC(reflect.ValueOf(in).Pointer()).Name(), Err: err, } @@ -24,7 +23,7 @@ func newPanicError(in func() error, err error) *wafErrors.PanicError { // tryCall calls function `f` and 
recovers from any panic occurring while it // executes, returning it in a `PanicError` object type. -func tryCall(f func() error) (err error) { +func tryCall[T any](f func() T) (res T, err error) { defer func() { r := recover() if r == nil { @@ -43,5 +42,6 @@ func tryCall(f func() error) (err error) { err = newPanicError(f, err) }() - return f() + res = f() + return } diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl.go new file mode 100644 index 00000000..66de699f --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl.go @@ -0,0 +1,250 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build (linux || darwin) && (amd64 || arm64) && !go1.26 && !datadog.no_waf && (cgo || appsec) + +package bindings + +import ( + "errors" + "fmt" + "os" + "runtime" + + "github.com/DataDog/go-libddwaf/v4/internal/lib" + "github.com/DataDog/go-libddwaf/v4/internal/log" + "github.com/DataDog/go-libddwaf/v4/internal/unsafe" + "github.com/ebitengine/purego" +) + +// WAFLib is the type wrapper for all C calls to the waf. +// It uses `libddwaf` to make C calls. +// All calls must go through this one-liner to be type safe +// since purego calls are not type safe. +type WAFLib struct { + wafSymbols + handle uintptr +} + +// NewWAFLib loads the libddwaf shared library and resolves all the relevant symbols. +// The caller is responsible for calling WAFLib.Close on the returned object once they +// are done with it so that associated resources can be released. +func NewWAFLib() (dl *WAFLib, err error) { + path, closer, err := lib.DumpEmbeddedWAF() + if err != nil { + return nil, fmt.Errorf("dump embedded WAF: %w", err) + } + defer func() { + if rmErr := closer(); rmErr != nil { + err = errors.Join(err, fmt.Errorf("error removing %s: %w", path, rmErr)) + } + }() + + var handle uintptr + if handle, err = purego.Dlopen(path, purego.RTLD_GLOBAL|purego.RTLD_NOW); err != nil { + return nil, fmt.Errorf("load a dynamic library file: %w", err) + } + + var symbols wafSymbols + if symbols, err = newWafSymbols(handle); err != nil { + if closeErr := purego.Dlclose(handle); closeErr != nil { + err = errors.Join(err, fmt.Errorf("error releasing the shared libddwaf library: %w", closeErr)) + } + return + } + + dl = &WAFLib{symbols, handle} + + // Try calling the waf to make sure everything is fine + if _, err = tryCall(dl.GetVersion); err != nil { + if closeErr := purego.Dlclose(handle); closeErr != nil { + err = errors.Join(err, fmt.Errorf("error releasing the shared libddwaf library: %w", closeErr)) + } + return + } + + if val := os.Getenv(log.EnvVarLogLevel); val != "" { + logLevel := log.LevelNamed(val) + if logLevel != log.LevelOff { + dl.SetLogCb(log.CallbackFunctionPointer(), logLevel) + } + } + + return +} + +func (waf *WAFLib) Close() error { + return purego.Dlclose(waf.handle) +} + +// GetVersion returned string is a static string so we do not need to free it +func (waf *WAFLib) GetVersion() string { + return unsafe.Gostring(unsafe.Cast[byte](waf.syscall(waf.getVersion))) +} + +// BuilderInit initializes a new WAF builder with the provided configuration, +// which may be nil. Returns nil in case of an error.
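The Dlopen/Dlsym/SyscallN pattern used by NewWAFLib and the wrappers below can be reduced to a few lines. This standalone sketch loads a library from a placeholder path (/tmp/libddwaf.so is invented; in the real flow the path comes from DumpEmbeddedWAF) and calls ddwaf_get_version by hand:

```go
package main

import (
	"fmt"
	"unsafe"

	"github.com/ebitengine/purego"
)

// goString copies a NUL-terminated C string into a Go string. The pointer
// arithmetic is safe here because the memory belongs to the C library, not
// the Go garbage collector.
func goString(ptr uintptr) string {
	if ptr == 0 {
		return ""
	}
	n := 0
	for *(*byte)(unsafe.Add(unsafe.Pointer(ptr), n)) != 0 {
		n++
	}
	return string(unsafe.Slice((*byte)(unsafe.Pointer(ptr)), n))
}

func main() {
	handle, err := purego.Dlopen("/tmp/libddwaf.so", purego.RTLD_GLOBAL|purego.RTLD_NOW)
	if err != nil {
		panic(err)
	}
	defer purego.Dlclose(handle)

	getVersion, err := purego.Dlsym(handle, "ddwaf_get_version")
	if err != nil {
		panic(err)
	}

	// ddwaf_get_version returns a static char*, so no free is needed.
	ret, _, _ := purego.SyscallN(getVersion)
	fmt.Println(goString(ret))
}
```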
+func (waf *WAFLib) BuilderInit(cfg *WAFConfig) WAFBuilder { + var pinner runtime.Pinner + defer pinner.Unpin() + pinner.Pin(cfg) + + return WAFBuilder(waf.syscall(waf.builderInit, unsafe.PtrToUintptr(cfg))) +} + +// BuilderAddOrUpdateConfig adds or updates a configuration based on the +// given path, which must be a unique identifier for the provided configuration. +// Returns false in case of an error. +func (waf *WAFLib) BuilderAddOrUpdateConfig(builder WAFBuilder, path string, config *WAFObject, diags *WAFObject) bool { + var pinner runtime.Pinner + defer pinner.Unpin() + pinner.Pin(config) + pinner.Pin(diags) + + res := waf.syscall(waf.builderAddOrUpdateConfig, + uintptr(builder), + unsafe.PtrToUintptr(unsafe.Cstring(&pinner, path)), + uintptr(len(path)), + unsafe.PtrToUintptr(config), + unsafe.PtrToUintptr(diags), + ) + return byte(res) != 0 +} + +// BuilderRemoveConfig removes a configuration based on the provided path. +// Returns false in case of an error. +func (waf *WAFLib) BuilderRemoveConfig(builder WAFBuilder, path string) bool { + var pinner runtime.Pinner + defer pinner.Unpin() + + return byte(waf.syscall(waf.builderRemoveConfig, + uintptr(builder), + unsafe.PtrToUintptr(unsafe.Cstring(&pinner, path)), + uintptr(len(path)), + )) != 0 +} + +// BuilderBuildInstance builds a WAF instance based on the current set of configurations. +// Returns nil in case of an error. +func (waf *WAFLib) BuilderBuildInstance(builder WAFBuilder) WAFHandle { + return WAFHandle(waf.syscall(waf.builderBuildInstance, uintptr(builder))) +} + +// BuilderGetConfigPaths returns the list of currently loaded paths. +// Returns nil in case of an error. +func (waf *WAFLib) BuilderGetConfigPaths(builder WAFBuilder, filter string) []string { + var paths WAFObject + var pinner runtime.Pinner + defer pinner.Unpin() + pinner.Pin(&filter) + pinner.Pin(&paths) + + count := waf.syscall(waf.builderGetConfigPaths, + uintptr(builder), + unsafe.PtrToUintptr(&paths), + unsafe.PtrToUintptr(unsafe.StringData(filter)), + uintptr(len(filter)), + ) + defer waf.ObjectFree(&paths) + + list := make([]string, 0, count) + for i := range uint64(count) { + obj := unsafe.CastWithOffset[WAFObject](paths.Value, i) + path := unsafe.GostringSized(unsafe.Cast[byte](obj.Value), obj.NbEntries) + list = append(list, path) + } + return list +} + +// BuilderDestroy destroys a WAF builder instance. +func (waf *WAFLib) BuilderDestroy(builder WAFBuilder) { + waf.syscall(waf.builderDestroy, uintptr(builder)) +} + +// SetLogCb sets the log callback function for the WAF. +func (waf *WAFLib) SetLogCb(cb uintptr, level log.Level) { + waf.syscall(waf.setLogCb, cb, uintptr(level)) +} + +// Destroy destroys a WAF instance. 
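Taken together, the builder wrappers above are meant to be called in a fixed sequence. Here is a hedged sketch of that lifecycle; bindings is an internal package, the config path "example/rules/v1" is invented, and building the ruleset object itself is elided:

```go
// buildHandle is a hypothetical helper showing the call order only.
func buildHandle(ruleset *bindings.WAFObject) (bindings.WAFHandle, error) {
	waf, err := bindings.NewWAFLib()
	if err != nil {
		return 0, err
	}

	builder := waf.BuilderInit(nil) // nil config: libddwaf falls back to defaults
	if builder == 0 {
		return 0, errors.New("could not create WAF builder")
	}
	defer waf.BuilderDestroy(builder)

	var diags bindings.WAFObject
	if !waf.BuilderAddOrUpdateConfig(builder, "example/rules/v1", ruleset, &diags) {
		return 0, errors.New("ruleset rejected; inspect the diagnostics object")
	}

	handle := waf.BuilderBuildInstance(builder) // 0 (NULL) means the build failed
	if handle == 0 {
		return 0, errors.New("could not build WAF instance")
	}
	return handle, nil
}
```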
+func (waf *WAFLib) Destroy(handle WAFHandle) { + waf.syscall(waf.destroy, uintptr(handle)) +} + +func (waf *WAFLib) KnownAddresses(handle WAFHandle) []string { + return waf.knownX(handle, waf.knownAddresses) +} + +func (waf *WAFLib) KnownActions(handle WAFHandle) []string { + return waf.knownX(handle, waf.knownActions) +} + +func (waf *WAFLib) knownX(handle WAFHandle, symbol uintptr) []string { + var nbAddresses uint32 + + var pinner runtime.Pinner + defer pinner.Unpin() + pinner.Pin(&nbAddresses) + + arrayVoidC := waf.syscall(symbol, uintptr(handle), unsafe.PtrToUintptr(&nbAddresses)) + if arrayVoidC == 0 { + return nil + } + + if nbAddresses == 0 { + return nil + } + + // These C strings are static strings so we do not need to free them + addresses := make([]string, int(nbAddresses)) + for i := 0; i < int(nbAddresses); i++ { + addresses[i] = unsafe.Gostring(*unsafe.CastWithOffset[*byte](arrayVoidC, uint64(i))) + } + + return addresses +} + +func (waf *WAFLib) ContextInit(handle WAFHandle) WAFContext { + return WAFContext(waf.syscall(waf.contextInit, uintptr(handle))) +} + +func (waf *WAFLib) ContextDestroy(context WAFContext) { + waf.syscall(waf.contextDestroy, uintptr(context)) +} + +func (waf *WAFLib) ObjectFree(obj *WAFObject) { + var pinner runtime.Pinner + defer pinner.Unpin() + pinner.Pin(obj) + + waf.syscall(waf.objectFree, unsafe.PtrToUintptr(obj)) +} + +func (waf *WAFLib) Run(context WAFContext, persistentData, ephemeralData *WAFObject, result *WAFObject, timeout uint64) WAFReturnCode { + var pinner runtime.Pinner + defer pinner.Unpin() + + pinner.Pin(persistentData) + pinner.Pin(ephemeralData) + pinner.Pin(result) + + return WAFReturnCode(waf.syscall(waf.run, uintptr(context), unsafe.PtrToUintptr(persistentData), unsafe.PtrToUintptr(ephemeralData), unsafe.PtrToUintptr(result), uintptr(timeout))) +} + +func (waf *WAFLib) Handle() uintptr { + return waf.handle +} + +// syscall is the only way to make C calls with this interface. +// purego implementation limits the number of arguments to 9, it will panic if more are provided +// Note: `purego.SyscallN` has 3 return values: these are the following: +// +// 1st - The return value is a pointer or a int of any type +// 2nd - The return value is a float +// 3rd - The value of `errno` at the end of the call +func (waf *WAFLib) syscall(fn uintptr, args ...uintptr) uintptr { + ret, _, _ := purego.SyscallN(fn, args...) + return ret +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl_unsupported.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl_unsupported.go new file mode 100644 index 00000000..d2a0a934 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl_unsupported.go @@ -0,0 +1,59 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Build when the target OS or architecture are not supported +//go:build (!linux && !darwin) || (!amd64 && !arm64) || go1.26 || datadog.no_waf || (!cgo && !appsec) + +package bindings + +import ( + "errors" + + "github.com/DataDog/go-libddwaf/v4/internal/log" +) + +type WAFLib struct{} + +func NewWAFLib() (*WAFLib, error) { + return nil, errors.New("go-libddwaf is not supported on this platform") +} + +func (*WAFLib) Close() error { return nil } + +func (*WAFLib) GetVersion() string { return "" } + +func (*WAFLib) BuilderInit(*WAFConfig) WAFBuilder { return 0 } + +func (*WAFLib) BuilderAddOrUpdateConfig(WAFBuilder, string, *WAFObject, *WAFObject) bool { + return false +} + +func (*WAFLib) BuilderRemoveConfig(WAFBuilder, string) bool { return false } + +func (*WAFLib) BuilderBuildInstance(WAFBuilder) WAFHandle { return 0 } + +func (*WAFLib) BuilderGetConfigPaths(WAFBuilder, string) []string { return nil } + +func (*WAFLib) BuilderDestroy(WAFBuilder) {} + +func (*WAFLib) SetLogCb(uintptr, log.Level) {} + +func (*WAFLib) Destroy(WAFHandle) {} + +func (*WAFLib) KnownAddresses(WAFHandle) []string { return nil } + +func (*WAFLib) KnownActions(WAFHandle) []string { return nil } + +func (*WAFLib) ContextInit(WAFHandle) WAFContext { return 0 } + +func (*WAFLib) ContextDestroy(WAFContext) {} + +func (*WAFLib) ObjectFree(*WAFObject) {} + +func (*WAFLib) Run(WAFContext, *WAFObject, *WAFObject, *WAFObject, uint64) WAFReturnCode { + return WAFErrInternal +} + +func (*WAFLib) Handle() uintptr { return 0 } diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/.version b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/.version new file mode 100644 index 00000000..7c819a96 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/.version @@ -0,0 +1 @@ +1.25.1 \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/README.md b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/README.md similarity index 100% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/README.md rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/README.md diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/doc.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/doc.go similarity index 100% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/doc.go rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/doc.go diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_darwin.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_darwin.go new file mode 100644 index 00000000..b6c89720 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_darwin.go @@ -0,0 +1,61 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build darwin && (amd64 || arm64) && !go1.26 && !datadog.no_waf && (cgo || appsec) + +package lib + +import ( + "bytes" + "compress/gzip" + _ "embed" + "errors" + "fmt" + "io" + "os" +) + +// DumpEmbeddedWAF for darwin platform. +// DumpEmbeddedWAF creates a temporary file with the embedded WAF library content and returns the path to the file, +// a closer function and an error. This is the only way to make all implementations of DumpEmbeddedWAF consistent +// across all platforms. 
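The _unsupported file above illustrates a convention used throughout this vendored library: every platform-gated file has a stub twin whose build constraint is the exact negation, so call sites compile everywhere and only ever see a runtime error on unsupported targets. In miniature (two separate files shown in one block; the file and package names are invented):

```go
// feature_supported.go
//go:build linux || darwin

package feature

func Load() error { return nil } // the real work happens here

// feature_unsupported.go (separate file; the constraint is the exact negation)
//go:build !linux && !darwin

package feature

import "errors"

func Load() error { return errors.New("feature is not supported on this platform") }
```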
+func DumpEmbeddedWAF() (path string, closer func() error, err error) { + file, err := os.CreateTemp("", "libddwaf-*.dylib") + if err != nil { + return "", nil, fmt.Errorf("error creating temp file: %w", err) + } + + defer func() { + if err != nil { + if closeErr := file.Close(); closeErr != nil { + err = errors.Join(err, fmt.Errorf("error closing file: %w", closeErr)) + } + if rmErr := os.Remove(file.Name()); rmErr != nil { + err = errors.Join(err, fmt.Errorf("error removing file: %w", rmErr)) + } + } + }() + + gr, err := gzip.NewReader(bytes.NewReader(libddwaf)) + if err != nil { + return "", nil, fmt.Errorf("error creating gzip reader: %w", err) + } + + if _, err := io.Copy(file, gr); err != nil { + return "", nil, fmt.Errorf("error copying gzip content to file: %w", err) + } + + if err := gr.Close(); err != nil { + return "", nil, fmt.Errorf("error closing gzip reader: %w", err) + } + + if err := file.Close(); err != nil { + return "", nil, fmt.Errorf("error closing dylib file: %w", err) + } + + return file.Name(), func() error { + return os.Remove(file.Name()) + }, nil +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_linux.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_linux.go new file mode 100644 index 00000000..d9f38178 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_linux.go @@ -0,0 +1,58 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux && (amd64 || arm64) && !go1.26 && !datadog.no_waf && (cgo || appsec) + +package lib + +import ( + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "golang.org/x/sys/unix" +) + +// DumpEmbeddedWAF for linux systems. +// It creates a memfd and writes the embedded WAF library to it. Then it returns the /proc/self/fd/ path +// to the file. This trick lets us load the library without having to write it to disk, +// hence making go-libddwaf work on fully read-only filesystems.
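Usage-wise, both platform variants are consumed identically, and the closer must run even on success once Dlopen has mapped the file. A hedged sketch (lib is an internal package, so this is illustrative only):

```go
// loadEmbeddedWAF is a hypothetical helper mirroring what NewWAFLib does.
func loadEmbeddedWAF() (handle uintptr, err error) {
	path, closer, err := lib.DumpEmbeddedWAF()
	if err != nil {
		return 0, fmt.Errorf("dump embedded WAF: %w", err)
	}
	// The closer can run as soon as Dlopen has mapped the file: on Linux it
	// just drops the memfd, on macOS it removes the temporary dylib.
	defer func() { err = errors.Join(err, closer()) }()

	return purego.Dlopen(path, purego.RTLD_GLOBAL|purego.RTLD_NOW)
}
```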
+func DumpEmbeddedWAF() (path string, closer func() error, err error) { + fd, err := unix.MemfdCreate("libddwaf", 0) + if err != nil { + return "", nil, fmt.Errorf("error creating memfd: %w", err) + } + + file := os.NewFile(uintptr(fd), fmt.Sprintf("/proc/self/fd/%d", fd)) + if file == nil { + return "", nil, errors.New("error creating file from fd") + } + + defer func() { + if file != nil && err != nil { + if closeErr := file.Close(); closeErr != nil { + err = errors.Join(err, fmt.Errorf("error closing file: %w", closeErr)) + } + } + }() + + gr, err := gzip.NewReader(bytes.NewReader(libddwaf)) + if err != nil { + return "", nil, fmt.Errorf("error creating gzip reader: %w", err) + } + + if _, err := io.Copy(file, gr); err != nil { + return "", nil, fmt.Errorf("error copying gzip content to memfd: %w", err) + } + + if err := gr.Close(); err != nil { + return "", nil, fmt.Errorf("error closing gzip reader: %w", err) + } + + return file.Name(), file.Close, nil +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_amd64.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_amd64.go similarity index 79% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_amd64.go rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_amd64.go index 27c7acf0..52d7511b 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_amd64.go +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_amd64.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build darwin && amd64 && !go1.24 && !datadog.no_waf && (cgo || appsec) +//go:build darwin && amd64 && !go1.26 && !datadog.no_waf && (cgo || appsec) package lib @@ -13,5 +13,3 @@ import _ "embed" // Needed for go:embed //go:embed libddwaf-darwin-amd64.dylib.gz var libddwaf []byte - -const embedNamePattern = "libddwaf-*.dylib" diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_arm64.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_arm64.go similarity index 79% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_arm64.go rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_arm64.go index 3133ac40..cc849873 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_arm64.go +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_arm64.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//go:build darwin && arm64 && !go1.24 && !datadog.no_waf && (cgo || appsec) +//go:build darwin && arm64 && !go1.26 && !datadog.no_waf && (cgo || appsec) package lib @@ -13,5 +13,3 @@ import _ "embed" // Needed for go:embed //go:embed libddwaf-darwin-arm64.dylib.gz var libddwaf []byte - -const embedNamePattern = "libddwaf-*.dylib" diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_amd64.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_amd64.go similarity index 79% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_amd64.go rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_amd64.go index 9e72cdca..4cc9b52c 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_amd64.go +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_amd64.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux && amd64 && !go1.24 && !datadog.no_waf && (cgo || appsec) +//go:build linux && amd64 && !go1.26 && !datadog.no_waf && (cgo || appsec) package lib @@ -13,5 +13,3 @@ import _ "embed" // Needed for go:embed //go:embed libddwaf-linux-amd64.so.gz var libddwaf []byte - -const embedNamePattern = "libddwaf-*.so" diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_arm64.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_arm64.go similarity index 79% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_arm64.go rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_arm64.go index e8be318d..7d2d299a 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_arm64.go +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_arm64.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//go:build linux && arm64 && !go1.24 && !datadog.no_waf && (cgo || appsec) +//go:build linux && arm64 && !go1.26 && !datadog.no_waf && (cgo || appsec) package lib @@ -13,5 +13,3 @@ import _ "embed" // Needed for go:embed //go:embed libddwaf-linux-arm64.so.gz var libddwaf []byte - -const embedNamePattern = "libddwaf-*.so" diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-amd64.dylib.gz b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-amd64.dylib.gz new file mode 100644 index 00000000..05ba8467 Binary files /dev/null and b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-amd64.dylib.gz differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-arm64.dylib.gz b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-arm64.dylib.gz new file mode 100644 index 00000000..3e27856a Binary files /dev/null and b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-arm64.dylib.gz differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-amd64.so.gz b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-amd64.so.gz new file mode 100644 index 00000000..894bd822 Binary files /dev/null and b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-amd64.so.gz differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-arm64.so.gz b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-arm64.so.gz new file mode 100644 index 00000000..8e1d1741 Binary files /dev/null and b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-arm64.so.gz differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/version.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/version.go new file mode 100644 index 00000000..8c0828e9 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/version.go @@ -0,0 +1,13 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
+ +package lib + +import ( + _ "embed" // For go:embed +) + +//go:embed .version +var EmbeddedWAFVersion string diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/ddwaf.h b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/ddwaf.h similarity index 76% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/log/ddwaf.h rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/log/ddwaf.h index cfad7f93..e7f00f79 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/ddwaf.h +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/ddwaf.h @@ -11,17 +11,19 @@ namespace ddwaf{ class waf; class context_wrapper; +class waf_builder; } // namespace ddwaf using ddwaf_handle = ddwaf::waf *; using ddwaf_context = ddwaf::context_wrapper *; +using ddwaf_builder = ddwaf::waf_builder *; extern "C" { #endif #include -#include #include +#include #define DDWAF_MAX_STRING_LENGTH 4096 #define DDWAF_MAX_CONTAINER_DEPTH 20 @@ -61,11 +63,11 @@ typedef enum **/ typedef enum { - DDWAF_ERR_INTERNAL = -3, - DDWAF_ERR_INVALID_OBJECT = -2, + DDWAF_ERR_INTERNAL = -3, + DDWAF_ERR_INVALID_OBJECT = -2, DDWAF_ERR_INVALID_ARGUMENT = -1, - DDWAF_OK = 0, - DDWAF_MATCH = 1, + DDWAF_OK = 0, + DDWAF_MATCH = 1, } DDWAF_RET_CODE; /** @@ -86,11 +88,11 @@ typedef enum #ifndef __cplusplus typedef struct _ddwaf_handle* ddwaf_handle; typedef struct _ddwaf_context* ddwaf_context; +typedef struct _ddwaf_builder* ddwaf_builder; #endif typedef struct _ddwaf_object ddwaf_object; typedef struct _ddwaf_config ddwaf_config; -typedef struct _ddwaf_result ddwaf_result; /** * @struct ddwaf_object * @@ -151,27 +153,6 @@ struct _ddwaf_config ddwaf_object_free_fn free_fn; }; -/** - * @struct ddwaf_result - * - * Structure containing the result of a WAF run. - **/ -struct _ddwaf_result -{ - /** Whether there has been a timeout during the operation **/ - bool timeout; - /** Array of events generated, this is guaranteed to be an array **/ - ddwaf_object events; - /** Map of actions generated, this is guaranteed to be a map in the format: - * {action type: { }, ...} - **/ - ddwaf_object actions; - /** Map containing all derived objects in the format (address, value) **/ - ddwaf_object derivatives; - /** Total WAF runtime in nanoseconds **/ - uint64_t total_runtime; -}; - /** * @typedef ddwaf_log_cb * @@ -207,28 +188,12 @@ typedef void (*ddwaf_log_cb)( ddwaf_handle ddwaf_init(const ddwaf_object *ruleset, const ddwaf_config* config, ddwaf_object *diagnostics); -/** - * ddwaf_update - * - * Update a ddwaf instance - * - * @param ruleset ddwaf::object map containing rules, exclusions, rules_override and rules_data. (nonnull) - * @param diagnostics Optional ruleset parsing diagnostics. (nullable) - * - * @return Handle to the new WAF instance or NULL if there was an error processing the ruleset. - * - * @note If handle or ruleset are NULL, the diagnostics object will not be initialised. - * @note This function is not thread-safe - **/ -ddwaf_handle ddwaf_update(ddwaf_handle handle, const ddwaf_object *ruleset, - ddwaf_object *diagnostics); - /** * ddwaf_destroy * * Destroy a WAF instance. * - * @param Handle to the WAF instance. + * @param handle Handle to the WAF instance. */ void ddwaf_destroy(ddwaf_handle handle); @@ -242,16 +207,34 @@ void ddwaf_destroy(ddwaf_handle handle); * * The memory is owned by the WAF and should not be freed. * - * @param Handle to the WAF instance. + * @param handle Handle to the WAF instance. * @param size Output parameter in which the size will be returned. 
The value of * size will be 0 if the return value is NULL. * @return NULL if empty, otherwise a pointer to an array with size elements. * - * @Note The returned array should be considered invalid after calling ddwaf_destroy + * @note This function is not thread-safe + * @note The returned array should be considered invalid after calling ddwaf_destroy * on the handle used to obtain it. **/ const char* const* ddwaf_known_addresses(const ddwaf_handle handle, uint32_t *size); - +/** + * ddwaf_known_actions + * + * Get an array of all the action types which could be triggered as a result of + * the current set of rules and exclusion filters. + * + * The memory is owned by the WAF and should not be freed. + * + * @param handle Handle to the WAF instance. + * @param size Output parameter in which the size will be returned. The value of + * size will be 0 if the return value is NULL. + * @return NULL if empty, otherwise a pointer to an array with size elements. + * + * @note This function is not thread-safe + * @note The returned array should be considered invalid after calling ddwaf_destroy + * on the handle used to obtain it. + **/ +const char *const *ddwaf_known_actions(const ddwaf_handle handle, uint32_t *size); /** * ddwaf_context_init * @@ -291,10 +274,23 @@ ddwaf_context ddwaf_context_init(const ddwaf_handle handle); * can be of an arbitrary type. This parameter can be null if persistent data * is provided. * - * @param result Structure containing the result of the operation. (nullable) + * @param result (nullable) Object map containing the following items: + * - events: an array of the generated events. + * - actions: a map of the generated actions in the format: + * {action type: { }, ...} + * - duration: an unsigned specifying the total runtime of the + * call in nanoseconds. + * - timeout: whether there has been a timeout during the call. + * - attributes: a map containing all derived objects in the + * format: {tag, value} + * - keep: whether the data contained herein must override any + * transport sampling through the relevant mechanism. + * This structure must be freed by the caller and will contain all + * specified keys when the value returned by ddwaf_run is either + * DDWAF_OK or DDWAF_MATCH and will be empty otherwise. * @param timeout Maximum time budget in microseconds. * - * @return Return code of the operation, also contained in the result structure. + * @return Return code of the operation. * @error DDWAF_ERR_INVALID_ARGUMENT The context is invalid, the data will not * be freed. * @error DDWAF_ERR_INVALID_OBJECT The data provided didn't match the desired @@ -325,7 +321,7 @@ ddwaf_context ddwaf_context_init(const ddwaf_handle handle); * recommended and might be explicitly rejected in the future. **/ DDWAF_RET_CODE ddwaf_run(ddwaf_context context, ddwaf_object *persistent_data, - ddwaf_object *ephemeral_data, ddwaf_result *result, uint64_t timeout); + ddwaf_object *ephemeral_data, ddwaf_object *result, uint64_t timeout); /** * ddwaf_context_destroy @@ -338,13 +334,100 @@ DDWAF_RET_CODE ddwaf_run(ddwaf_context context, ddwaf_object *persistent_data, void ddwaf_context_destroy(ddwaf_context context); /** - * ddwaf_result_free + * ddwaf_builder_init + * + * Initialize an instance of the WAF builder. + * + * @param config Optional configuration of the WAF. (nullable) * - * Free a ddwaf_result structure. + * @return Handle to the builder instance or NULL on error. * - * @param result Structure to free. (nonnull) + * @note If config is NULL, default values will be used, including the default + * free function (ddwaf_object_free). **/ -void ddwaf_result_free(ddwaf_result *result); +ddwaf_builder ddwaf_builder_init(const ddwaf_config *config); + +/** + * ddwaf_builder_add_or_update_config + * + * Adds or updates a configuration based on the given path, which must be a unique + * identifier for the provided configuration. + * + * @param builder Builder to perform the operation on. (nonnull) + * @param path A string containing the path of the configuration; this must uniquely identify the configuration. (nonnull) + * @param path_len The length of the string contained within path. + * @param config ddwaf::object map containing rules, exclusions, rules_override and rules_data. (nonnull) + * @param diagnostics Optional ruleset parsing diagnostics. (nullable) + * + * @return Whether the operation succeeded (true) or failed (false). + * + * @note If any of the arguments are NULL, the diagnostics object will not be initialised. + * @note The memory associated with the path, config and diagnostics must be freed by the caller. + * @note This function is not thread-safe. + **/ +bool ddwaf_builder_add_or_update_config(ddwaf_builder builder, const char *path, uint32_t path_len, const ddwaf_object *config, ddwaf_object *diagnostics); + +/** + * ddwaf_builder_remove_config + * + * Removes a configuration based on the provided path. + * + * @param builder Builder to perform the operation on. (nonnull) + * @param path A string containing the path of the configuration to be removed. (nonnull) + * @param path_len The length of the string contained within path. + * + * @return Whether the operation succeeded (true) or failed (false). + * + * @note The memory associated with the path must be freed by the caller. + * @note This function is not thread-safe. + **/ +bool ddwaf_builder_remove_config(ddwaf_builder builder, const char *path, uint32_t path_len); + +/** + * ddwaf_builder_build_instance + * + * Builds a ddwaf instance based on the current set of configurations. + * + * @param builder Builder to perform the operation on. (nonnull) + * + * @return Handle to the new WAF instance or NULL if there was an error. + * + * @note This function is not thread-safe. + **/ +ddwaf_handle ddwaf_builder_build_instance(ddwaf_builder builder); + +/** + * ddwaf_builder_get_config_paths + * + * Provides an array of the currently loaded paths, optionally matching the + * regex provided in filter. In addition, the count is provided as the return + * value, allowing paths to be nullptr. + * + * @param builder Builder to perform the operation on. (nonnull) + * @param paths The object in which paths will be returned, as an array of + * strings. If NULL, only the count is provided. (nullable) + * @param filter An optional string regex to filter the provided paths. The + * provided regular expression is used unanchored so matches can be found + * at any point within the path; any necessary anchors must be explicitly + * added to the regex. (nullable). + * @param filter_len The length of the filter string (or 0 otherwise). + * + * @return The total number of configurations loaded or, if provided, the number + * of those matching the filter. + * + * @note This function is not thread-safe and the memory of the paths object must + * be freed by the caller.
+ **/ +uint32_t ddwaf_builder_get_config_paths(ddwaf_builder builder, ddwaf_object *paths, const char *filter, uint32_t filter_len); + +/** + * ddwaf_builder_destroy + * + * Destroy an instance of the builder. + * + * @param builder Builder to perform the operation on. (nonnull) + */ +void ddwaf_builder_destroy(ddwaf_builder builder); /** * ddwaf_object_invalid @@ -680,6 +763,19 @@ bool ddwaf_object_get_bool(const ddwaf_object *object); **/ const ddwaf_object* ddwaf_object_get_index(const ddwaf_object *object, size_t index); +/** + * ddwaf_object_find + * + * Returns the object within the given map with a key matching the provided one. + * + * @param object The container from which to extract the object. + * @param key A string representing the key to find. + * @param length Length of the key. + * + * @return The requested object or NULL if the key was not found or the + * object is not a container. + **/ +const ddwaf_object* ddwaf_object_find(const ddwaf_object *object, const char *key, size_t length); /** * ddwaf_object_free diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/log.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log.go similarity index 100% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/log/log.go rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log.go diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_cgo.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_cgo.go new file mode 100644 index 00000000..8f071517 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_cgo.go @@ -0,0 +1,35 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build (linux || darwin) && (amd64 || arm64) && !go1.26 && !datadog.no_waf && cgo + +package log + +// #include "./ddwaf.h" +// extern void ddwafLogCallbackFnV4( +// DDWAF_LOG_LEVEL level, +// char* function, +// char* file, +// unsigned line, +// char* message, +// uint64_t message_len +// ); +import "C" +import "github.com/DataDog/go-libddwaf/v4/internal/unsafe" + +// CallbackFunctionPointer returns a pointer to the log callback function which +// can be used with libddwaf. +func CallbackFunctionPointer() uintptr { + return uintptr(C.ddwafLogCallbackFnV4) +} + +//export ddwafLogCallbackFnV4 +func ddwafLogCallbackFnV4(level C.DDWAF_LOG_LEVEL, fnPtr, filePtr *C.char, line C.unsigned, msgPtr *C.char, _ C.uint64_t) { + function := unsafe.Gostring(unsafe.CastNative[C.char, byte](fnPtr)) + file := unsafe.Gostring(unsafe.CastNative[C.char, byte](filePtr)) + message := unsafe.Gostring(unsafe.CastNative[C.char, byte](msgPtr)) + + logMessage(Level(level), function, file, uint(line), message) +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_purego.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_purego.go new file mode 100644 index 00000000..7bbc92ac --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_purego.go @@ -0,0 +1,37 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
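The vendored logging bridge above comes in two flavours: the cgo file exports ddwafLogCallbackFnV4 to C through a //export directive, while the purego variant that follows registers a Go function as a C-callable pointer at runtime via purego.NewCallback, memoized with sync.OnceValue because purego permits only a bounded number of callbacks per process. A standalone sketch of the purego path, assuming a deliberately simplified callback signature (the real hook receives level, function, file, line, and message):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/ebitengine/purego"
)

// logCallback stands in for the WAF log hook; its signature is simplified
// for illustration (the vendored callback takes five parameters).
func logCallback(level uintptr) uintptr {
	fmt.Println("waf log, level:", level)
	return 0
}

// callbackPtr memoizes the registration: purego.NewCallback has a hard cap
// on how many callbacks may ever be created, so the vendored code wraps it
// in sync.OnceValue in exactly this way.
var callbackPtr = sync.OnceValue(func() uintptr {
	return purego.NewCallback(logCallback)
})

func main() {
	fmt.Printf("C-callable function pointer: %#x\n", callbackPtr())
}
```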
+ +//go:build (linux || darwin) && (amd64 || arm64) && !go1.26 && !datadog.no_waf && !cgo && appsec + +package log + +import ( + "sync" + + "github.com/DataDog/go-libddwaf/v4/internal/unsafe" + + "github.com/ebitengine/purego" +) + +var ( + once = sync.OnceValue(func() uintptr { + return purego.NewCallback(ddwafLogCallbackFn) + }) + functionPointer uintptr +) + +// CallbackFunctionPointer returns a pointer to the log callback function which +// can be used with libddwaf. +func CallbackFunctionPointer() uintptr { + return once() +} + +func ddwafLogCallbackFn(level Level, fnPtr, filePtr *byte, line uint, msgPtr *byte, _ uint64) { + function := unsafe.Gostring(fnPtr) + file := unsafe.Gostring(filePtr) + message := unsafe.Gostring(msgPtr) + + logMessage(level, function, file, line, message) +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/log_unsupported.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_unsupported.go similarity index 81% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/log/log_unsupported.go rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_unsupported.go index 3b7b51fe..f548fec1 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/log_unsupported.go +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_unsupported.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build (!cgo && ((!darwin && !freebsd) || go1.24)) || datadog.no_waf +//go:build (!linux && !darwin) || (!amd64 && !arm64) || go1.26 || datadog.no_waf || (!cgo && !appsec) package log diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/pin/pinner.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/pin/pinner.go new file mode 100644 index 00000000..457b976b --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/pin/pinner.go @@ -0,0 +1,53 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package pin + +import ( + "runtime" + "sync" +) + +// A Pinner is a set of Go objects each pinned to a fixed location in memory. +// The [Pinner.Pin] method pins one object, while [Pinner.Unpin] unpins all +// pinned objects. See their comments for more information. +type Pinner interface { + // Pin pins a Go object, preventing it from being moved or freed by the + // garbage collector until the [Pinner.Unpin] method has been called. + // + // A pointer to a pinned object can be directly stored in C memory or can be + // contained in Go memory passed to C functions. If the pinned object itself + // contains pointers to Go objects, these objects must be pinned separately if + // they are going to be accessed from C code. + // + // The argument must be a pointer of any type or an [unsafe.Pointer]. + // It's safe to call Pin on non-Go pointers, in which case Pin will do + // nothing. + Pin(any) + + // Unpin unpins all pinned objects of the [Pinner]. + Unpin() +} + +var _ Pinner = (*runtime.Pinner)(nil) + +// ConcurrentPinner is a [Pinner] that is safe for concurrent use by multiple +// goroutines. 
+type ConcurrentPinner struct { + runtime.Pinner + sync.Mutex +} + +func (p *ConcurrentPinner) Pin(v any) { + p.Lock() + p.Pinner.Pin(v) + p.Unlock() +} + +func (p *ConcurrentPinner) Unpin() { + p.Lock() + p.Pinner.Unpin() + p.Unlock() +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/.gitattributes b/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/.gitattributes new file mode 100644 index 00000000..fb0380ea --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/.gitattributes @@ -0,0 +1 @@ +/recommended.json.gz linguist-vendored diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/recommended.json.gz b/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/recommended.json.gz new file mode 100644 index 00000000..79ef2094 Binary files /dev/null and b/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/recommended.json.gz differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/ruleset.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/ruleset.go new file mode 100644 index 00000000..e66b2d81 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/ruleset.go @@ -0,0 +1,36 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux || darwin + +package ruleset + +import ( + "bytes" + "compress/gzip" + _ "embed" // For go:embed + "runtime" + + "github.com/DataDog/go-libddwaf/v4/internal/bindings" + "github.com/DataDog/go-libddwaf/v4/json" +) + +//go:embed recommended.json.gz +var defaultRuleset []byte + +func DefaultRuleset(pinner *runtime.Pinner) (bindings.WAFObject, error) { + gz, err := gzip.NewReader(bytes.NewReader(defaultRuleset)) + if err != nil { + return bindings.WAFObject{}, err + } + + dec := json.NewDecoder(gz, pinner) + + var ruleset bindings.WAFObject + if err := dec.Decode(&ruleset); err != nil { + return bindings.WAFObject{}, err + } + return ruleset, nil +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/ruleset_unsupported.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/ruleset_unsupported.go new file mode 100644 index 00000000..524bde30 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/ruleset/ruleset_unsupported.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc.
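ruleset.go above decodes the embedded, gzip-compressed recommended ruleset straight into pinned, C-compatible WAF objects. The same control flow expressed with only the standard library, as a hedged sketch: it decodes into map[string]any instead of bindings.WAFObject, and the embed name simply mirrors the vendored asset:

```go
package main

import (
	"bytes"
	"compress/gzip"
	_ "embed"
	"encoding/json"
	"fmt"
)

// Illustrative embed mirroring the vendored recommended.json.gz asset.
//
//go:embed recommended.json.gz
var defaultRuleset []byte

// loadDefaultRuleset gunzips and decodes the embedded ruleset. The vendored
// version decodes into pinned, C-compatible WAFObjects rather than
// map[string]any, but the control flow is the same.
func loadDefaultRuleset() (map[string]any, error) {
	gz, err := gzip.NewReader(bytes.NewReader(defaultRuleset))
	if err != nil {
		return nil, err
	}
	defer gz.Close()

	dec := json.NewDecoder(gz)
	dec.UseNumber() // preserve the integer/float distinction, as the vendored decoder does

	var ruleset map[string]any
	if err := dec.Decode(&ruleset); err != nil {
		return nil, err
	}
	return ruleset, nil
}

func main() {
	rs, err := loadDefaultRuleset()
	if err != nil {
		panic(err)
	}
	fmt.Println("top-level ruleset keys:", len(rs))
}
```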
+ +//go:build !(linux || darwin) + +package ruleset + +import ( + "errors" + "runtime" + + "github.com/DataDog/go-libddwaf/v4/internal/bindings" +) + +func DefaultRuleset(pinner *runtime.Pinner) (bindings.WAFObject, error) { + return bindings.WAFObject{}, errors.New("the default ruleset is not available on unsupported platforms") +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_cgo_disabled.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_cgo_disabled.go similarity index 79% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_cgo_disabled.go rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_cgo_disabled.go index 9c4ed83c..0c06bdf5 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_cgo_disabled.go +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_cgo_disabled.go @@ -9,8 +9,8 @@ package support -import "github.com/DataDog/go-libddwaf/v3/errors" +import "github.com/DataDog/go-libddwaf/v4/waferrors" func init() { - wafSupportErrors = append(wafSupportErrors, errors.CgoDisabledError{}) + wafSupportErrors = append(wafSupportErrors, waferrors.CgoDisabledError{}) } diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_manually_disabled.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_manually_disabled.go similarity index 75% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_manually_disabled.go rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_manually_disabled.go index 8c65ddbc..98aa5e3d 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_manually_disabled.go +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_manually_disabled.go @@ -8,8 +8,8 @@ package support -import "github.com/DataDog/go-libddwaf/v3/errors" +import "github.com/DataDog/go-libddwaf/v4/waferrors" func init() { - wafManuallyDisabledErr = errors.ManuallyDisabledError{} + wafManuallyDisabledErr = waferrors.ManuallyDisabledError{} } diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_support.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_support.go similarity index 100% rename from vendor/github.com/DataDog/go-libddwaf/v3/internal/support/waf_support.go rename to vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_support.go diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_unsupported_go.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_unsupported_go.go new file mode 100644 index 00000000..7e8d7452 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_unsupported_go.go @@ -0,0 +1,15 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
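The internal/support files above and below follow a build-tag-plus-init() registration pattern: each file only compiles on configurations the WAF cannot support, and its init() contributes an error that Usable() later joins. A minimal sketch of the same pattern with illustrative names (supportErrors and supportErr are not the vendored identifiers):

```go
// Sketch of the build-tag + init() capability pattern used by the
// internal/support package; names here are illustrative.
package main

import (
	"errors"
	"fmt"
)

var supportErrors []error

// In the vendored code this init() lives in a file guarded by a build tag
// such as `//go:build go1.26`, so it only compiles (and registers its error)
// on configurations the WAF does not support.
func init() {
	supportErrors = append(supportErrors, errors.New("unsupported configuration (illustrative)"))
}

// supportErr joins all registered support errors; nil means fully supported.
func supportErr() error {
	return errors.Join(supportErrors...)
}

func main() {
	fmt.Println("usable:", supportErr() == nil)
}
```

The appeal of the pattern is that the decision is made entirely at compile time: on supported configurations none of the constrained files compile, the slice stays empty, and the check is free.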
+ +// Unsupported Go versions (>=) +//go:build go1.26 + +package support + +import "github.com/DataDog/go-libddwaf/v4/waferrors" + +func init() { + wafSupportErrors = append(wafSupportErrors, waferrors.UnsupportedGoVersionError{}) +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_unsupported_target.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_unsupported_target.go new file mode 100644 index 00000000..2757cc54 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_unsupported_target.go @@ -0,0 +1,20 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Unsupported target OS or architecture +// Unsupported OS Unsupported Arch +//go:build (!linux && !darwin) || (!amd64 && !arm64) + +package support + +import ( + "runtime" + + "github.com/DataDog/go-libddwaf/v4/waferrors" +) + +func init() { + wafSupportErrors = append(wafSupportErrors, waferrors.UnsupportedOSArchError{OS: runtime.GOOS, Arch: runtime.GOARCH}) +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/unsafe/utils.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/unsafe/utils.go new file mode 100644 index 00000000..6f6c74d1 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/unsafe/utils.go @@ -0,0 +1,112 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package unsafe + +import ( + "runtime" + "unsafe" +) + +type Pointer = unsafe.Pointer + +func SliceData[E any, T ~[]E](slice T) *E { + return unsafe.SliceData(slice) +} + +func StringData(str string) *byte { + return unsafe.StringData(str) +} + +// Gostring copies a char* to a Go string. +func Gostring(ptr *byte) string { + if ptr == nil { + return "" + } + var length int + for *(*byte)(unsafe.Add(unsafe.Pointer(ptr), uintptr(length))) != '\x00' { + length++ + } + // the string builtin copies the slice + return string(unsafe.Slice(ptr, length)) +} + +type StringHeader struct { + Len int + Data *byte +} + +// NativeStringUnwrap casts a native string type into its runtime representation. +func NativeStringUnwrap(str string) StringHeader { + return StringHeader{ + Data: unsafe.StringData(str), + Len: len(str), + } +} + +func GostringSized(ptr *byte, size uint64) string { + if ptr == nil { + return "" + } + return string(unsafe.Slice(ptr, size)) +} + +// Cstring converts a Go string to a *byte that can be passed to C code. +func Cstring(pinner *runtime.Pinner, name string) *byte { + var b = make([]byte, len(name)+1) + copy(b, name) + pinner.Pin(&b[0]) + return unsafe.SliceData(b) +} + +// Cast is used to centralize unsafe use of C-allocated pointers. + +// We take the address and then dereference it to keep go vet from flagging a possible misuse of unsafe.Pointer +func Cast[T any](ptr uintptr) *T { + return (*T)(*(*unsafe.Pointer)(unsafe.Pointer(&ptr))) +} + +type Native interface { + ~byte | ~float64 | ~float32 | ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~bool | ~uintptr +} + +func CastNative[N Native, T Native](ptr *N) *T { + return (*T)(*(*unsafe.Pointer)(unsafe.Pointer(&ptr))) +} + +// NativeToUintptr is a helper used to populate WAFObject values +// with Go values +func NativeToUintptr[T any](x T) uintptr { + return *(*uintptr)(unsafe.Pointer(&x)) +} + +// UintptrToNative is a helper used to retrieve Go values from an uintptr-encoded +// value of a WAFObject +func UintptrToNative[T any](x uintptr) T { + return *(*T)(unsafe.Pointer(&x)) +} + +// CastWithOffset is the same as Cast but adds an offset to the pointer by a multiple of the size +// of the pointed-to type. +func CastWithOffset[T any](ptr uintptr, offset uint64) *T { + return (*T)(unsafe.Add(*(*unsafe.Pointer)(unsafe.Pointer(&ptr)), offset*uint64(unsafe.Sizeof(*new(T))))) +} + +// PtrToUintptr is a helper to centralize usage of unsafe.Pointer; +// do not use this function to cast interfaces +func PtrToUintptr[T any](arg *T) uintptr { + return uintptr(unsafe.Pointer(arg)) +} + +func SliceToUintptr[T any](arg []T) uintptr { + return uintptr(unsafe.Pointer(unsafe.SliceData(arg))) +} + +func Slice[T any](ptr *T, length uint64) []T { + return unsafe.Slice(ptr, length) +} + +func String(ptr *byte, length uint64) string { + return unsafe.String(ptr, length) +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/json/decoder.go b/vendor/github.com/DataDog/go-libddwaf/v4/json/decoder.go new file mode 100644 index 00000000..462b8c02 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/json/decoder.go @@ -0,0 +1,138 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package json + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "runtime" + "strconv" + "unique" + + "github.com/DataDog/go-libddwaf/v4/internal/bindings" +) + +type Decoder struct { + pinner *runtime.Pinner + json *json.Decoder +} + +func NewDecoder(rd io.Reader, pinner *runtime.Pinner) *Decoder { + js := json.NewDecoder(rd) + js.UseNumber() + return &Decoder{pinner: pinner, json: js} +} + +func (d *Decoder) Decode(v *bindings.WAFObject) error { + tok, err := d.json.Token() + if err != nil { + return err + } + + switch tok := tok.(type) { + case json.Delim: + switch tok { + case '{': + return d.decodeMap(v) + case '[': + return d.decodeArray(v) + default: + return fmt.Errorf("%w: %q", errors.ErrUnsupported, tok) + } + + case json.Number: + return decodeNumber(v, tok) + + case bool: + v.SetBool(tok) + return nil + + case string: + v.SetString(d.pinner, tok) + return nil + + case nil: + v.SetNil() + return nil + + default: + return fmt.Errorf("%w: %T %v", errors.ErrUnsupported, tok, tok) + } +} + +func (d *Decoder) decodeArray(v *bindings.WAFObject) error { + var items []bindings.WAFObject + for d.json.More() { + var v bindings.WAFObject + if err := d.Decode(&v); err != nil { + return err + } + items = append(items, v) + } + + // Consume the closing bracket...
+ if _, err := d.json.Token(); err != nil { + return err + } + + v.SetArrayData(d.pinner, items) + return nil +} + +func (d *Decoder) decodeMap(v *bindings.WAFObject) error { + var items []bindings.WAFObject + for d.json.More() { + keyTok, err := d.json.Token() + if err != nil { + return err + } + key, ok := keyTok.(string) + if !ok { + return fmt.Errorf("expected string key, got %T %q", keyTok, keyTok) + } + // To reduce the overall amount of memory that is retained by the resulting WAFObjects, we make + // the keys unique, as they are repeated a lot in the original JSON. + key = unique.Make(key).Value() + + var v bindings.WAFObject + v.SetMapKey(d.pinner, key) + if err := d.Decode(&v); err != nil { + return err + } + items = append(items, v) + } + + // Consume the closing brace... + if _, err := d.json.Token(); err != nil { + return err + } + + v.SetMapData(d.pinner, items) + return nil +} + +func decodeNumber(v *bindings.WAFObject, tok json.Number) error { + if i, err := strconv.ParseUint(string(tok), 10, 64); err == nil { + v.SetUint(i) + return nil + } + + if i, err := tok.Int64(); err == nil { + v.SetInt(i) + return nil + } + + f, err := tok.Float64() + if err != nil { + return fmt.Errorf("invalid number %q: %w", tok, err) + } + + v.SetFloat(f) + + return nil +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/result.go b/vendor/github.com/DataDog/go-libddwaf/v4/result.go new file mode 100644 index 00000000..9528b667 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/result.go @@ -0,0 +1,60 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package libddwaf + +import ( + "time" + + "github.com/DataDog/go-libddwaf/v4/timer" +) + +// Result stores the multiple values returned by a call to [Context.Run]. +type Result struct { + // Events is the list of events the WAF detected, together with any relevant + // details. These are typically forwarded as opaque objects to the Datadog + // backend. + Events []any + + // Derivatives is the set of key-value pairs generated by the WAF, and which + // need to be reported on the trace to provide additional data to the Datadog + // backend. + Derivatives map[string]any + + // Actions is the set of actions the WAF decided on when evaluating rules + // against the provided address data. It maps action types to their dynamic + // parameter values. + Actions map[string]any + + // TimerStats reports the time spent in the different parts of the run, keyed by the [timer.Key] constants defined below ([EncodeTimeKey], [DurationTimeKey], [DecodeTimeKey]). + TimerStats map[timer.Key]time.Duration + + // Keep is true if the WAF instructs that the trace should be set to manual keep priority. + Keep bool +} + +// HasEvents returns true if the [Result] holds at least 1 event. +func (r *Result) HasEvents() bool { + return len(r.Events) > 0 +} + +// HasDerivatives returns true if the [Result] holds at least 1 derivative. +func (r *Result) HasDerivatives() bool { + return len(r.Derivatives) > 0 +} + +// HasActions returns true if the [Result] holds at least 1 action. +func (r *Result) HasActions() bool { + return len(r.Actions) > 0 +} + +const ( + // EncodeTimeKey is the key used to track the time spent encoding the address data reported in [Result.TimerStats]. + EncodeTimeKey timer.Key = "encode" + // DurationTimeKey is the key used to track the time spent in libddwaf's ddwaf_run C function, reported in [Result.TimerStats].
+ DurationTimeKey timer.Key = "duration" + // DecodeTimeKey is the key used to track the time spent decoding the address data reported in [Result.TimerStats]. + DecodeTimeKey timer.Key = "decode" +) diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/timer/base_timer.go b/vendor/github.com/DataDog/go-libddwaf/v4/timer/base_timer.go similarity index 94% rename from vendor/github.com/DataDog/go-libddwaf/v3/timer/base_timer.go rename to vendor/github.com/DataDog/go-libddwaf/v4/timer/base_timer.go index adb68bb1..f51ee7ca 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/timer/base_timer.go +++ b/vendor/github.com/DataDog/go-libddwaf/v4/timer/base_timer.go @@ -26,10 +26,12 @@ type baseTimer struct { parent NodeTimer // componentName is the name of the component of the timer. It is used to store the time spent in the component and to propagate the stop of the timer to the parent timer. - componentName string + componentName Key // spent is the time spent on the timer, set after calling stop spent time.Duration + // stopped is true if the timer has been stopped + stopped bool } var _ Timer = (*baseTimer)(nil) @@ -67,12 +69,12 @@ func (timer *baseTimer) Start() time.Time { func (timer *baseTimer) Spent() time.Duration { // timer was never started - if timer.start == (time.Time{}) { + if timer.start.IsZero() { return 0 } // timer was already stopped - if timer.spent != 0 { + if timer.stopped { return timer.spent } @@ -102,11 +104,12 @@ func (timer *baseTimer) Exhausted() bool { func (timer *baseTimer) Stop() time.Duration { // If the current timer has already stopped, return the current spent time - if timer.spent != 0 { + if timer.stopped { return timer.spent } timer.spent = timer.Spent() + timer.stopped = true if timer.parent != nil { timer.parent.childStopped(timer.componentName, timer.spent) } diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/timer/clock.go b/vendor/github.com/DataDog/go-libddwaf/v4/timer/clock.go similarity index 100% rename from vendor/github.com/DataDog/go-libddwaf/v3/timer/clock.go rename to vendor/github.com/DataDog/go-libddwaf/v4/timer/clock.go diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/timer/component.go b/vendor/github.com/DataDog/go-libddwaf/v4/timer/component.go new file mode 100644 index 00000000..7ccb504e --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/timer/component.go @@ -0,0 +1,28 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022 Datadog, Inc. + +package timer + +import ( + "sync/atomic" +) + +// components store the data shared between child timers of the same component name +type components struct { + lookup map[Key]*atomic.Int64 + storage []atomic.Int64 +} + +func newComponents(names []Key) components { + lookup := make(map[Key]*atomic.Int64, len(names)) + storage := make([]atomic.Int64, len(names)) + for i, name := range names { + lookup[name] = &storage[i] + } + return components{ + lookup: lookup, + storage: storage, + } +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/timer/config.go b/vendor/github.com/DataDog/go-libddwaf/v4/timer/config.go new file mode 100644 index 00000000..dbaa7f0b --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/timer/config.go @@ -0,0 +1,86 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022 Datadog, Inc. + +package timer + +import ( + "math" + "time" +) + +const ( + // UnlimitedBudget is a special value for the budget that means the timer has no budget + UnlimitedBudget = time.Duration(math.MaxInt64) + + // DynamicBudget is a special value for the budget that means the timer should inherit the budget from its parent. + // It is the default value if no options such as WithBudget, WithUnlimitedBudget or WithInheritedBudget are provided. + DynamicBudget = ^time.Duration(0) +) + +// DynamicBudgetFunc is a function that is called on all children when a change to the parent happens +type DynamicBudgetFunc func(timer NodeTimer) time.Duration + +// config is the configuration of a timer. It can be created through the use of options +type config struct { + dynamicBudget DynamicBudgetFunc + // components store all the components of the timer + components []Key + // budget is the time budget for the timer + budget time.Duration +} + +func newConfig(options ...Option) config { + config := config{} + // Make sure the budget is inherited by default + WithInheritedSumBudget()(&config) + for _, option := range options { + option(&config) + } + return config +} + +// Option is a configuration option for any type of timer. Please read the documentation of the timer in question to see which options are available. +type Option func(*config) + +// WithBudget is an Option that sets the budget value +func WithBudget(budget time.Duration) Option { + return func(c *config) { + c.budget = budget + } +} + +// WithUnlimitedBudget is an Option that sets the UnlimitedBudget flag on config.budget +func WithUnlimitedBudget() Option { + return func(c *config) { + c.budget = UnlimitedBudget + } +} + +// WithInheritedBudget is an Option that sets the DynamicBudget flag on config.budget +func WithInheritedBudget() Option { + return func(c *config) { + c.budget = DynamicBudget + c.dynamicBudget = func(timer NodeTimer) time.Duration { + return timer.Remaining() + } + } +} + +// WithInheritedSumBudget is an Option that sets the DynamicBudget flag on config.budget and sets the DynamicBudgetFunc to sum the remaining time of all children +func WithInheritedSumBudget() Option { + return func(c *config) { + c.budget = DynamicBudget + c.dynamicBudget = func(timer NodeTimer) time.Duration { + return timer.SumRemaining() + } + } +} + +// WithComponents is an Option that adds multiple components to the components list +func WithComponents(components ...Key) Option { + return func(c *config) { + c.components = append(c.components, components...)
+ } +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/timer/node_timer.go b/vendor/github.com/DataDog/go-libddwaf/v4/timer/node_timer.go similarity index 79% rename from vendor/github.com/DataDog/go-libddwaf/v3/timer/node_timer.go rename to vendor/github.com/DataDog/go-libddwaf/v4/timer/node_timer.go index 1115f59e..5d20a062 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/timer/node_timer.go +++ b/vendor/github.com/DataDog/go-libddwaf/v4/timer/node_timer.go @@ -26,10 +26,6 @@ func NewTreeTimer(options ...Option) (NodeTimer, error) { return nil, errors.New("root timer cannot inherit parent budget, please provide a budget using timer.WithBudget() or timer.WithUnlimitedBudget()") } - if len(config.components) == 0 { - return nil, errors.New("NewTreeTimer: tree timer must have at least one component, otherwise use NewTimer()") - } - return &nodeTimer{ baseTimer: baseTimer{ config: config, @@ -39,7 +35,7 @@ func NewTreeTimer(options ...Option) (NodeTimer, error) { }, nil } -func (timer *nodeTimer) NewNode(name string, options ...Option) (NodeTimer, error) { +func (timer *nodeTimer) NewNode(name Key, options ...Option) (NodeTimer, error) { config := newConfig(options...) if len(config.components) == 0 { return nil, errors.New("NewNode: node timer must have at least one component, otherwise use NewLeaf()") @@ -61,7 +57,7 @@ func (timer *nodeTimer) NewNode(name string, options ...Option) (NodeTimer, erro }, nil } -func (timer *nodeTimer) NewLeaf(name string, options ...Option) (Timer, error) { +func (timer *nodeTimer) NewLeaf(name Key, options ...Option) (Timer, error) { config := newConfig(options...) if len(config.components) != 0 { return nil, errors.New("NewLeaf: leaf timer cannot have components, otherwise use NewNode()") @@ -80,7 +76,7 @@ func (timer *nodeTimer) NewLeaf(name string, options ...Option) (Timer, error) { }, nil } -func (timer *nodeTimer) MustLeaf(name string, options ...Option) Timer { +func (timer *nodeTimer) MustLeaf(name Key, options ...Option) Timer { leaf, err := timer.NewLeaf(name, options...) 
if err != nil { panic(err) @@ -90,16 +86,11 @@ func (timer *nodeTimer) MustLeaf(name string, options ...Option) Timer { func (timer *nodeTimer) childStarted() {} -func (timer *nodeTimer) childStopped(componentName string, duration time.Duration) { +func (timer *nodeTimer) childStopped(componentName Key, duration time.Duration) { timer.components.lookup[componentName].Add(int64(duration)) - if timer.parent == nil { - return - } - - timer.parent.childStopped(timer.componentName, duration) } -func (timer *nodeTimer) AddTime(name string, duration time.Duration) { +func (timer *nodeTimer) AddTime(name Key, duration time.Duration) { value, ok := timer.components.lookup[name] if !ok { return @@ -108,8 +99,8 @@ func (timer *nodeTimer) AddTime(name string, duration time.Duration) { value.Add(int64(duration)) } -func (timer *nodeTimer) Stats() map[string]time.Duration { - stats := make(map[string]time.Duration, len(timer.components.lookup)) +func (timer *nodeTimer) Stats() map[Key]time.Duration { + stats := make(map[Key]time.Duration, len(timer.components.lookup)) for name, component := range timer.components.lookup { stats[name] = time.Duration(component.Load()) } diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/timer/timer.go b/vendor/github.com/DataDog/go-libddwaf/v4/timer/timer.go new file mode 100644 index 00000000..2a67ed1c --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/timer/timer.go @@ -0,0 +1,118 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022 Datadog, Inc. + +package timer + +import ( + "time" +) + +// Key is used to keep track of each component of a tree timer. It can be used to create constants that identify components in the tree. +type Key string + +// Timer is the default interface for all timers. NewTimer will provide you with a Timer. +// Keep in mind that they are NOT thread-safe and once Stop() is called, the Timer cannot be restarted. +type Timer interface { + // Start starts the timer and returns the start time. + // If the timer was already started, it returns the previous start time. + // If the timer was started without specifying a budget, it will inherit the budget from its parent when calling Start(). + // If the timer has no parent and no budget was specified, the call creating the timer (either NewTreeTimer or NewTimer) will return an error asking to specify a budget (which can be unlimited). + // Start is NOT thread-safe + Start() time.Time + + // Stop ends the timer and returns the time spent on the timer as Spent() would. + // Stop will trigger the computation of sum timers if the timer is part of a tree. See NodeTimer for more information. + // Stop is NOT thread-safe + Stop() time.Duration + + // Spent returns the current time spent between Start() and Stop() or between Start() and now if the timer is still running. + // Spent is thread-safe + Spent() time.Duration + + // Remaining returns the time remaining before the timer reaches its budget. (budget - Spent()) + // It returns 0 if the timer is exhausted. Remaining may never return a value below zero. + // Remaining only makes sense if the timer has a budget. If the timer has no budget, it returns the special value UnlimitedBudget. + // Remaining is thread-safe + Remaining() time.Duration + + // Exhausted returns true if the time spent in the timer is greater than the budget. (Spent() > budget) + // Exhausted may return true only if the timer has a budget. If the timer has no budget, it returns false. + // Exhausted is thread-safe + Exhausted() bool + + // Timed is a convenience function that starts the timer, calls the provided function and stops the timer. + // Timed is panic-safe and will stop the timer even if the function panics. + // Timed is NOT thread-safe + Timed(timedFunc func(timer Timer)) time.Duration +} + +// SumTimer is a sub-interface for timers capable of having children and making the sum of their time spent. +// NewTreeTimer will provide you with a timer supporting this interface +type SumTimer interface { + // SumSpent returns the sum of the time spent in each component of the timer. + // SumSpent is thread-safe + SumSpent() time.Duration + + // SumRemaining returns the sum of the time remaining in each component of the timer. + // SumRemaining returns UnlimitedBudget if the timer has no budget. (UnlimitedBudget) + // SumRemaining is thread-safe + SumRemaining() time.Duration + + // SumExhausted returns true if the sum of the time spent in each component of the timer is greater than the budget. + // SumExhausted returns false if the timer has no budget. (UnlimitedBudget) + // SumExhausted is thread-safe + SumExhausted() bool +} + +// NodeTimer is the interface for tree timers. NewTreeTimer will provide you with a NodeTimer. +// NodeTimer can have children (NodeTimer or Timer) and will compute the sum of their spent time each time a child timer calls its Stop() method. +// To add children to a NodeTimer, you have to specify component names when creating the timer with the WithComponents option. +// The component names must be unique and cannot be empty. The component names are used to identify the children timers. +// The returned timer can then create children timers using the NewNode and NewLeaf functions using the names provided when creating the parent timer. +// Multiple timers from the same component can be used in parallel and will be summed together. +// In parallel to that, a NodeTimer can have its own wall-time timer and budget that apply to the sum of its children and its own timer. +// The following functions are the same as the Timer interface but work using the sum of the children timers: +// - SumSpent() -> Spent() +// - SumRemaining() -> Remaining() +// - SumExhausted() -> Exhausted() +// Keep in mind that the timer itself (only Start and Stop) is NOT thread-safe and once Stop() is called, the NodeTimer cannot be restarted. +type NodeTimer interface { + Timer + SumTimer + + // NewNode creates a new NodeTimer with the given name and options. The given name must match one of the component names of the parent timer. + // A node timer is required to have at least one component. If no component is provided, it will return an error asking you to use NewLeaf instead. + // If no budget is provided, it will inherit the budget from its parent when calling Start(). + // NewNode is thread-safe + NewNode(name Key, options ...Option) (NodeTimer, error) + + // NewLeaf creates a new Timer with the given name and options. The given name must match one of the component names of the parent timer. + // A leaf timer is forbidden to have components. If a component is provided, it will return an error asking you to use NewNode instead. + // If no budget is provided, it will inherit the budget from its parent when calling Start().
+ // NewLeaf is thread-safe + NewLeaf(name Key, options ...Option) (Timer, error) + + // MustLeaf creates a new Timer with the given name and options. The given name must match one of the component names of the parent timer. + // MustLeaf wraps a call to NewLeaf but will panic if the error is not nil. + // MustLeaf is thread-safe + MustLeaf(name Key, options ...Option) Timer + + // AddTime adds the given duration to the component of the timer with the given name. + // AddTime is thread-safe + AddTime(name Key, duration time.Duration) + + // Stats returns a map of the time spent in each component of the timer. + // Stats is thread-safe + Stats() map[Key]time.Duration + + // childStarted is used to propagate the start of a child timer to the parent timer through the whole tree. + childStarted() + + // childStopped is used to propagate the time spent in a child timer to the parent timer through the whole tree. + childStopped(componentName Key, duration time.Duration) + + // now is a convenience wrapper to swap the time.Now() function for testing and performance purposes. + now() time.Time +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/waf.go b/vendor/github.com/DataDog/go-libddwaf/v4/waf.go new file mode 100644 index 00000000..fe005128 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/waf.go @@ -0,0 +1,80 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package libddwaf + +import ( + "errors" + "sync" + + "github.com/DataDog/go-libddwaf/v4/internal/bindings" + "github.com/DataDog/go-libddwaf/v4/internal/support" +) + +// Globally dlopen() libddwaf only once because several dlopens (e.g. in tests) +// aren't supported by macOS. +var ( + // libddwaf's dynamic library handle and entrypoints + wafLib *bindings.WAFLib + // libddwaf's dlopen error if any + wafLoadErr error + openWafOnce sync.Once +) + +// Load loads libddwaf's dynamic library. The dynamic library is opened only +// once by the first call to this function and internally stored globally. +// No function is currently provided in this API to unload it. +// +// This function is automatically called by [NewBuilder], and most users need +// not explicitly call it. It is, however, useful to explicitly check the +// status of the WAF library's initialization. +// +// The function returns true when libddwaf was successfully loaded, along with +// an error value. An error might still be returned even though the WAF load was +// successful: in such cases the error indicates that some non-critical +// features are not available, but the WAF may still be used. +func Load() (bool, error) { + if ok, err := Usable(); !ok { + return false, err + } + + openWafOnce.Do(func() { + wafLib, wafLoadErr = bindings.NewWAFLib() + if wafLoadErr != nil { + return + } + wafVersion = wafLib.GetVersion() + }) + + return wafLib != nil, wafLoadErr +} + +var wafVersion string + +// Version returns the version returned by libddwaf. +// It relies on the dynamic loading of the library, which can fail and return +// an empty string or the previously loaded version, if any. +func Version() string { + _, _ = Load() + return wafVersion +} + +// Usable returns true if the WAF is usable, false and an error otherwise.
+// +// If the WAF is usable, an error value may still be returned and should be +// treated as a warning (it is non-blocking). +// +// The following conditions are checked: +// - The WAF library has been loaded successfully (you need to call [Load] first for this case to be +// taken into account) +// - The WAF library has not been manually disabled with the `datadog.no_waf` go build tag +// - The WAF library is not in an unsupported OS/Arch +// - The WAF library is not in an unsupported Go version +func Usable() (bool, error) { + wafSupportErrors := errors.Join(support.WafSupportErrors()...) + wafManuallyDisabledErr := support.WafManuallyDisabledError() + + return (wafLib != nil || wafLoadErr == nil) && wafSupportErrors == nil && wafManuallyDisabledErr == nil, errors.Join(wafLoadErr, wafSupportErrors, wafManuallyDisabledErr) +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/errors/support.go b/vendor/github.com/DataDog/go-libddwaf/v4/waferrors/support.go similarity index 86% rename from vendor/github.com/DataDog/go-libddwaf/v3/errors/support.go rename to vendor/github.com/DataDog/go-libddwaf/v4/waferrors/support.go index 79fbe1da..37cd2fc6 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/errors/support.go +++ b/vendor/github.com/DataDog/go-libddwaf/v4/waferrors/support.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package errors +package waferrors import ( "fmt" @@ -13,12 +13,12 @@ import ( // UnsupportedOSArchError is a wrapper error type helping to handle the error // case of trying to execute this package when the OS or architecture is not supported. type UnsupportedOSArchError struct { - Os string + OS string Arch string } func (e UnsupportedOSArchError) Error() string { - return fmt.Sprintf("unsupported OS/Arch: %s/%s", e.Os, e.Arch) + return fmt.Sprintf("unsupported OS/Arch: %s/%s", e.OS, e.Arch) } // UnsupportedGoVersionError is a wrapper error type helping to handle the error @@ -26,7 +26,10 @@ func (e UnsupportedOSArchError) Error() string { type UnsupportedGoVersionError struct{} func (e UnsupportedGoVersionError) Error() string { - return fmt.Sprintf("unsupported Go version: %s", runtime.Version()) + return fmt.Sprintf( + "unsupported Go version: %s (try running `go get github.com/DataDog/go-libddwaf@latest`)", + runtime.Version(), + ) } type CgoDisabledError struct{} diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/waferrors/waf.go b/vendor/github.com/DataDog/go-libddwaf/v4/waferrors/waf.go new file mode 100644 index 00000000..34c46c88 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v4/waferrors/waf.go @@ -0,0 +1,108 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package waferrors + +import ( + "errors" + "fmt" +) + +var ( + // ErrContextClosed is returned when an operation is attempted on a + // [github.com/DataDog/go-libddwaf/v4.Context] that has already been closed. + ErrContextClosed = errors.New("closed WAF context") + + // ErrMaxDepthExceeded is returned when the WAF encounters a value that + // exceeds the maximum depth. + ErrMaxDepthExceeded = errors.New("max depth exceeded") + // ErrUnsupportedValue is returned when the WAF encounters a value that + // is not supported by the encoder or decoder. 
+ ErrUnsupportedValue = errors.New("unsupported Go value") + // ErrInvalidMapKey is returned when the WAF encounters an invalid map key. + ErrInvalidMapKey = errors.New("invalid WAF object map key") + // ErrNilObjectPtr is returned when the WAF encounters a nil object pointer at + // an unexpected location. + ErrNilObjectPtr = errors.New("nil WAF object pointer") + // ErrInvalidObjectType is returned when the WAF encounters an invalid type + // when decoding a value. + ErrInvalidObjectType = errors.New("invalid type encountered when decoding") + // ErrTooManyIndirections is returned when the WAF encounters a value that + // exceeds the maximum number of indirections (pointer to pointer to...). + ErrTooManyIndirections = errors.New("too many indirections") +) + +// RunError is the type of error the WAF can return when running. +type RunError int + +// Errors the WAF can return when running it. +const ( + // ErrInternal denotes a WAF internal error. + ErrInternal RunError = iota + 1 + // ErrInvalidObject is returned when the WAF received an invalid object. + ErrInvalidObject + // ErrInvalidArgument is returned when the WAF received an invalid argument. + ErrInvalidArgument + // ErrTimeout is returned when the WAF ran out of time budget to spend. + ErrTimeout + // ErrOutOfMemory is returned when the WAF ran out of memory when trying to + // allocate a result object. + ErrOutOfMemory + // ErrEmptyRuleAddresses is returned when the WAF received an empty list of + // rule addresses. + ErrEmptyRuleAddresses +) + +var errorStrMap = map[RunError]string{ + ErrInternal: "internal waf error", + ErrInvalidObject: "invalid waf object", + ErrInvalidArgument: "invalid waf argument", + ErrTimeout: "waf timeout", + ErrOutOfMemory: "out of memory", + ErrEmptyRuleAddresses: "empty rule addresses", +} + +// Error returns the string representation of the [RunError]. +func (e RunError) Error() string { + description, ok := errorStrMap[e] + if !ok { + return fmt.Sprintf("unknown waf error %d", e) + } + + return description +} + +// ToWafErrorCode converts an error to a WAF error code; it returns zero if the +// error is not a [RunError]. +func ToWafErrorCode(in error) int { + var runError RunError + if !errors.As(in, &runError) { + return 0 + } + return int(runError) +} + +// PanicError is an error type wrapping a recovered panic value that happened +// during a function call. Such an error must be considered unrecoverable and +// should be used to abort gracefully. Continuing to use this package after such an +// error is unreliable, and the caller should stop using the library. +// Examples include safety-check errors. +type PanicError struct { + // The recovered panic error while executing the function `in`. + Err error + // The function symbol name that was given to `tryCall()`. + In string +} + +// Unwrap the error and return it. +// Required by errors.Is and errors.As functions. +func (e *PanicError) Unwrap() error { + return e.Err +} + +// Error returns the error string representation. +func (e *PanicError) Error() string { + return fmt.Sprintf("panic while executing %s: %#+v", e.In, e.Err) +} diff --git a/vendor/github.com/DataDog/go-runtime-metrics-internal/LICENSE b/vendor/github.com/DataDog/go-runtime-metrics-internal/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/DataDog/go-runtime-metrics-internal/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1.
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
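// Editor's note: a minimal sketch, not part of the diff, of how a caller is
// expected to consume the go-libddwaf error types introduced just before this
// vendored license text. It assumes it compiles in the same package as those
// declarations (which already imports errors and fmt); the helper name
// handleWAFError is hypothetical.
//
//	func handleWAFError(err error) {
//		var panicErr *PanicError
//		if errors.As(err, &panicErr) {
//			// A recovered panic is unrecoverable: stop using the library.
//			fmt.Printf("waf panicked in %s: %v\n", panicErr.In, panicErr.Unwrap())
//			return
//		}
//		if code := ToWafErrorCode(err); code != 0 {
//			// err wraps a RunError, e.g. ErrTimeout ("waf timeout").
//			fmt.Printf("waf run error %d: %v\n", code, err)
//		}
//	}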
diff --git a/vendor/github.com/DataDog/go-runtime-metrics-internal/LICENSE-3rdparty.csv b/vendor/github.com/DataDog/go-runtime-metrics-internal/LICENSE-3rdparty.csv new file mode 100644 index 00000000..9e444fcd --- /dev/null +++ b/vendor/github.com/DataDog/go-runtime-metrics-internal/LICENSE-3rdparty.csv @@ -0,0 +1,7 @@ +Component,Origin,License,Copyright +N/A,github.com/davecgh/go-spew,ISC,Copyright (c) 2012-2016 Dave Collins +N/A,github.com/pmezard/go-difflib,BSD 3-Clause,"Copyright (c) 2013, Patrick Mezard" +N/A,github.com/stretchr/testify,MIT,"Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors." +N/A,gopkg.in/check.v1,BSD-2-Clause,Copyright (c) 2010-2013 Gustavo Niemeyer +N/A,gopkg.in/yaml.v3,MIT,Copyright (c) 2006-2010 Kirill Simonov + Copyright (c) 2006-2011 Kirill Simonov +N/A,gopkg.in/yaml.v3,Apache-2.0,Copyright (c) 2011-2019 Canonical Ltd \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-runtime-metrics-internal/NOTICE b/vendor/github.com/DataDog/go-runtime-metrics-internal/NOTICE new file mode 100644 index 00000000..702d2b3e --- /dev/null +++ b/vendor/github.com/DataDog/go-runtime-metrics-internal/NOTICE @@ -0,0 +1,4 @@ +Datadog go-runtime-metrics-internal +Copyright 2024-2024 Datadog, Inc. + +This product includes software developed at Datadog (). \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-runtime-metrics-internal/pkg/runtimemetrics/histogram.go b/vendor/github.com/DataDog/go-runtime-metrics-internal/pkg/runtimemetrics/histogram.go new file mode 100644 index 00000000..9ae911ee --- /dev/null +++ b/vendor/github.com/DataDog/go-runtime-metrics-internal/pkg/runtimemetrics/histogram.go @@ -0,0 +1,195 @@ +package runtimemetrics + +import ( + "math" + "runtime/metrics" + "slices" + "sort" +) + +// As of 2023/04, the statsd client does not support sending fully formed +// histograms to the datadog-agent. +// +// These helpers extract the histograms exported by the runtime/metrics +// package into multiple values representing: avg, min, max, median, p95 +// and p99 values of these histograms, so we can submit them as gauges to +// the agent. 
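// Editor's sketch, not part of the diff: a worked example of the percentile
// extraction implemented below, assuming it runs in this package (fmt would
// need to be imported). With bucket boundaries [0, 1, 2] and counts [3, 1],
// total = 4, so the p50 target count is 4*0.5 = 2; that count falls in the
// first bucket [0, 1], and linear interpolation gives 0 + (1-0)*(2/3) ≈ 0.667,
// while p100 is the top bucket boundary, 2.
//
//	func examplePercentiles() {
//		h := &metrics.Float64Histogram{
//			Counts:  []uint64{3, 1},
//			Buckets: []float64{0, 1, 2},
//		}
//		fmt.Println(percentiles(h, []float64{0.5, 1.0})) // ≈ [0.667 2]
//	}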
+ +type histogramStats struct { + Avg float64 + Min float64 // aka P0 + Median float64 // aka P50 + P95 float64 + P99 float64 + Max float64 // aka P100 +} + +type distributionSample struct { + Value float64 + Rate float64 +} + +func distributionSamplesFromHist(h *metrics.Float64Histogram, samples []distributionSample) []distributionSample { + for i, count := range h.Counts { + start, end := h.Buckets[i], h.Buckets[i+1] + // Handle edge cases where start or end of buckets could be infinity + if i == 0 && math.IsInf(h.Buckets[0], -1) { + start = end + } + if i == len(h.Counts)-1 && math.IsInf(h.Buckets[len(h.Buckets)-1], 1) { + end = start + } + if start == end && math.IsInf(start, 0) { + // All buckets are empty, return early + return samples + } + + if count == 0 { + // Don't submit empty buckets + continue + } + + sample := distributionSample{ + Value: (start + end) / 2, + Rate: 1 / float64(count), + } + samples = append(samples, sample) + } + return samples +} + +func statsFromHist(h *metrics.Float64Histogram) *histogramStats { + p := percentiles(h, []float64{0, 0.5, 0.95, 0.99, 1}) + return &histogramStats{ + Avg: avg(h), + Min: p[0], + Median: p[1], + P95: p[2], + P99: p[3], + Max: p[4], + } +} + +// Return the difference between both histograms, and whether +// the two histograms are equal +// We assume a and b always have the same lengths for `Counts` and +// `Buckets` slices which is guaranteed by the runtime/metrics +// package: https://go.dev/src/runtime/metrics/histogram.go +func sub(a, b *metrics.Float64Histogram) (*metrics.Float64Histogram, bool) { + equal := true + res := &metrics.Float64Histogram{ + Counts: make([]uint64, len(a.Counts)), + Buckets: make([]float64, len(a.Buckets)), + } + copy(res.Buckets, a.Buckets) + for i := range res.Counts { + count := a.Counts[i] - b.Counts[i] + res.Counts[i] = count + if equal && count != 0 { + equal = false + } + } + return res, equal +} + +func avg(h *metrics.Float64Histogram) float64 { + var total float64 + var cumulative float64 + for i, count := range h.Counts { + start, end := h.Buckets[i], h.Buckets[i+1] + // Handle edge cases where start or end of buckets could be infinity + if i == 0 && math.IsInf(h.Buckets[0], -1) { + start = end + } + if i == len(h.Counts)-1 && math.IsInf(h.Buckets[len(h.Buckets)-1], 1) { + end = start + } + if start == end && math.IsInf(start, 0) { + return 0 + } + cumulative += float64(count) * (float64(start+end) / 2) + total += float64(count) + } + if total == 0 { + return 0 + } + return cumulative / total +} + +// This function takes a runtime/metrics histogram, and a slice of all +// percentiles to compute for that histogram. It computes all percentiles +// in a single pass and returns the results which is more efficient than +// computing each percentile separately. 
+func percentiles(h *metrics.Float64Histogram, pInput []float64) []float64 { + p := make([]float64, len(pInput)) + copy(p, pInput) + sort.Float64s(p) + + if p[0] < 0.0 || p[len(p)-1] > 1.0 { + panic("percentiles is invoked with a <0 or >1 percentile") + } + + results := make([]float64, len(p)) + + var total float64 // total count across all buckets + for i := range h.Counts { + total += float64(h.Counts[i]) + } + + var cumulative float64 // cumulative count of all buckets we've iterated through + var start, end float64 // start and end of current bucket + i := 0 // index of current bucket + j := 0 // index of the percentile we're currently calculating + + for j < len(p) && i < len(h.Counts) { + start, end = h.Buckets[i], h.Buckets[i+1] + // Avoid interpolating with Inf if our percentile lies in an edge bucket + if i == 0 && math.IsInf(h.Buckets[0], -1) { + start = end + } + if i == len(h.Counts)-1 && math.IsInf(h.Buckets[len(h.Buckets)-1], 1) { + end = start + } + + if start == end && math.IsInf(start, 0) { + return results + } + + // adds the counts of this bucket, to check whether the percentile is in this bucket + bucketCount := float64(h.Counts[i]) + cumulative += bucketCount + + // Skip empty buckets at the beginning of the histogram and as long as we still have + // percentiles to compute, check whether the target percentile falls in this bucket + for (cumulative > 0) && j < len(p) && (cumulative >= total*p[j]) { + // The target percentile is somewhere in the current bucket: [start, end] + // and corresponds to a count in: [cumulative-bucketCount, cumulative] + // We use linear interpolation to estimate the value of the percentile + // within the bucket. + // + // bucketCount + // <---------------------------------> + // percentileCount + // <-------------------> + // |....................@.............| + // ^ ^ ^ + // counts: cumulative-bucketCount | total*p[j] | cumulative + // | | + // buckets: start | percentile | end + // + percentileCount := total*p[j] - (cumulative - bucketCount) + results[j] = start + (end-start)*(percentileCount/bucketCount) // percentile + // we can have multiple percentiles fall in the same bucket, so we check if the + // next percentile falls in this bucket + j++ + } + i++ + } + + orderedResults := make([]float64, len(p)) + for i := range orderedResults { + orderedResults[i] = results[slices.Index(p, pInput[i])] + } + + return orderedResults +} diff --git a/vendor/github.com/DataDog/go-runtime-metrics-internal/pkg/runtimemetrics/runtime_metrics.go b/vendor/github.com/DataDog/go-runtime-metrics-internal/pkg/runtimemetrics/runtime_metrics.go new file mode 100644 index 00000000..8bcee704 --- /dev/null +++ b/vendor/github.com/DataDog/go-runtime-metrics-internal/pkg/runtimemetrics/runtime_metrics.go @@ -0,0 +1,461 @@ +// Package runtimemetrics exports all runtime/metrics via statsd on a regular interval. +package runtimemetrics + +import ( + "cmp" + "errors" + "fmt" + "log/slog" + "math" + "regexp" + "runtime/metrics" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Options are the options for the runtime metrics emitter. +type Options struct { + // Logger is used to log errors. Defaults to slog.Default() if nil. + Logger *slog.Logger + // Tags are added to all metrics. + Tags []string + // Period is the period at which we poll runtime/metrics and report + // them to statsd. Defaults to 10s. + // + // The statsd client aggregates this data, usually over a 2s window [1], and + // so does the agent, usually over a 10s window [2]. 
+	//
+	// We submit one data point per aggregation window, using the
+	// CountWithTimestamp / GaugeWithTimestamp APIs for submitting precisely
+	// aligned metrics, to enable comparing them with one another.
+	//
+	// [1] https://github.com/DataDog/datadog-go/blob/e612112c8bb396b33ad5d9edd645d289b07d0e40/statsd/options.go/#L23
+	// [2] https://docs.datadoghq.com/developers/dogstatsd/data_aggregation/#how-is-aggregation-performed-with-the-dogstatsd-server
+	Period time.Duration
+	// AllowMultipleInstances is used to allow multiple instances of the runtime
+	// metrics emitter to be started. This is useful in cases where the
+	// application is using multiple runtimemetrics.Emitter instances to report
+	// metrics to different statsd clients.
+	AllowMultipleInstances bool
+}
+
+// instances is used to prevent multiple instances of the runtime metrics
+// emitter from being started concurrently by accident.
+var instances atomic.Int64
+
+// NewEmitter creates a new runtime metrics emitter and starts it. Unless
+// AllowMultipleInstances is set to true, it will return an error if an emitter
+// has already been started and not stopped yet. This is to prevent
+// accidental misconfigurations in larger systems.
+func NewEmitter(statsd partialStatsdClientInterface, opts *Options) (*Emitter, error) {
+	if opts == nil {
+		opts = &Options{}
+	}
+	if n := instances.Add(1); n > 1 && !opts.AllowMultipleInstances {
+		instances.Add(-1)
+		return nil, errors.New("runtimemetrics has already been started")
+	}
+	e := &Emitter{
+		statsd:  statsd,
+		logger:  cmp.Or(opts.Logger, slog.Default()),
+		tags:    opts.Tags,
+		stop:    make(chan struct{}),
+		stopped: make(chan struct{}),
+		period:  cmp.Or(opts.Period, 10*time.Second),
+	}
+	go e.emit()
+	return e, nil
+}
+
+// Emitter submits runtime/metrics to statsd on a regular interval.
+type Emitter struct {
+	statsd partialStatsdClientInterface
+	logger *slog.Logger
+	tags   []string
+	period time.Duration
+
+	stop    chan struct{}
+	stopped chan struct{}
+}
+
+// emit emits runtime/metrics to statsd on a regular interval.
+func (e *Emitter) emit() {
+	descs := supportedMetrics()
+	tags := append(getBaseTags(), e.tags...)
+	rms := newRuntimeMetricStore(descs, e.statsd, e.logger, tags)
+	// TODO: Go services experiencing high scheduling latency might see a
+	// large variance for the period in between rms.report calls. This might
+	// cause spikes in cumulative metric reporting. Should we try to correct
+	// for this by measuring the actual reporting time delta to adjust
+	// the numbers?
+	//
+	// Another challenge is that some metrics only update after GC mark
+	// termination, see [1][2]. This means that it's likely that the rate of
+	// submission for those metrics will be dependent on the service's workload
+	// and GC configuration.
+	//
+	// [1] https://github.com/golang/go/blob/go1.21.3/src/runtime/mstats.go#L939
+	// [2] https://github.com/golang/go/issues/59749
+	tick := time.Tick(e.period)
+	for {
+		select {
+		case <-e.stop:
+			close(e.stopped)
+			return
+		case <-tick:
+			rms.report()
+		}
+	}
+}
+
+// Stop stops the emitter. It is idempotent.
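// Editor's sketch, not part of the diff: typical wiring of the Emitter defined
// above. The statsd client is assumed to satisfy partialStatsdClientInterface
// (a sufficiently recent github.com/DataDog/datadog-go/v5 statsd.Client does).
//
//	client, err := statsd.New("127.0.0.1:8125")
//	if err != nil {
//		log.Fatal(err)
//	}
//	emitter, err := runtimemetrics.NewEmitter(client, &runtimemetrics.Options{
//		Tags:   []string{"env:prod"}, // hypothetical tag
//		Period: 10 * time.Second,
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer emitter.Stop()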
+func (e *Emitter) Stop() {
+	select {
+	case <-e.stop:
+		<-e.stopped
+		return
+	default:
+		close(e.stop)
+		<-e.stopped
+		instances.Add(-1)
+	}
+}
+
+type runtimeMetric struct {
+	ddMetricName string
+	cumulative   bool
+
+	currentValue  metrics.Value
+	previousValue metrics.Value
+}
+
+// the map key is the name of the metric in runtime/metrics
+type runtimeMetricStore struct {
+	metrics                map[string]*runtimeMetric
+	statsd                 partialStatsdClientInterface
+	logger                 *slog.Logger
+	baseTags               []string
+	unknownMetricLogOnce   *sync.Once
+	unsupportedKindLogOnce *sync.Once
+}
+
+// partialStatsdClientInterface is the subset of statsd.ClientInterface that is
+// used by this package.
+type partialStatsdClientInterface interface {
+	// Rate is used in the datadog-go statsd library to sample the values sent;
+	// we should always submit a rate >=1 to ensure our submissions are not sampled.
+	// The rate is forwarded to the agent but then discarded for gauge metrics.
+	GaugeWithTimestamp(name string, value float64, tags []string, rate float64, timestamp time.Time) error
+	CountWithTimestamp(name string, value int64, tags []string, rate float64, timestamp time.Time) error
+	DistributionSamples(name string, values []float64, tags []string, rate float64) error
+}
+
+func newRuntimeMetricStore(descs []metrics.Description, statsdClient partialStatsdClientInterface, logger *slog.Logger, tags []string) runtimeMetricStore {
+	rms := runtimeMetricStore{
+		metrics:                map[string]*runtimeMetric{},
+		statsd:                 statsdClient,
+		logger:                 logger,
+		baseTags:               tags,
+		unknownMetricLogOnce:   &sync.Once{},
+		unsupportedKindLogOnce: &sync.Once{},
+	}
+
+	for _, d := range descs {
+		cumulative := d.Cumulative
+
+		// /sched/latencies:seconds is incorrectly set as non-cumulative,
+		// fixed by https://go-review.googlesource.com/c/go/+/486755
+		// TODO: Use a build tag to apply this logic to Go versions < 1.20.
+		if d.Name == "/sched/latencies:seconds" {
+			cumulative = true
+		}
+
+		ddMetricName, err := datadogMetricName(d.Name)
+		if err != nil {
+			rms.logger.Warn("runtimemetrics: not reporting one of the runtime metrics", slog.Attr{Key: "error", Value: slog.StringValue(err.Error())})
+			continue
+		}
+
+		rms.metrics[d.Name] = &runtimeMetric{
+			ddMetricName: ddMetricName,
+			cumulative:   cumulative,
+		}
+	}
+
+	rms.update()
+
+	return rms
+}
+
+func (rms runtimeMetricStore) update() {
+	// TODO: Reuse this slice to avoid allocations? Note: I don't see these
+	// allocs show up in profiling.
+	samples := make([]metrics.Sample, len(rms.metrics))
+	i := 0
+	// NOTE: Map iteration in Go is randomized, so we end up randomizing the
+	// samples slice. In theory this should not impact correctness, but it's
+	// worth keeping in mind in case problems are observed in the future.
+	for name := range rms.metrics {
+		samples[i].Name = name
+		i++
+	}
+	metrics.Read(samples)
+	for _, s := range samples {
+		runtimeMetric := rms.metrics[s.Name]
+
+		runtimeMetric.previousValue = runtimeMetric.currentValue
+		runtimeMetric.currentValue = s.Value
+	}
+}
+
+func (rms runtimeMetricStore) report() {
+	ts := time.Now()
+	rms.update()
+	samples := []distributionSample{}
+
+	rms.statsd.GaugeWithTimestamp(datadogMetricPrefix+"enabled", 1, rms.baseTags, 1, ts)
+	for name, rm := range rms.metrics {
+		switch rm.currentValue.Kind() {
+		case metrics.KindUint64:
+			v := rm.currentValue.Uint64()
+			// if the value didn't change between two reporting
+			// cycles, don't submit anything.
this avoids having + // inaccurate drops to zero + // we submit 0 values to be able to distinguish between + // cases where the metric was never reported as opposed + // to the metric always being equal to zero + if rm.cumulative && v != 0 && v == rm.previousValue.Uint64() { + continue + } + + // Some of the Uint64 metrics are actually calculated as a difference by the Go runtime: v = uint64(x - y) + // + // Notably, this means that if x < y, then v will be roughly MaxUint64 (minus epsilon). + // This then shows up as '16 EiB' in Datadog graphs, because MaxUint64 bytes = 2^64 = 2^(4 + 10x6) = 2^4 x (2^10)^6 = 16 x 1024^6 = 16 EiB. + // + // This is known to happen with the '/memory/classes/heap/unused:bytes' metric: https://github.com/golang/go/blob/go1.22.1/src/runtime/metrics.go#L364 + // Until this bug is fixed, we log the problematic value and skip submitting that point to avoid spurious spikes in graphs. + if v > math.MaxUint64/2 { + tags := make([]string, 0, len(rms.baseTags)+1) + tags = append(tags, rms.baseTags...) + tags = append(tags, "metric_name:"+rm.ddMetricName) + rms.statsd.CountWithTimestamp(datadogMetricPrefix+"skipped_values", 1, tags, 1, ts) + + // Some metrics are ~sort of expected to report this high value (e.g. + // "runtime.go.metrics.gc_gogc.percent" will consistently report "MaxUint64 - 1" if + // GOGC is OFF). We only want to log the full heap stats for the not-so-expected + // case of "heap unused bytes". + if name == "/memory/classes/heap/unused:bytes" { + logAttrs := []any{ + slog.Attr{Key: "metric_name", Value: slog.StringValue(rm.ddMetricName)}, + slog.Attr{Key: "timestamp", Value: slog.TimeValue(ts)}, + slog.Attr{Key: "uint64(x-y)", Value: slog.Uint64Value(v)}, + slog.Attr{ + // If v is very close to MaxUint64, it will be hard to read "how negative was x-y", so we compute it here for convenience: + Key: "int64(x-y)", + Value: slog.Int64Value(-int64(math.MaxUint64 - v + 1)), // the '+1' is necessary because if int64(x-y)=-1, then uint64(x-y)=MaxUint64 + }, + } + + // Append all Uint64 values for maximum observability + for name, rm := range rms.metrics { + if rm.currentValue.Kind() == metrics.KindUint64 { + logAttrs = append(logAttrs, slog.Attr{Key: name, Value: slog.Uint64Value(rm.currentValue.Uint64())}) + } + } + + rms.logger.Warn("runtimemetrics: skipped submission of absurd value", logAttrs...) + } + continue + } + + rms.statsd.GaugeWithTimestamp(rm.ddMetricName, float64(v), rms.baseTags, 1, ts) + case metrics.KindFloat64: + v := rm.currentValue.Float64() + // if the value didn't change between two reporting + // cycles, don't submit anything. this avoids having + // inaccurate drops to zero + // we submit 0 values to be able to distinguish between + // cases where the metric was never reported as opposed + // to the metric always being equal to zero + if rm.cumulative && v != 0 && v == rm.previousValue.Float64() { + continue + } + rms.statsd.GaugeWithTimestamp(rm.ddMetricName, v, rms.baseTags, 1, ts) + case metrics.KindFloat64Histogram: + v := rm.currentValue.Float64Histogram() + var equal bool + if rm.cumulative { + // Note: This branch should ALWAYS be taken as of go1.21. + v, equal = sub(v, rm.previousValue.Float64Histogram()) + // if the histogram didn't change between two reporting + // cycles, don't submit anything. 
this avoids having
+				// inaccurate drops to zero for percentile metrics
+				if equal {
+					continue
+				}
+			}
+
+			samples = samples[:0]
+			distSamples := distributionSamplesFromHist(v, samples)
+			values := make([]float64, len(distSamples))
+			for i, ds := range distSamples {
+				values[i] = ds.Value
+				rms.statsd.DistributionSamples(rm.ddMetricName, values[i:i+1], rms.baseTags, ds.Rate)
+			}
+
+			stats := statsFromHist(v)
+			// TODO: Could/should we use datadog distribution metrics for this?
+			rms.statsd.GaugeWithTimestamp(rm.ddMetricName+".avg", stats.Avg, rms.baseTags, 1, ts)
+			rms.statsd.GaugeWithTimestamp(rm.ddMetricName+".min", stats.Min, rms.baseTags, 1, ts)
+			rms.statsd.GaugeWithTimestamp(rm.ddMetricName+".max", stats.Max, rms.baseTags, 1, ts)
+			rms.statsd.GaugeWithTimestamp(rm.ddMetricName+".median", stats.Median, rms.baseTags, 1, ts)
+			rms.statsd.GaugeWithTimestamp(rm.ddMetricName+".p95", stats.P95, rms.baseTags, 1, ts)
+			rms.statsd.GaugeWithTimestamp(rm.ddMetricName+".p99", stats.P99, rms.baseTags, 1, ts)
+		case metrics.KindBad:
+			// This should never happen because all metrics are supported
+			// by construction.
+			rms.unknownMetricLogOnce.Do(func() {
+				rms.logger.Error("runtimemetrics: encountered an unknown metric, this should never happen and might indicate a bug", slog.Attr{Key: "metric_name", Value: slog.StringValue(name)})
+			})
+		default:
+			// This may happen as new metric kinds get added.
+			//
+			// The safest thing to do here is to simply log it somewhere once
+			// as something to look into, but ignore it for now.
+			rms.unsupportedKindLogOnce.Do(func() {
+				rms.logger.Error("runtimemetrics: unsupported metric kind, support for that kind should be added in pkg/runtimemetrics",
+					slog.Attr{Key: "metric_name", Value: slog.StringValue(name)},
+					slog.Attr{Key: "kind", Value: slog.AnyValue(rm.currentValue.Kind())},
+				)
+			})
+		}
+	}
+}
+
+// regex extracted from https://cs.opensource.google/go/go/+/refs/tags/go1.20.3:src/runtime/metrics/description.go;l=13
+var runtimeMetricRegex = regexp.MustCompile("^(?P<name>/[^:]+):(?P<unit>[^:*/]+(?:[*/][^:*/]+)*)$")
+
+// see https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics
+var datadogMetricRegex = regexp.MustCompile(`[^a-zA-Z0-9\._]`)
+
+const datadogMetricPrefix = "runtime.go.metrics."
+
+func datadogMetricName(runtimeName string) (string, error) {
+	m := runtimeMetricRegex.FindStringSubmatch(runtimeName)
+
+	if len(m) != 3 {
+		return "", fmt.Errorf("failed to parse metric name for metric %s", runtimeName)
+	}
+
+	// strip leading "/"
+	metricPath := strings.TrimPrefix(m[1], "/")
+	metricUnit := m[2]
+
+	name := datadogMetricRegex.ReplaceAllString(metricPath+"."+metricUnit, "_")
+
+	// Note: This prefix is special. Don't change it without consulting the
+	// runtime/metrics squad.
+	return datadogMetricPrefix + name, nil
+}
+
+var startTags struct {
+	sync.Mutex
+	tags []string
+}
+
+// Start starts reporting runtime/metrics to the given statsd client.
+//
+// Deprecated: Use NewEmitter instead.
+func Start(statsd partialStatsdClientInterface, logger *slog.Logger) error {
+	startTags.Lock()
+	defer startTags.Unlock()
+	_, err := NewEmitter(statsd, &Options{Logger: logger, Tags: startTags.tags})
+	return err
+}
+
+// SetBaseTags sets the base tags that will be added to all metrics when using
+// the Start function.
+//
+// Deprecated: Use NewEmitter with Options.Tags instead.
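// Editor's note, not part of the diff: the mapping performed by
// datadogMetricName above replaces every character outside [a-zA-Z0-9._]
// with an underscore and appends the unit as the final name segment, e.g.:
//
//	name, _ := datadogMetricName("/gc/heap/allocs:bytes")
//	fmt.Println(name) // runtime.go.metrics.gc_heap_allocs.bytes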
+func SetBaseTags(tags []string) { + startTags.Lock() + defer startTags.Unlock() + startTags.tags = tags +} + +// supportedMetrics returns the list of metrics that are supported. +func supportedMetrics() []metrics.Description { + descs := metrics.All() + supported := make([]metrics.Description, 0, len(supportedMetricsTable)) + for _, d := range descs { + if _, ok := supportedMetricsTable[d.Name]; ok { + supported = append(supported, d) + } + } + return supported +} + +// supportedMetricsTable contains all metrics as of go1.24, except godebug +// metrics to limit cardinality. New metrics are added manually b/c they need +// to be registered in the backend first. +var supportedMetricsTable = map[string]struct{}{ + "/cgo/go-to-c-calls:calls": {}, + "/cpu/classes/gc/mark/assist:cpu-seconds": {}, + "/cpu/classes/gc/mark/dedicated:cpu-seconds": {}, + "/cpu/classes/gc/mark/idle:cpu-seconds": {}, + "/cpu/classes/gc/pause:cpu-seconds": {}, + "/cpu/classes/gc/total:cpu-seconds": {}, + "/cpu/classes/idle:cpu-seconds": {}, + "/cpu/classes/scavenge/assist:cpu-seconds": {}, + "/cpu/classes/scavenge/background:cpu-seconds": {}, + "/cpu/classes/scavenge/total:cpu-seconds": {}, + "/cpu/classes/total:cpu-seconds": {}, + "/cpu/classes/user:cpu-seconds": {}, + "/gc/cycles/automatic:gc-cycles": {}, + "/gc/cycles/forced:gc-cycles": {}, + "/gc/cycles/total:gc-cycles": {}, + "/gc/gogc:percent": {}, + "/gc/gomemlimit:bytes": {}, + "/gc/heap/allocs-by-size:bytes": {}, + "/gc/heap/allocs:bytes": {}, + "/gc/heap/allocs:objects": {}, + "/gc/heap/frees-by-size:bytes": {}, + "/gc/heap/frees:bytes": {}, + "/gc/heap/frees:objects": {}, + "/gc/heap/goal:bytes": {}, + "/gc/heap/live:bytes": {}, + "/gc/heap/objects:objects": {}, + "/gc/heap/tiny/allocs:objects": {}, + "/gc/limiter/last-enabled:gc-cycle": {}, + "/gc/pauses:seconds": {}, + "/gc/scan/globals:bytes": {}, + "/gc/scan/heap:bytes": {}, + "/gc/scan/stack:bytes": {}, + "/gc/scan/total:bytes": {}, + "/gc/stack/starting-size:bytes": {}, + "/memory/classes/heap/free:bytes": {}, + "/memory/classes/heap/objects:bytes": {}, + "/memory/classes/heap/released:bytes": {}, + "/memory/classes/heap/stacks:bytes": {}, + "/memory/classes/heap/unused:bytes": {}, + "/memory/classes/metadata/mcache/free:bytes": {}, + "/memory/classes/metadata/mcache/inuse:bytes": {}, + "/memory/classes/metadata/mspan/free:bytes": {}, + "/memory/classes/metadata/mspan/inuse:bytes": {}, + "/memory/classes/metadata/other:bytes": {}, + "/memory/classes/os-stacks:bytes": {}, + "/memory/classes/other:bytes": {}, + "/memory/classes/profiling/buckets:bytes": {}, + "/memory/classes/total:bytes": {}, + "/sched/gomaxprocs:threads": {}, + "/sched/goroutines:goroutines": {}, + "/sched/latencies:seconds": {}, + "/sched/pauses/stopping/gc:seconds": {}, + "/sched/pauses/stopping/other:seconds": {}, + "/sched/pauses/total/gc:seconds": {}, + "/sched/pauses/total/other:seconds": {}, + "/sync/mutex/wait/total:seconds": {}, +} diff --git a/vendor/github.com/DataDog/go-runtime-metrics-internal/pkg/runtimemetrics/tags.go b/vendor/github.com/DataDog/go-runtime-metrics-internal/pkg/runtimemetrics/tags.go new file mode 100644 index 00000000..9e340943 --- /dev/null +++ b/vendor/github.com/DataDog/go-runtime-metrics-internal/pkg/runtimemetrics/tags.go @@ -0,0 +1,71 @@ +package runtimemetrics + +import ( + "fmt" + "math" + "runtime" + "runtime/metrics" +) + +func getBaseTags() []string { + const gogcMetricName = "/gc/gogc:percent" + const gomemlimitMetricName = "/gc/gomemlimit:bytes" + const gomaxProcsMetricName = 
"/sched/gomaxprocs:threads" + + samples := []metrics.Sample{ + {Name: gogcMetricName}, + {Name: gomemlimitMetricName}, + {Name: gomaxProcsMetricName}, + } + + baseTags := make([]string, 0, len(samples)+1) + baseTags = append(baseTags, "goversion:"+runtime.Version()) + + metrics.Read(samples) + + for _, s := range samples { + switch s.Name { + case gogcMetricName: + gogc := s.Value.Uint64() + var goGCTagValue string + if gogc == math.MaxUint64 { + goGCTagValue = "off" + } else { + goGCTagValue = fmt.Sprintf("%d", gogc) + } + baseTags = append(baseTags, fmt.Sprintf("gogc:%s", goGCTagValue)) + case gomemlimitMetricName: + gomemlimit := s.Value.Uint64() + var goMemLimitTagValue string + if gomemlimit == math.MaxInt64 { + goMemLimitTagValue = "unlimited" + } else { + // Convert GOMEMLIMIT to a human-readable string with the right byte unit + goMemLimitTagValue = formatByteSize(gomemlimit) + } + baseTags = append(baseTags, fmt.Sprintf("gomemlimit:%s", goMemLimitTagValue)) + case gomaxProcsMetricName: + gomaxprocs := s.Value.Uint64() + baseTags = append(baseTags, fmt.Sprintf("gomaxprocs:%d", gomaxprocs)) + } + } + + return baseTags +} + +// Function to format byte size with the right unit +func formatByteSize(bytes uint64) string { + const ( + unit = 1024 + format = "%.0f %sB" + ) + if bytes < unit { + return fmt.Sprintf(format, float64(bytes), "") + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf(format, float64(bytes)/float64(div), string("KMGTPE"[exp])+"i") +} diff --git a/vendor/github.com/DataDog/go-sqllexer/.gitignore b/vendor/github.com/DataDog/go-sqllexer/.gitignore index 3b735ec4..0fa6069b 100644 --- a/vendor/github.com/DataDog/go-sqllexer/.gitignore +++ b/vendor/github.com/DataDog/go-sqllexer/.gitignore @@ -19,3 +19,9 @@ # Go workspace file go.work + +# Go fuzz test files +testdata/fuzz/ + +# Benchmark files +bench \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/README.md b/vendor/github.com/DataDog/go-sqllexer/README.md index 95a8d6bf..dc9c8494 100644 --- a/vendor/github.com/DataDog/go-sqllexer/README.md +++ b/vendor/github.com/DataDog/go-sqllexer/README.md @@ -27,8 +27,11 @@ import "github.com/DataDog/go-sqllexer" func main() { query := "SELECT * FROM users WHERE id = 1" lexer := sqllexer.New(query) - tokens := lexer.ScanAll() - for _, token := range tokens { + for { + token := lexer.Scan() + if token.Type == EOF { + break + } fmt.Println(token) } } diff --git a/vendor/github.com/DataDog/go-sqllexer/normalizer.go b/vendor/github.com/DataDog/go-sqllexer/normalizer.go index fe437e05..bb1b3300 100644 --- a/vendor/github.com/DataDog/go-sqllexer/normalizer.go +++ b/vendor/github.com/DataDog/go-sqllexer/normalizer.go @@ -1,6 +1,7 @@ package sqllexer import ( + "fmt" "strings" ) @@ -100,10 +101,35 @@ type StatementMetadata struct { Procedures []string `json:"procedures"` } +type metadataSet struct { + size int + tablesSet map[string]struct{} + commentsSet map[string]struct{} + commandsSet map[string]struct{} + proceduresSet map[string]struct{} +} + +// addMetadata adds a value to a metadata slice if it doesn't exist in the set +func (m *metadataSet) addMetadata(value string, set map[string]struct{}, slice *[]string) { + if _, exists := set[value]; !exists { + set[value] = struct{}{} + *slice = append(*slice, value) + m.size += len(value) + } +} + type groupablePlaceholder struct { groupable bool } +type headState struct { + readFirstNonSpaceNonComment bool + 
inLeadingParenthesesExpression bool + foundLeadingExpressionInParentheses bool + standaloneExpressionInParentheses bool + expressionInParentheses strings.Builder +} + type Normalizer struct { config *normalizerConfig } @@ -120,82 +146,142 @@ func NewNormalizer(opts ...normalizerOption) *Normalizer { return &normalizer } -// Normalize takes an input SQL string and returns a normalized SQL string, a StatementMetadata struct, and an error. -// The normalizer collapses input SQL into compact format, groups obfuscated values into single placeholder, -// and collects metadata such as table names, comments, and commands. -func (n *Normalizer) Normalize(input string, lexerOpts ...lexerOption) (normalizedSQL string, statementMetadata *StatementMetadata, err error) { - lexer := New( - input, - lexerOpts..., - ) +// normalizeToken is a helper function that handles the common normalization logic +func (n *Normalizer) normalizeToken(lexer *Lexer, normalizedSQLBuilder *strings.Builder, meta *metadataSet, statementMetadata *StatementMetadata, preProcessToken func(*Token, *LastValueToken), lexerOpts ...lexerOption) (err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("error normalizing SQL token: %v", r) + } + }() - var normalizedSQLBuilder strings.Builder + var groupablePlaceholder groupablePlaceholder + var headState headState + var ctes map[string]bool - statementMetadata = &StatementMetadata{ - Tables: []string{}, - Comments: []string{}, - Commands: []string{}, - Procedures: []string{}, + // Only allocate CTEs map if collecting tables + if n.config.CollectTables { + ctes = make(map[string]bool, 2) } - var lastToken Token // The last token that is not whitespace or comment - var groupablePlaceholder groupablePlaceholder + var lastValueToken *LastValueToken for { token := lexer.Scan() + if preProcessToken != nil { + // pre-process the token, often used for obfuscation + preProcessToken(token, lastValueToken) + } + if n.shouldCollectMetadata() { + n.collectMetadata(token, lastValueToken, meta, statementMetadata, ctes) + } + n.normalizeSQL(token, lastValueToken, normalizedSQLBuilder, &groupablePlaceholder, &headState, lexerOpts...) if token.Type == EOF { break } - n.collectMetadata(&token, &lastToken, statementMetadata) - n.normalizeSQL(&token, &lastToken, &normalizedSQLBuilder, &groupablePlaceholder, lexerOpts...) + if isValueToken(token) { + lastValueToken = token.getLastValueToken() + } } - normalizedSQL = normalizedSQLBuilder.String() + return nil +} + +func (n *Normalizer) Normalize(input string, lexerOpts ...lexerOption) (normalizedSQL string, statementMetadata *StatementMetadata, err error) { + lexer := New(input, lexerOpts...) 
+ var normalizedSQLBuilder strings.Builder + normalizedSQLBuilder.Grow(len(input)) + + meta := &metadataSet{ + tablesSet: map[string]struct{}{}, + commentsSet: map[string]struct{}{}, + commandsSet: map[string]struct{}{}, + proceduresSet: map[string]struct{}{}, + } + + statementMetadata = &StatementMetadata{ + Tables: []string{}, + Comments: []string{}, + Commands: []string{}, + Procedures: []string{}, + } - // Dedupe collected metadata - dedupeStatementMetadata(statementMetadata) + if err = n.normalizeToken(lexer, &normalizedSQLBuilder, meta, statementMetadata, nil, lexerOpts...); err != nil { + return "", nil, err + } + normalizedSQL = normalizedSQLBuilder.String() + statementMetadata.Size = meta.size return n.trimNormalizedSQL(normalizedSQL), statementMetadata, nil } -func (n *Normalizer) collectMetadata(token *Token, lastToken *Token, statementMetadata *StatementMetadata) { +func (n *Normalizer) shouldCollectMetadata() bool { + return n.config.CollectTables || n.config.CollectCommands || n.config.CollectComments || n.config.CollectProcedure +} + +func (n *Normalizer) collectMetadata(token *Token, lastValueToken *LastValueToken, meta *metadataSet, statementMetadata *StatementMetadata, ctes map[string]bool) { if n.config.CollectComments && (token.Type == COMMENT || token.Type == MULTILINE_COMMENT) { - // Collect comments - statementMetadata.Comments = append(statementMetadata.Comments, token.Value) + comment := token.Value + meta.addMetadata(comment, meta.commentsSet, &statementMetadata.Comments) + } else if token.Type == COMMAND { + if n.config.CollectCommands { + command := strings.ToUpper(token.Value) + meta.addMetadata(command, meta.commandsSet, &statementMetadata.Commands) + } } else if token.Type == IDENT || token.Type == QUOTED_IDENT || token.Type == FUNCTION { tokenVal := token.Value if token.Type == QUOTED_IDENT { - // We always want to trim the quotes for collected metadata such as table names - // This is because the metadata is used as tags, and we don't want them to be normalized as underscores later on - tokenVal = trimQuotes(tokenVal, tokenVal[0:1], tokenVal[len(tokenVal)-1:]) + tokenVal = trimQuotes(token) if !n.config.KeepIdentifierQuotation { + // trim quotes and set the token type to IDENT token.Value = tokenVal + token.Type = IDENT } } - if n.config.CollectCommands && isCommand(strings.ToUpper(tokenVal)) { - // Collect commands - statementMetadata.Commands = append(statementMetadata.Commands, strings.ToUpper(tokenVal)) - } else if n.config.CollectTables && isTableIndicator(strings.ToUpper(lastToken.Value)) && !isSQLKeyword(token) { - // Collect table names - statementMetadata.Tables = append(statementMetadata.Tables, tokenVal) - } else if n.config.CollectProcedure && isProcedure(lastToken) { - // Collect procedure names - statementMetadata.Procedures = append(statementMetadata.Procedures, tokenVal) + if lastValueToken != nil && lastValueToken.Type == CTE_INDICATOR { + ctes[tokenVal] = true + } else if n.config.CollectTables && lastValueToken != nil && lastValueToken.isTableIndicator { + if _, ok := ctes[tokenVal]; !ok { + meta.addMetadata(tokenVal, meta.tablesSet, &statementMetadata.Tables) + } + } else if n.config.CollectProcedure && lastValueToken != nil && lastValueToken.Type == PROC_INDICATOR { + meta.addMetadata(tokenVal, meta.proceduresSet, &statementMetadata.Procedures) } } } -func (n *Normalizer) normalizeSQL(token *Token, lastToken *Token, normalizedSQLBuilder *strings.Builder, groupablePlaceholder *groupablePlaceholder, lexerOpts ...lexerOption) { - if 
token.Type != WS && token.Type != COMMENT && token.Type != MULTILINE_COMMENT {
+func (n *Normalizer) normalizeSQL(token *Token, lastValueToken *LastValueToken, normalizedSQLBuilder *strings.Builder, groupablePlaceholder *groupablePlaceholder, headState *headState, lexerOpts ...lexerOption) {
+	if token.Type != SPACE && token.Type != COMMENT && token.Type != MULTILINE_COMMENT {
+		if token.Type == QUOTED_IDENT && !n.config.KeepIdentifierQuotation {
+			token.Value = trimQuotes(token)
+		}
+
+		// handle leading expression in parentheses
+		if !headState.readFirstNonSpaceNonComment {
+			headState.readFirstNonSpaceNonComment = true
+			if token.Type == PUNCTUATION && token.Value == "(" {
+				headState.inLeadingParenthesesExpression = true
+				headState.standaloneExpressionInParentheses = true
+			}
+		}
+		if token.Type == EOF {
+			if headState.standaloneExpressionInParentheses {
+				normalizedSQLBuilder.WriteString(headState.expressionInParentheses.String())
+			}
+			return
+		} else if headState.foundLeadingExpressionInParentheses {
+			headState.standaloneExpressionInParentheses = false
+		}
+
 		if token.Type == DOLLAR_QUOTED_FUNCTION && token.Value != StringPlaceholder {
 			// if the token is a dollar quoted function and it is not obfuscated,
 			// we need to recursively normalize the content of the dollar quoted function
 			quotedFunc := token.Value[6 : len(token.Value)-6] // remove the $func$ prefix and suffix
-			normalizedQuotedFunc, _, err := n.Normalize(quotedFunc, lexerOpts...)
+			normalizedQuotedFunc, _, err := n.Normalize(quotedFunc)
 			if err == nil {
 				// replace the content of the dollar quoted function with the normalized content
 				// if there is an error, we just keep the original content
 				var normalizedDollarQuotedFunc strings.Builder
+				normalizedDollarQuotedFunc.Grow(len(normalizedQuotedFunc) + 12)
 				normalizedDollarQuotedFunc.WriteString("$func$")
 				normalizedDollarQuotedFunc.WriteString(normalizedQuotedFunc)
 				normalizedDollarQuotedFunc.WriteString("$func$")
@@ -205,66 +291,68 @@ func (n *Normalizer) normalizeSQL(token *Token, lastToken *Token, normalizedSQLB
 		if !n.config.KeepSQLAlias {
 			// discard SQL alias
-			if strings.ToUpper(token.Value) == "AS" {
-				// if current token is AS, then continue to next token
-				// because without seeing the next token, we cannot
-				// determine if the current token is an alias or not
-				*lastToken = *token
+			if token.Type == ALIAS_INDICATOR {
 				return
 			}
-			if strings.ToUpper(lastToken.Value) == "AS" {
-				if token.Type == IDENT && !isSQLKeyword(token) {
-					// if the last token is AS and the current token is IDENT,
-					// then the current token is an alias, so we discard it
-					*lastToken = *token
+			if lastValueToken != nil && lastValueToken.Type == ALIAS_INDICATOR {
+				if token.Type == IDENT || token.Type == QUOTED_IDENT {
 					return
 				} else {
 					// if the last token is AS and the current token is not IDENT,
 					// this could be a CTE like WITH ...
AS (...), // so we do not discard the current token - n.appendWhitespace(lastToken, token, normalizedSQLBuilder) - n.writeToken(lastToken, normalizedSQLBuilder) + n.appendSpace(token, lastValueToken, normalizedSQLBuilder) + n.writeToken(lastValueToken.Type, lastValueToken.Value, normalizedSQLBuilder) } } } // group consecutive obfuscated values into single placeholder - if n.isObfuscatedValueGroupable(token, lastToken, groupablePlaceholder, normalizedSQLBuilder) { + if n.isObfuscatedValueGroupable(token, lastValueToken, groupablePlaceholder, normalizedSQLBuilder) { // return the token but not write it to the normalizedSQLBuilder - *lastToken = *token return } - // determine if we should add a whitespace - n.appendWhitespace(lastToken, token, normalizedSQLBuilder) - n.writeToken(token, normalizedSQLBuilder) - - *lastToken = *token + if headState.inLeadingParenthesesExpression { + n.appendSpace(token, lastValueToken, &headState.expressionInParentheses) + n.writeToken(token.Type, token.Value, &headState.expressionInParentheses) + if token.Type == PUNCTUATION && token.Value == ")" { + headState.inLeadingParenthesesExpression = false + headState.foundLeadingExpressionInParentheses = true + } + } else { + n.appendSpace(token, lastValueToken, normalizedSQLBuilder) + n.writeToken(token.Type, token.Value, normalizedSQLBuilder) + } } } -func (n *Normalizer) writeToken(token *Token, normalizedSQLBuilder *strings.Builder) { - if n.config.UppercaseKeywords && isSQLKeyword(token) { - normalizedSQLBuilder.WriteString(strings.ToUpper(token.Value)) +func (n *Normalizer) writeToken(tokenType TokenType, tokenValue string, normalizedSQLBuilder *strings.Builder) { + if n.config.UppercaseKeywords && (tokenType == COMMAND || tokenType == KEYWORD) { + normalizedSQLBuilder.WriteString(strings.ToUpper(tokenValue)) } else { - normalizedSQLBuilder.WriteString(token.Value) + normalizedSQLBuilder.WriteString(tokenValue) } } -func (n *Normalizer) isObfuscatedValueGroupable(token *Token, lastToken *Token, groupablePlaceholder *groupablePlaceholder, normalizedSQLBuilder *strings.Builder) bool { +func (n *Normalizer) isObfuscatedValueGroupable(token *Token, lastValueToken *LastValueToken, groupablePlaceholder *groupablePlaceholder, normalizedSQLBuilder *strings.Builder) bool { if token.Value == NumberPlaceholder || token.Value == StringPlaceholder { - if lastToken.Value == "(" || lastToken.Value == "[" { + if lastValueToken == nil { + // if the last token is nil, we know it's the start of groupable placeholders + return false + } + if lastValueToken.Value == "(" || lastValueToken.Value == "[" { // if the last token is "(" or "[", and the current token is a placeholder, // we know it's the start of groupable placeholders // we don't return here because we still need to write the first placeholder groupablePlaceholder.groupable = true - } else if lastToken.Value == "," && groupablePlaceholder.groupable { + } else if lastValueToken.Value == "," && groupablePlaceholder.groupable { return true } } - if (lastToken.Value == NumberPlaceholder || lastToken.Value == StringPlaceholder) && token.Value == "," && groupablePlaceholder.groupable { + if lastValueToken != nil && (lastValueToken.Value == NumberPlaceholder || lastValueToken.Value == StringPlaceholder) && token.Value == "," && groupablePlaceholder.groupable { return true } @@ -274,20 +362,20 @@ func (n *Normalizer) isObfuscatedValueGroupable(token *Token, lastToken *Token, return false } - if groupablePlaceholder.groupable && token.Value != NumberPlaceholder && token.Value != 
StringPlaceholder && lastToken.Value == "," {
+	if groupablePlaceholder.groupable && token.Value != NumberPlaceholder && token.Value != StringPlaceholder && lastValueToken != nil && lastValueToken.Value == "," {
 		// This is a tricky edge case. If we are inside a groupable block, and the current token is not a placeholder,
 		// we not only want to write the current token to the normalizedSQLBuilder, but also write the last comma that we skipped.
 		// For example, (?, ARRAY[?, ?, ?]) should be normalized as (?, ARRAY[?])
-		normalizedSQLBuilder.WriteString(lastToken.Value)
+		normalizedSQLBuilder.WriteString(lastValueToken.Value)
 		return false
 	}
 
 	return false
 }
 
-func (n *Normalizer) appendWhitespace(lastToken *Token, token *Token, normalizedSQLBuilder *strings.Builder) {
+func (n *Normalizer) appendSpace(token *Token, lastValueToken *LastValueToken, normalizedSQLBuilder *strings.Builder) {
 	// do not add a space between parentheses if RemoveSpaceBetweenParentheses is true
-	if n.config.RemoveSpaceBetweenParentheses && (lastToken.Type == FUNCTION || lastToken.Value == "(" || lastToken.Value == "[") {
+	if n.config.RemoveSpaceBetweenParentheses && lastValueToken != nil && (lastValueToken.Type == FUNCTION || lastValueToken.Value == "(" || lastValueToken.Value == "[") {
 		return
 	}
@@ -296,13 +384,11 @@ func (n *Normalizer) appendWhitespace(lastToken *Token, token *Token, normalized
 	}
 
 	switch token.Value {
-	case ",":
-	case ";":
+	case ",", ";":
+		return
 	case "=":
-		if lastToken.Value == ":" {
-			// do not add a space before an equals if a colon was
-			// present before it.
-			break
+		if lastValueToken != nil && lastValueToken.Value == ":" {
+			return
 		}
 		fallthrough
 	default:
@@ -317,27 +403,3 @@
 	}
 	return strings.TrimSpace(normalizedSQL)
 }
-
-func dedupeCollectedMetadata(metadata []string) (dedupedMetadata []string, size int) {
-	// Dedupe collected metadata
-	// e.g. [SELECT, JOIN, SELECT, JOIN] -> [SELECT, JOIN]
-	dedupedMetadata = []string{}
-	var metadataSeen = make(map[string]struct{})
-	for _, m := range metadata {
-		if _, seen := metadataSeen[m]; !seen {
-			metadataSeen[m] = struct{}{}
-			dedupedMetadata = append(dedupedMetadata, m)
-			size += len(m)
-		}
-	}
-	return dedupedMetadata, size
-}
-
-func dedupeStatementMetadata(info *StatementMetadata) {
-	var tablesSize, commentsSize, commandsSize, procedureSize int
-	info.Tables, tablesSize = dedupeCollectedMetadata(info.Tables)
-	info.Comments, commentsSize = dedupeCollectedMetadata(info.Comments)
-	info.Commands, commandsSize = dedupeCollectedMetadata(info.Commands)
-	info.Procedures, procedureSize = dedupeCollectedMetadata(info.Procedures)
-	info.Size += tablesSize + commentsSize + commandsSize + procedureSize
-}
diff --git a/vendor/github.com/DataDog/go-sqllexer/obfuscate_and_normalize.go b/vendor/github.com/DataDog/go-sqllexer/obfuscate_and_normalize.go
index b52c46c2..377457b2 100644
--- a/vendor/github.com/DataDog/go-sqllexer/obfuscate_and_normalize.go
+++ b/vendor/github.com/DataDog/go-sqllexer/obfuscate_and_normalize.go
@@ -5,12 +5,16 @@ import "strings"
 
 // ObfuscateAndNormalize takes an input SQL string and returns a normalized SQL string with metadata
 // This function is a convenience function that combines the Obfuscator and Normalizer in one pass
 func ObfuscateAndNormalize(input string, obfuscator *Obfuscator, normalizer *Normalizer, lexerOpts ...lexerOption) (normalizedSQL string, statementMetadata *StatementMetadata, err error) {
-	lexer := New(
-		input,
-		lexerOpts...,
-	)
-
+	lexer := New(input, lexerOpts...)
 	var normalizedSQLBuilder strings.Builder
+	normalizedSQLBuilder.Grow(len(input))
+
+	meta := &metadataSet{
+		tablesSet:     map[string]struct{}{},
+		commentsSet:   map[string]struct{}{},
+		commandsSet:   map[string]struct{}{},
+		proceduresSet: map[string]struct{}{},
+	}
 
 	statementMetadata = &StatementMetadata{
 		Tables:     []string{},
@@ -19,23 +23,16 @@ func ObfuscateAndNormalize(input string, obfuscator *Obfuscator, normalizer *Nor
 		Procedures: []string{},
 	}
 
-	var lastToken Token // The last token that is not whitespace or comment
-	var groupablePlaceholder groupablePlaceholder
+	obfuscate := func(token *Token, lastValueToken *LastValueToken) {
+		obfuscator.ObfuscateTokenValue(token, lastValueToken, lexerOpts...)
+	}
 
-	for {
-		token := lexer.Scan()
-		if token.Type == EOF {
-			break
-		}
-		token.Value = obfuscator.ObfuscateTokenValue(token, lexerOpts...)
-		normalizer.collectMetadata(&token, &lastToken, statementMetadata)
-		normalizer.normalizeSQL(&token, &lastToken, &normalizedSQLBuilder, &groupablePlaceholder, lexerOpts...)
+ // Pass obfuscation as the pre-process step + if err = normalizer.normalizeToken(lexer, &normalizedSQLBuilder, meta, statementMetadata, obfuscate, lexerOpts...); err != nil { + return "", nil, err } normalizedSQL = normalizedSQLBuilder.String() - - // Dedupe collected metadata - dedupeStatementMetadata(statementMetadata) - + statementMetadata.Size = meta.size return normalizer.trimNormalizedSQL(normalizedSQL), statementMetadata, nil } diff --git a/vendor/github.com/DataDog/go-sqllexer/obfuscator.go b/vendor/github.com/DataDog/go-sqllexer/obfuscator.go index ae2d464d..07c77de2 100644 --- a/vendor/github.com/DataDog/go-sqllexer/obfuscator.go +++ b/vendor/github.com/DataDog/go-sqllexer/obfuscator.go @@ -10,6 +10,8 @@ type obfuscatorConfig struct { ReplacePositionalParameter bool `json:"replace_positional_parameter"` ReplaceBoolean bool `json:"replace_boolean"` ReplaceNull bool `json:"replace_null"` + KeepJsonPath bool `json:"keep_json_path"` // by default, we replace json path with placeholder + ReplaceBindParameter bool `json:"replace_bind_parameter"` } type obfuscatorOption func(*obfuscatorConfig) @@ -44,6 +46,18 @@ func WithDollarQuotedFunc(dollarQuotedFunc bool) obfuscatorOption { } } +func WithKeepJsonPath(keepJsonPath bool) obfuscatorOption { + return func(c *obfuscatorConfig) { + c.KeepJsonPath = keepJsonPath + } +} + +func WithReplaceBindParameter(replaceBindParameter bool) obfuscatorOption { + return func(c *obfuscatorConfig) { + c.ReplaceBindParameter = replaceBindParameter + } +} + type Obfuscator struct { config *obfuscatorConfig } @@ -69,60 +83,74 @@ const ( // The obfuscator replaces all literal values with a single placeholder func (o *Obfuscator) Obfuscate(input string, lexerOpts ...lexerOption) string { var obfuscatedSQL strings.Builder + obfuscatedSQL.Grow(len(input)) lexer := New( input, lexerOpts..., ) + + var lastValueToken *LastValueToken + for { token := lexer.Scan() if token.Type == EOF { break } - obfuscatedSQL.WriteString(o.ObfuscateTokenValue(token, lexerOpts...)) + o.ObfuscateTokenValue(token, lastValueToken, lexerOpts...) 
+ obfuscatedSQL.WriteString(token.Value) + if isValueToken(token) { + lastValueToken = token.getLastValueToken() + } } return strings.TrimSpace(obfuscatedSQL.String()) } -func (o *Obfuscator) ObfuscateTokenValue(token Token, lexerOpts ...lexerOption) string { +func (o *Obfuscator) ObfuscateTokenValue(token *Token, lastValueToken *LastValueToken, lexerOpts ...lexerOption) { switch token.Type { case NUMBER: - return NumberPlaceholder + if o.config.KeepJsonPath && lastValueToken != nil && lastValueToken.Type == JSON_OP { + break + } + token.Value = NumberPlaceholder case DOLLAR_QUOTED_FUNCTION: if o.config.DollarQuotedFunc { // obfuscate the content of dollar quoted function quotedFunc := token.Value[6 : len(token.Value)-6] // remove the $func$ prefix and suffix var obfuscatedDollarQuotedFunc strings.Builder + obfuscatedDollarQuotedFunc.Grow(len(quotedFunc) + 12) obfuscatedDollarQuotedFunc.WriteString("$func$") obfuscatedDollarQuotedFunc.WriteString(o.Obfuscate(quotedFunc, lexerOpts...)) obfuscatedDollarQuotedFunc.WriteString("$func$") - return obfuscatedDollarQuotedFunc.String() - } else { - return StringPlaceholder + token.Value = obfuscatedDollarQuotedFunc.String() + break } + token.Value = StringPlaceholder case STRING, INCOMPLETE_STRING, DOLLAR_QUOTED_STRING: - return StringPlaceholder + if o.config.KeepJsonPath && lastValueToken != nil && lastValueToken.Type == JSON_OP { + break + } + token.Value = StringPlaceholder case POSITIONAL_PARAMETER: if o.config.ReplacePositionalParameter { - return StringPlaceholder - } else { - return token.Value + token.Value = StringPlaceholder } - case IDENT, QUOTED_IDENT: - if o.config.ReplaceBoolean && isBoolean(token.Value) { - return StringPlaceholder + case BIND_PARAMETER: + if o.config.ReplaceBindParameter { + token.Value = StringPlaceholder } - if o.config.ReplaceNull && isNull(token.Value) { - return StringPlaceholder + case BOOLEAN: + if o.config.ReplaceBoolean { + token.Value = StringPlaceholder } - - if o.config.ReplaceDigits { - return replaceDigits(token.Value, NumberPlaceholder) - } else { - return token.Value + case NULL: + if o.config.ReplaceNull { + token.Value = StringPlaceholder + } + case IDENT, QUOTED_IDENT: + if o.config.ReplaceDigits && token.hasDigits { + token.Value = replaceDigits(token, NumberPlaceholder) } - default: - return token.Value } } diff --git a/vendor/github.com/DataDog/go-sqllexer/sqllexer.go b/vendor/github.com/DataDog/go-sqllexer/sqllexer.go index f092622b..e16fe803 100644 --- a/vendor/github.com/DataDog/go-sqllexer/sqllexer.go +++ b/vendor/github.com/DataDog/go-sqllexer/sqllexer.go @@ -1,13 +1,15 @@ package sqllexer -import "unicode/utf8" +import ( + "unicode/utf8" +) type TokenType int const ( ERROR TokenType = iota EOF - WS // whitespace + SPACE // space or newline STRING // string literal INCOMPLETE_STRING // incomplete string literal so that we can obfuscate it, e.g. 'abc NUMBER // number literal @@ -25,12 +27,38 @@ const ( FUNCTION // function SYSTEM_VARIABLE // system variable UNKNOWN // unknown token + COMMAND // SQL commands like SELECT, INSERT + KEYWORD // Other SQL keywords + JSON_OP // JSON operators + BOOLEAN // boolean literal + NULL // null literal + PROC_INDICATOR // procedure indicator + CTE_INDICATOR // CTE indicator + ALIAS_INDICATOR // alias indicator ) // Token represents a SQL token with its type and value. 
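// Editor's sketch, not part of the diff: end-to-end use of the updated
// go-sqllexer API from application code. NewObfuscator and NewNormalizer are
// the package's existing constructors; WithKeepJsonPath comes from the hunk
// above, WithCollectTables is assumed to be the matching normalizer option,
// and the default placeholders render as "?".
//
//	obf := sqllexer.NewObfuscator(sqllexer.WithKeepJsonPath(true))
//	norm := sqllexer.NewNormalizer(sqllexer.WithCollectTables(true))
//	sql, meta, err := sqllexer.ObfuscateAndNormalize("SELECT * FROM users WHERE id = 42", obf, norm)
//	// sql == "SELECT * FROM users WHERE id = ?", meta.Tables == ["users"], err == nil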
type Token struct { - Type TokenType - Value string + Type TokenType + Value string + isTableIndicator bool // true if the token is a table indicator + hasDigits bool + hasQuotes bool // private - only used by trimQuotes + lastValueToken LastValueToken // private - internal state +} + +type LastValueToken struct { + Type TokenType + Value string + isTableIndicator bool +} + +// getLastValueToken can be private since it's only used internally +func (t *Token) getLastValueToken() *LastValueToken { + t.lastValueToken.Type = t.Type + t.lastValueToken.Value = t.Value + t.lastValueToken.isTableIndicator = t.isTableIndicator + return &t.lastValueToken } type LexerConfig struct { @@ -40,67 +68,48 @@ type LexerConfig struct { type lexerOption func(*LexerConfig) func WithDBMS(dbms DBMSType) lexerOption { + dbms = getDBMSFromAlias(dbms) return func(c *LexerConfig) { c.DBMS = dbms } } +type trieNode struct { + children map[rune]*trieNode + isEnd bool + tokenType TokenType + isTableIndicator bool +} + // SQL Lexer inspired from Rob Pike's talk on Lexical Scanning in Go type Lexer struct { - src string // the input src string - cursor int // the current position of the cursor - start int // the start position of the current token - config *LexerConfig + src string // the input src string + cursor int // the current position of the cursor + start int // the start position of the current token + config *LexerConfig + token *Token + hasQuotes bool // true if any quotes in token + hasDigits bool // true if the token has digits + isTableIndicator bool // true if the token is a table indicator } func New(input string, opts ...lexerOption) *Lexer { - lexer := &Lexer{src: input, config: &LexerConfig{}} + lexer := &Lexer{ + src: input, + config: &LexerConfig{}, + token: &Token{}, + } for _, opt := range opts { opt(lexer.config) } return lexer } -// ScanAll scans the entire input string and returns a slice of tokens. -func (s *Lexer) ScanAll() []Token { - var tokens []Token - for { - token := s.Scan() - if token.Type == EOF { - // don't include EOF token in the result - break - } - tokens = append(tokens, token) - } - return tokens -} - -// ScanAllTokens scans the entire input string and returns a channel of tokens. -// Use this if you want to process the tokens as they are scanned. -func (s *Lexer) ScanAllTokens() <-chan Token { - tokenCh := make(chan Token) - - go func() { - defer close(tokenCh) - - for { - token := s.Scan() - if token.Type == EOF { - // don't include EOF token in the result - break - } - tokenCh <- token - } - }() - - return tokenCh -} - // Scan scans the next token and returns it. 
-func (s *Lexer) Scan() Token { +func (s *Lexer) Scan() *Token { ch := s.peek() switch { - case isWhitespace(ch): + case isSpace(ch): return s.scanWhitespace() case isLetter(ch): return s.scanIdentifier(ch) @@ -109,7 +118,7 @@ func (s *Lexer) Scan() Token { case isSingleQuote(ch): return s.scanString() case isSingleLineComment(ch, s.lookAhead(1)): - return s.scanSingleLineComment() + return s.scanSingleLineComment(ch) case isMultiLineComment(ch, s.lookAhead(1)): return s.scanMultiLineComment() case isLeadingSign(ch): @@ -142,23 +151,34 @@ func (s *Lexer) Scan() Token { if s.config.DBMS == DBMSMySQL { return s.scanDoubleQuotedIdentifier('`') } - fallthrough + return s.scanUnknown() // backtick is only valid in mysql case ch == '#': if s.config.DBMS == DBMSSQLServer { return s.scanIdentifier(ch) } else if s.config.DBMS == DBMSMySQL { // MySQL treats # as a comment - return s.scanSingleLineComment() + return s.scanSingleLineComment(ch) } - fallthrough + return s.scanOperator(ch) case ch == '@': + if s.lookAhead(1) == '@' { + if isAlphaNumeric(s.lookAhead(2)) { + return s.scanSystemVariable() + } + s.start = s.cursor + s.nextBy(2) // consume @@ + return s.emit(JSON_OP) + } if isAlphaNumeric(s.lookAhead(1)) { if s.config.DBMS == DBMSSnowflake { return s.scanIdentifier(ch) } return s.scanBindParameter() - } else if s.lookAhead(1) == '@' { - return s.scanSystemVariable() + } + if s.lookAhead(1) == '?' || s.lookAhead(1) == '>' { + s.start = s.cursor + s.nextBy(2) // consume @? or @> + return s.emit(JSON_OP) } fallthrough case isOperator(ch): @@ -169,7 +189,7 @@ func (s *Lexer) Scan() Token { } return s.scanPunctuation() case isEOF(ch): - return Token{EOF, ""} + return s.emit(EOF) default: return s.scanUnknown() } @@ -177,10 +197,17 @@ func (s *Lexer) Scan() Token { // lookAhead returns the rune n positions ahead of the cursor. func (s *Lexer) lookAhead(n int) rune { - if s.cursor+n >= len(s.src) || s.cursor+n < 0 { + pos := s.cursor + n + if pos >= len(s.src) || pos < 0 { return 0 } - r, _ := utf8.DecodeRuneInString(s.src[s.cursor+n:]) + // Fast path for ASCII + b := s.src[pos] + if b < utf8.RuneSelf { + return rune(b) + } + // Slow path for non-ASCII + r, _ := utf8.DecodeRuneInString(s.src[pos:]) return r } @@ -199,6 +226,12 @@ func (s *Lexer) nextBy(n int) rune { if s.cursor >= len(s.src) { return 0 } + // Fast path for ASCII + b := s.src[s.cursor] + if b < utf8.RuneSelf { + return rune(b) + } + // Slow path for non-ASCII r, _ := utf8.DecodeRuneInString(s.src[s.cursor:]) return r } @@ -220,18 +253,19 @@ func (s *Lexer) matchAt(match []rune) bool { return true } -func (s *Lexer) scanNumberWithLeadingSign() Token { +func (s *Lexer) scanNumberWithLeadingSign() *Token { s.start = s.cursor ch := s.next() // consume the leading sign - return s.scanNumberic(ch) + return s.scanDecimalNumber(ch) } -func (s *Lexer) scanNumber(ch rune) Token { +func (s *Lexer) scanNumber(ch rune) *Token { s.start = s.cursor return s.scanNumberic(ch) } -func (s *Lexer) scanNumberic(ch rune) Token { +func (s *Lexer) scanNumberic(ch rune) *Token { + s.start = s.cursor if ch == '0' { nextCh := s.lookAhead(1) if nextCh == 'x' || nextCh == 'X' { @@ -241,12 +275,11 @@ func (s *Lexer) scanNumberic(ch rune) Token { } } - return s.scanDecimalNumber() + ch = s.next() // consume first digit + return s.scanDecimalNumber(ch) } -func (s *Lexer) scanDecimalNumber() Token { - ch := s.next() - +func (s *Lexer) scanDecimalNumber(ch rune) *Token { // scan digits for isDigit(ch) || ch == '.' 
|| isExpontent(ch) { if isExpontent(ch) { @@ -258,87 +291,135 @@ func (s *Lexer) scanDecimalNumber() Token { ch = s.next() } } - return Token{NUMBER, s.src[s.start:s.cursor]} + return s.emit(NUMBER) } -func (s *Lexer) scanHexNumber() Token { - ch := s.nextBy(2) // consume the leading 0x +func (s *Lexer) scanHexNumber() *Token { + ch := s.nextBy(2) // consume 0x or 0X for isDigit(ch) || ('a' <= ch && ch <= 'f') || ('A' <= ch && ch <= 'F') { ch = s.next() } - return Token{NUMBER, s.src[s.start:s.cursor]} + return s.emit(NUMBER) } -func (s *Lexer) scanOctalNumber() Token { +func (s *Lexer) scanOctalNumber() *Token { ch := s.nextBy(2) // consume the leading 0 and number for '0' <= ch && ch <= '7' { ch = s.next() } - return Token{NUMBER, s.src[s.start:s.cursor]} + return s.emit(NUMBER) } -func (s *Lexer) scanString() Token { +func (s *Lexer) scanString() *Token { s.start = s.cursor - ch := s.next() // consume the opening quote escaped := false + escapedQuote := false - for { + ch := s.next() // consume opening quote + + for ; !isEOF(ch); ch = s.next() { if escaped { - // encountered an escape character - // reset the escaped flag and continue escaped = false - ch = s.next() + escapedQuote = ch == '\'' continue } if ch == '\\' { escaped = true - ch = s.next() continue } if ch == '\'' { s.next() // consume the closing quote - return Token{STRING, s.src[s.start:s.cursor]} + return s.emit(STRING) } - - if isEOF(ch) { - // encountered EOF before closing quote - // this usually happens when the string is truncated - return Token{INCOMPLETE_STRING, s.src[s.start:s.cursor]} - } - ch = s.next() } + // Special case: if we ended with an escaped quote (e.g. ESCAPE '\') + if escapedQuote { + return s.emit(STRING) + } + // If we get here, we hit EOF before finding closing quote + return s.emit(INCOMPLETE_STRING) } -func (s *Lexer) scanIdentifier(ch rune) Token { - // NOTE: this func does not distinguish between SQL keywords and identifiers +func (s *Lexer) scanIdentifier(ch rune) *Token { s.start = s.cursor - ch = s.nextBy(utf8.RuneLen(ch)) - for isLetter(ch) || isDigit(ch) || ch == '.' || ch == '?' 
|| ch == '$' || ch == '#' || ch == '/' { + node := keywordRoot + pos := s.cursor + + // If first character is Unicode, skip trie lookup + if ch > 127 { + for isIdentifier(ch) { + s.hasDigits = s.hasDigits || isDigit(ch) + ch = s.nextBy(utf8.RuneLen(ch)) + } + if s.start == s.cursor { + return s.scanUnknown() + } + return s.emit(IDENT) + } + + // ASCII characters - try keyword matching + for isAsciiLetter(ch) || ch == '_' { + // Convert to uppercase for case-insensitive matching + upperCh := ch + if ch >= 'a' && ch <= 'z' { + upperCh -= 32 + } + + // Try to follow trie path + if next, exists := node.children[upperCh]; exists { + node = next + pos = s.cursor + ch = s.next() + } else { + // No more matches possible in trie + // Reset node for next potential keyword + // and continue scanning identifier + node = keywordRoot + ch = s.next() + break + } + } + + // If we found a complete keyword and next char is whitespace + if node.isEnd && (isPunctuation(ch) || isSpace(ch) || isEOF(ch)) { + s.cursor = pos + 1 // Include the last matched character + s.isTableIndicator = node.isTableIndicator + return s.emit(node.tokenType) + } + + // Continue scanning identifier if no keyword match + for isIdentifier(ch) { + s.hasDigits = s.hasDigits || isDigit(ch) ch = s.nextBy(utf8.RuneLen(ch)) } + + if s.start == s.cursor { + return s.scanUnknown() + } + if ch == '(' { - // if the identifier is followed by a (, then it's a function - return Token{FUNCTION, s.src[s.start:s.cursor]} + return s.emit(FUNCTION) } - return Token{IDENT, s.src[s.start:s.cursor]} + return s.emit(IDENT) } -func (s *Lexer) scanDoubleQuotedIdentifier(delimiter rune) Token { +func (s *Lexer) scanDoubleQuotedIdentifier(delimiter rune) *Token { closingDelimiter := delimiter if delimiter == '[' { closingDelimiter = ']' } s.start = s.cursor + s.hasQuotes = true ch := s.next() // consume the opening quote for { // encountered the closing quote // BUT if it's followed by .", then we should keep going - // e.g. postgre "foo"."bar" + // e.g. postgres "foo"."bar" // e.g. 
sqlserver [foo].[bar] if ch == closingDelimiter { specialCase := []rune{closingDelimiter, '.', delimiter} @@ -349,51 +430,97 @@ func (s *Lexer) scanDoubleQuotedIdentifier(delimiter rune) Token { break } if isEOF(ch) { - return Token{ERROR, s.src[s.start:s.cursor]} + s.hasQuotes = false // if we hit EOF, we clear the quotes + return s.emit(ERROR) } + s.hasDigits = s.hasDigits || isDigit(ch) ch = s.next() } s.next() // consume the closing quote - return Token{QUOTED_IDENT, s.src[s.start:s.cursor]} + return s.emit(QUOTED_IDENT) } -func (s *Lexer) scanWhitespace() Token { +func (s *Lexer) scanWhitespace() *Token { // scan whitespace, tab, newline, carriage return s.start = s.cursor ch := s.next() - for isWhitespace(ch) { + for isSpace(ch) { ch = s.next() } - return Token{WS, s.src[s.start:s.cursor]} + return s.emit(SPACE) } -func (s *Lexer) scanOperator(lastCh rune) Token { +func (s *Lexer) scanOperator(lastCh rune) *Token { s.start = s.cursor - ch := s.next() - for isOperator(ch) && !(lastCh == '=' && ch == '?') { + ch := s.next() // consume the first character + + // Check for json operators + switch lastCh { + case '-': + if ch == '>' { + ch = s.next() + if ch == '>' { + s.next() + return s.emit(JSON_OP) // ->> + } + return s.emit(JSON_OP) // -> + } + case '#': + if ch == '>' { + ch = s.next() + if ch == '>' { + s.next() + return s.emit(JSON_OP) // #>> + } + return s.emit(JSON_OP) // #> + } else if ch == '-' { + s.next() + return s.emit(JSON_OP) // #- + } + case '?': + if ch == '|' { + s.next() + return s.emit(JSON_OP) // ?| + } else if ch == '&' { + s.next() + return s.emit(JSON_OP) // ?& + } + case '<': + if ch == '@' { + s.next() + return s.emit(JSON_OP) // <@ + } + } + + for isOperator(ch) && !(lastCh == '=' && (ch == '?' || ch == '@')) { // hack: we don't want to treat "=?" 
as a single operator lastCh = ch ch = s.next() } - return Token{OPERATOR, s.src[s.start:s.cursor]} + + return s.emit(OPERATOR) } -func (s *Lexer) scanWildcard() Token { +func (s *Lexer) scanWildcard() *Token { s.start = s.cursor s.next() - return Token{WILDCARD, s.src[s.start:s.cursor]} + return s.emit(WILDCARD) } -func (s *Lexer) scanSingleLineComment() Token { +func (s *Lexer) scanSingleLineComment(ch rune) *Token { s.start = s.cursor - ch := s.nextBy(2) // consume the opening dashes + if ch == '#' { + ch = s.next() // consume the opening # + } else { + ch = s.nextBy(2) // consume the opening dashes + } for ch != '\n' && !isEOF(ch) { ch = s.next() } - return Token{COMMENT, s.src[s.start:s.cursor]} + return s.emit(COMMENT) } -func (s *Lexer) scanMultiLineComment() Token { +func (s *Lexer) scanMultiLineComment() *Token { s.start = s.cursor ch := s.nextBy(2) // consume the opening slash and asterisk for { @@ -404,20 +531,20 @@ if isEOF(ch) { // encountered EOF before closing comment // this usually happens when the comment is truncated - return Token{ERROR, s.src[s.start:s.cursor]} + return s.emit(ERROR) } ch = s.next() } - return Token{MULTILINE_COMMENT, s.src[s.start:s.cursor]} + return s.emit(MULTILINE_COMMENT) } -func (s *Lexer) scanPunctuation() Token { +func (s *Lexer) scanPunctuation() *Token { s.start = s.cursor s.next() - return Token{PUNCTUATION, s.src[s.start:s.cursor]} + return s.emit(PUNCTUATION) } -func (s *Lexer) scanDollarQuotedString() Token { +func (s *Lexer) scanDollarQuotedString() *Token { s.start = s.cursor ch := s.next() // consume the dollar sign tagStart := s.cursor @@ -432,16 +559,16 @@ if s.matchAt([]rune(tag)) { s.nextBy(len(tag)) // consume the closing tag if tag == "$func$" { - return Token{DOLLAR_QUOTED_FUNCTION, s.src[s.start:s.cursor]} + return s.emit(DOLLAR_QUOTED_FUNCTION) } - return Token{DOLLAR_QUOTED_STRING, s.src[s.start:s.cursor]} + return s.emit(DOLLAR_QUOTED_STRING) } s.next() } - return Token{ERROR, s.src[s.start:s.cursor]} + return s.emit(ERROR) } -func (s *Lexer) scanPositionalParameter() Token { +func (s *Lexer) scanPositionalParameter() *Token { s.start = s.cursor ch := s.nextBy(2) // consume the dollar sign and the number for { @@ -450,10 +577,10 @@ if !isDigit(ch) { break } ch = s.next() } - return Token{POSITIONAL_PARAMETER, s.src[s.start:s.cursor]} + return s.emit(POSITIONAL_PARAMETER) } -func (s *Lexer) scanBindParameter() Token { +func (s *Lexer) scanBindParameter() *Token { s.start = s.cursor ch := s.nextBy(2) // consume the (colon|at sign) and the char for { @@ -462,24 +589,49 @@ if !isAlphaNumeric(ch) { break } ch = s.next() } - return Token{BIND_PARAMETER, s.src[s.start:s.cursor]} + return s.emit(BIND_PARAMETER) } -func (s *Lexer) scanSystemVariable() Token { +func (s *Lexer) scanSystemVariable() *Token { s.start = s.cursor ch := s.nextBy(2) // consume @@ - for { - if !isAlphaNumeric(ch) { - break - } + // Must be followed by at least one alphanumeric character + if !isAlphaNumeric(ch) { + return s.emit(ERROR) + } + for isAlphaNumeric(ch) { ch = s.next() } - return Token{SYSTEM_VARIABLE, s.src[s.start:s.cursor]} + return s.emit(SYSTEM_VARIABLE) } -func (s *Lexer) scanUnknown() Token { +func (s *Lexer) scanUnknown() *Token { // When we see an unknown token, we advance the cursor until we see something that looks like a token boundary.
s.start = s.cursor s.next() - return Token{UNKNOWN, s.src[s.start:s.cursor]} + return s.emit(UNKNOWN) +} + +// Modify emit function to use positions and maintain links +func (s *Lexer) emit(t TokenType) *Token { + tok := s.token + lastValueToken := tok.lastValueToken + + // Zero other fields + *tok = Token{ + Type: t, + Value: s.src[s.start:s.cursor], + isTableIndicator: s.isTableIndicator, + lastValueToken: lastValueToken, + } + + tok.hasDigits = s.hasDigits + tok.hasQuotes = s.hasQuotes + + // Reset lexer state + s.start = s.cursor + s.isTableIndicator = false + s.hasDigits = false + + return tok } diff --git a/vendor/github.com/DataDog/go-sqllexer/sqllexer_utils.go b/vendor/github.com/DataDog/go-sqllexer/sqllexer_utils.go index f694ce79..2d199a10 100644 --- a/vendor/github.com/DataDog/go-sqllexer/sqllexer_utils.go +++ b/vendor/github.com/DataDog/go-sqllexer/sqllexer_utils.go @@ -9,9 +9,12 @@ type DBMSType string const ( // DBMSSQLServer is a MS SQL - DBMSSQLServer DBMSType = "mssql" + DBMSSQLServer DBMSType = "mssql" + DBMSSQLServerAlias1 DBMSType = "sql-server" // .Net tracer + DBMSSQLServerAlias2 DBMSType = "sqlserver" // Java tracer // DBMSPostgres is a PostgreSQL Server - DBMSPostgres DBMSType = "postgresql" + DBMSPostgres DBMSType = "postgresql" + DBMSPostgresAlias1 DBMSType = "postgres" // Ruby, JavaScript tracers // DBMSMySQL is a MySQL Server DBMSMySQL DBMSType = "mysql" // DBMSOracle is a Oracle Server @@ -20,242 +23,327 @@ const ( DBMSSnowflake DBMSType = "snowflake" ) -var commands = map[string]bool{ - "SELECT": true, - "INSERT": true, - "UPDATE": true, - "DELETE": true, - "CREATE": true, - "ALTER": true, - "DROP": true, - "JOIN": true, - "GRANT": true, - "REVOKE": true, - "COMMIT": true, - "BEGIN": true, - "TRUNCATE": true, - "MERGE": true, - "EXECUTE": true, - "EXEC": true, - "EXPLAIN": true, - "STRAIGHT_JOIN": true, - "USE": true, - "CLONE": true, +var dbmsAliases = map[DBMSType]DBMSType{ + DBMSSQLServerAlias1: DBMSSQLServer, + DBMSSQLServerAlias2: DBMSSQLServer, + DBMSPostgresAlias1: DBMSPostgres, } -var tableIndicators = map[string]bool{ - "FROM": true, - "JOIN": true, - "INTO": true, - "UPDATE": true, - "TABLE": true, - "STRAIGHT_JOIN": true, // MySQL - "CLONE": true, // Snowflake +func getDBMSFromAlias(alias DBMSType) DBMSType { + if canonical, exists := dbmsAliases[alias]; exists { + return canonical + } + return alias } -var keywords = map[string]bool{ - "SELECT": true, - "INSERT": true, - "UPDATE": true, - "DELETE": true, - "CREATE": true, - "ALTER": true, - "DROP": true, - "GRANT": true, - "REVOKE": true, - "ADD": true, - "ALL": true, - "AND": true, - "ANY": true, - "AS": true, - "ASC": true, - "BEGIN": true, - "BETWEEN": true, - "BY": true, - "CASE": true, - "CHECK": true, - "COLUMN": true, - "COMMIT": true, - "CONSTRAINT": true, - "DATABASE": true, - "DECLARE": true, - "DEFAULT": true, - "DESC": true, - "DISTINCT": true, - "ELSE": true, - "END": true, - "EXEC": true, - "EXISTS": true, - "FOREIGN": true, - "FROM": true, - "GROUP": true, - "HAVING": true, - "IN": true, - "INDEX": true, - "INNER": true, - "INTO": true, - "IS": true, - "JOIN": true, - "KEY": true, - "LEFT": true, - "LIKE": true, - "LIMIT": true, - "NOT": true, - "ON": true, - "OR": true, - "ORDER": true, - "OUTER": true, - "PRIMARY": true, - "PROCEDURE": true, - "REPLACE": true, - "RETURNS": true, - "RIGHT": true, - "ROLLBACK": true, - "ROWNUM": true, - "SET": true, - "SOME": true, - "TABLE": true, - "TOP": true, - "TRUNCATE": true, - "UNION": true, - "UNIQUE": true, - "USE": true, - "VALUES": true, 
- "VIEW": true, - "WHERE": true, - "CUBE": true, - "ROLLUP": true, - "LITERAL": true, - "WINDOW": true, - "VACCUM": true, - "ANALYZE": true, - "ILIKE": true, - "USING": true, - "ASSERTION": true, - "DOMAIN": true, - "CLUSTER": true, - "COPY": true, - "EXPLAIN": true, - "PLPGSQL": true, - "TRIGGER": true, - "TEMPORARY": true, - "UNLOGGED": true, - "RECURSIVE": true, - "RETURNING": true, - "OFFSET": true, - "OF": true, - "SKIP": true, +var commands = []string{ + "SELECT", + "INSERT", + "UPDATE", + "DELETE", + "CREATE", + "ALTER", + "DROP", + "JOIN", + "GRANT", + "REVOKE", + "COMMIT", + "BEGIN", + "TRUNCATE", + "MERGE", + "EXECUTE", + "EXEC", + "EXPLAIN", + "STRAIGHT_JOIN", + "USE", + "CLONE", } -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +var tableIndicatorCommands = []string{ + "JOIN", + "UPDATE", + "STRAIGHT_JOIN", // MySQL + "CLONE", // Snowflake +} + +var tableIndicatorKeywords = []string{ + "FROM", + "INTO", + "TABLE", + "EXISTS", // Drop Table If Exists + "ONLY", // PostgreSQL +} + +var keywords = []string{ + "ADD", + "ALL", + "AND", + "ANY", + "ASC", + "BETWEEN", + "BY", + "CASE", + "CHECK", + "COLUMN", + "CONSTRAINT", + "DATABASE", + "DECLARE", + "DEFAULT", + "DESC", + "DISTINCT", + "ELSE", + "END", + "ESCAPE", + "EXISTS", + "FOREIGN", + "FROM", + "GROUP", + "HAVING", + "IN", + "INDEX", + "INNER", + "INTO", + "IS", + "KEY", + "LEFT", + "LIKE", + "LIMIT", + "NOT", + "ON", + "OR", + "ORDER", + "OUT", + "OUTER", + "PRIMARY", + "PROCEDURE", + "REPLACE", + "RETURNS", + "RIGHT", + "ROLLBACK", + "ROWNUM", + "SET", + "SOME", + "TABLE", + "TOP", + "UNION", + "UNIQUE", + "VALUES", + "VIEW", + "WHERE", + "CUBE", + "ROLLUP", + "LITERAL", + "WINDOW", + "VACCUM", + "ANALYZE", + "ILIKE", + "USING", + "ASSERTION", + "DOMAIN", + "CLUSTER", + "COPY", + "PLPGSQL", + "TRIGGER", + "TEMPORARY", + "UNLOGGED", + "RECURSIVE", + "RETURNING", + "OFFSET", + "OF", + "SKIP", + "IF", + "ONLY", +} + +var ( + // Pre-defined constants for common values + booleanValues = []string{ + "TRUE", + "FALSE", + } + + nullValues = []string{ + "NULL", + } + + procedureNames = []string{ + "PROCEDURE", + "PROC", + } + + ctes = []string{ + "WITH", + } + + alias = []string{ + "AS", + } +) + +// buildCombinedTrie combines all types of SQL keywords into a single trie +// This trie is used for efficient case-insensitive keyword matching during lexing +func buildCombinedTrie() *trieNode { + root := &trieNode{children: make(map[rune]*trieNode)} + + // Add all types of keywords + addToTrie(root, commands, COMMAND, false) + addToTrie(root, keywords, KEYWORD, false) + addToTrie(root, tableIndicatorCommands, COMMAND, true) + addToTrie(root, tableIndicatorKeywords, KEYWORD, true) + addToTrie(root, booleanValues, BOOLEAN, false) + addToTrie(root, nullValues, NULL, false) + addToTrie(root, procedureNames, PROC_INDICATOR, false) + addToTrie(root, ctes, CTE_INDICATOR, false) + addToTrie(root, alias, ALIAS_INDICATOR, false) + + return root +} + +func addToTrie(root *trieNode, words []string, tokenType TokenType, isTableIndicator bool) { + for _, word := range words { + node := root + // Convert to uppercase for case-insensitive matching + for _, ch := range strings.ToUpper(word) { + if next, exists := node.children[ch]; exists { + node = next + } else { + next = &trieNode{children: make(map[rune]*trieNode)} + node.children[ch] = next + node = next + } + } + node.isEnd = true + node.tokenType = tokenType + node.isTableIndicator = isTableIndicator + } +} + +var keywordRoot = buildCombinedTrie() + 
+// TODO: Optimize these functions to work with rune positions instead of string operations +// They are currently used by obfuscator and normalizer, which we'll optimize later +func replaceDigits(token *Token, placeholder string) string { + var replacedToken strings.Builder + replacedToken.Grow(len(token.Value)) + + var lastWasDigit bool + for _, r := range token.Value { + if isDigit(r) { + if !lastWasDigit { + replacedToken.WriteString(placeholder) + lastWasDigit = true + } + } else { + replacedToken.WriteRune(r) + lastWasDigit = false + } + } + + return replacedToken.String() } +func trimQuotes(token *Token) string { + var trimmedToken strings.Builder + trimmedToken.Grow(len(token.Value)) + + for _, r := range token.Value { + if isDoubleQuote(r) || r == '[' || r == ']' || r == '`' { + // trimmedToken.WriteString(placeholder) + } else { + trimmedToken.WriteRune(r) + } + } + token.hasQuotes = false + return trimmedToken.String() +} + +// isDigit checks if a rune is a digit (0-9) func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' + return ch >= '0' && ch <= '9' +} + +// isLeadingSign checks if a rune is + or - +func isLeadingSign(ch rune) bool { + return ch == '+' || ch == '-' } +// isExponent checks if a rune is an exponent (e or E) func isExpontent(ch rune) bool { return ch == 'e' || ch == 'E' } -func isLeadingSign(ch rune) bool { - return ch == '+' || ch == '-' +// isSpace checks if a rune is a space or newline func isSpace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' } +// isAsciiLetter checks if a rune is an ASCII letter (a-z or A-Z) +func isAsciiLetter(ch rune) bool { + return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') +} + +// isLetter checks if a rune is an ASCII letter (a-z or A-Z) or unicode letter func isLetter(ch rune) bool { - return unicode.IsLetter(ch) || ch == '_' + return isAsciiLetter(ch) || ch == '_' || + (ch > 127 && unicode.IsLetter(ch)) } +// isAlphaNumeric checks if a rune is an ASCII letter (a-z or A-Z), digit (0-9), or unicode number func isAlphaNumeric(ch rune) bool { - return isLetter(ch) || isDigit(ch) + return isLetter(ch) || isDigit(ch) || + (ch > 127 && unicode.IsNumber(ch)) } +// isDoubleQuote checks if a rune is a double quote (") func isDoubleQuote(ch rune) bool { return ch == '"' } +// isSingleQuote checks if a rune is a single quote (') func isSingleQuote(ch rune) bool { return ch == '\'' } +// isOperator checks if a rune is an operator func isOperator(ch rune) bool { - return ch == '+' || ch == '-' || ch == '*' || ch == '/' || ch == '=' || ch == '<' || ch == '>' || ch == '!' || ch == '&' || ch == '|' || ch == '^' || ch == '%' || ch == '~' || ch == '?' || ch == '@' || ch == ':' || ch == '#' + return ch == '+' || ch == '-' || ch == '*' || ch == '/' || ch == '=' || ch == '<' || ch == '>' || + ch == '!' || ch == '&' || ch == '|' || ch == '^' || ch == '%' || ch == '~' || ch == '?'
|| + ch == '@' || ch == ':' || ch == '#' } +// isWildcard checks if a rune is a wildcard (*) func isWildcard(ch rune) bool { return ch == '*' } +// isSinglelineComment checks if two runes are a single line comment (--) func isSingleLineComment(ch rune, nextCh rune) bool { return ch == '-' && nextCh == '-' } +// isMultiLineComment checks if two runes are a multi line comment (/*) func isMultiLineComment(ch rune, nextCh rune) bool { return ch == '/' && nextCh == '*' } +// isPunctuation checks if a rune is a punctuation character func isPunctuation(ch rune) bool { - return ch == '(' || ch == ')' || ch == ',' || ch == ';' || ch == '.' || ch == ':' || ch == '[' || ch == ']' || ch == '{' || ch == '}' + return ch == '(' || ch == ')' || ch == ',' || ch == ';' || ch == '.' || ch == ':' || + ch == '[' || ch == ']' || ch == '{' || ch == '}' } +// isEOF checks if a rune is EOF (end of file) func isEOF(ch rune) bool { return ch == 0 } -func isCommand(ident string) bool { - _, ok := commands[ident] - return ok -} - -func isTableIndicator(ident string) bool { - _, ok := tableIndicators[ident] - return ok -} - -func isSQLKeyword(token *Token) bool { - if token.Type != IDENT { - return false - } - _, ok := keywords[strings.ToUpper(token.Value)] - return ok -} - -func isProcedure(token *Token) bool { - if token.Type != IDENT { - return false - } - return strings.ToUpper(token.Value) == "PROCEDURE" || strings.ToUpper(token.Value) == "PROC" -} - -func isBoolean(ident string) bool { - return strings.ToUpper(ident) == "TRUE" || strings.ToUpper(ident) == "FALSE" -} - -func isNull(ident string) bool { - return strings.ToUpper(ident) == "NULL" -} - -func replaceDigits(input string, placeholder string) string { - var builder strings.Builder - - i := 0 - for i < len(input) { - if isDigit(rune(input[i])) { - builder.WriteString(placeholder) - for i < len(input) && isDigit(rune(input[i])) { - i++ - } - } else { - builder.WriteByte(input[i]) - i++ - } - } - - return builder.String() +// isIdentifier checks if a rune is an identifier +func isIdentifier(ch rune) bool { + return ch == '.' || ch == '?' || ch == '$' || ch == '#' || ch == '/' || ch == '@' || ch == '!' || isLetter(ch) || isDigit(ch) } -func trimQuotes(input string, delim string, closingDelim string) string { - replacer := strings.NewReplacer(delim, "", closingDelim, "") - return replacer.Replace(input) +// isValueToken checks if a token is a value token +// A value token is a token that is not a space, comment, or EOF +func isValueToken(token *Token) bool { + return token.Type != EOF && token.Type != SPACE && token.Type != COMMENT && token.Type != MULTILINE_COMMENT } diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/LICENSE b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/attributes.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/attributes.go new file mode 100644 index 00000000..133cf7b8 --- /dev/null +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/attributes.go @@ -0,0 +1,304 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attributes + +import ( + "fmt" + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + semconv127 "go.opentelemetry.io/collector/semconv/v1.27.0" + conventions "go.opentelemetry.io/collector/semconv/v1.6.1" +) + +// customContainerTagPrefix defines the prefix for custom container tags. +const customContainerTagPrefix = "datadog.container.tag." + +var ( + // coreMapping defines the mapping between OpenTelemetry semantic conventions + // and Datadog Agent conventions for env, service and version. + coreMapping = map[string]string{ + // Datadog conventions + // https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging/ + conventions.AttributeDeploymentEnvironment: "env", + semconv127.AttributeServiceName: "service", + semconv127.AttributeServiceVersion: "version", + semconv127.AttributeDeploymentEnvironmentName: "env", + } + + // ContainerMappings defines the mapping between OpenTelemetry semantic conventions + // and Datadog Agent conventions for containers. 
+ ContainerMappings = map[string]string{ + // Containers + semconv127.AttributeContainerID: "container_id", + semconv127.AttributeContainerName: "container_name", + semconv127.AttributeContainerImageName: "image_name", + conventions.AttributeContainerImageTag: "image_tag", + semconv127.AttributeContainerRuntime: "runtime", + + // Cloud conventions + // https://www.datadoghq.com/blog/tagging-best-practices/ + semconv127.AttributeCloudProvider: "cloud_provider", + semconv127.AttributeCloudRegion: "region", + semconv127.AttributeCloudAvailabilityZone: "zone", + + // ECS conventions + // https://github.com/DataDog/datadog-agent/blob/e081bed/pkg/tagger/collectors/ecs_extract.go + semconv127.AttributeAWSECSTaskFamily: "task_family", + semconv127.AttributeAWSECSTaskARN: "task_arn", + semconv127.AttributeAWSECSClusterARN: "ecs_cluster_name", + semconv127.AttributeAWSECSTaskRevision: "task_version", + semconv127.AttributeAWSECSContainerARN: "ecs_container_name", + + // Kubernetes resource name (via semantic conventions) + // https://github.com/DataDog/datadog-agent/blob/e081bed/pkg/util/kubernetes/const.go + semconv127.AttributeK8SContainerName: "kube_container_name", + semconv127.AttributeK8SClusterName: "kube_cluster_name", + semconv127.AttributeK8SDeploymentName: "kube_deployment", + semconv127.AttributeK8SReplicaSetName: "kube_replica_set", + semconv127.AttributeK8SStatefulSetName: "kube_stateful_set", + semconv127.AttributeK8SDaemonSetName: "kube_daemon_set", + semconv127.AttributeK8SJobName: "kube_job", + semconv127.AttributeK8SCronJobName: "kube_cronjob", + semconv127.AttributeK8SNamespaceName: "kube_namespace", + semconv127.AttributeK8SPodName: "pod_name", + } + + // Kubernetes mappings defines the mapping between Kubernetes conventions (both general and Datadog specific) + // and Datadog Agent conventions. The Datadog Agent conventions can be found at + // https://github.com/DataDog/datadog-agent/blob/e081bed/pkg/tagger/collectors/const.go and + // https://github.com/DataDog/datadog-agent/blob/e081bed/pkg/util/kubernetes/const.go + kubernetesMapping = map[string]string{ + // Standard Datadog labels + "tags.datadoghq.com/env": "env", + "tags.datadoghq.com/service": "service", + "tags.datadoghq.com/version": "version", + + // Standard Kubernetes labels + "app.kubernetes.io/name": "kube_app_name", + "app.kubernetes.io/instance": "kube_app_instance", + "app.kubernetes.io/version": "kube_app_version", + "app.kuberenetes.io/component": "kube_app_component", + "app.kubernetes.io/part-of": "kube_app_part_of", + "app.kubernetes.io/managed-by": "kube_app_managed_by", + } + + // Kubernetes out of the box Datadog tags + // https://docs.datadoghq.com/containers/kubernetes/tag/?tab=containerizedagent#out-of-the-box-tags + // https://github.com/DataDog/datadog-agent/blob/d33d042d6786e8b85f72bb627fbf06ad8a658031/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go + // Note: if any OTel semantics happen to overlap with these tag names, they will also be added as Datadog tags. 
+ kubernetesDDTags = map[string]struct{}{ + "architecture": {}, + "availability-zone": {}, + "chronos_job": {}, + "chronos_job_owner": {}, + "cluster_name": {}, + "container_id": {}, + "container_name": {}, + "dd_remote_config_id": {}, + "dd_remote_config_rev": {}, + "display_container_name": {}, + "docker_image": {}, + "ecs_cluster_name": {}, + "ecs_container_name": {}, + "eks_fargate_node": {}, + "env": {}, + "git.commit.sha": {}, + "git.repository_url": {}, + "image_id": {}, + "image_name": {}, + "image_tag": {}, + "kube_app_component": {}, + "kube_app_instance": {}, + "kube_app_managed_by": {}, + "kube_app_name": {}, + "kube_app_part_of": {}, + "kube_app_version": {}, + "kube_container_name": {}, + "kube_cronjob": {}, + "kube_daemon_set": {}, + "kube_deployment": {}, + "kube_job": {}, + "kube_namespace": {}, + "kube_ownerref_kind": {}, + "kube_ownerref_name": {}, + "kube_priority_class": {}, + "kube_qos": {}, + "kube_replica_set": {}, + "kube_replication_controller": {}, + "kube_service": {}, + "kube_stateful_set": {}, + "language": {}, + "marathon_app": {}, + "mesos_task": {}, + "nomad_dc": {}, + "nomad_group": {}, + "nomad_job": {}, + "nomad_namespace": {}, + "nomad_task": {}, + "oshift_deployment": {}, + "oshift_deployment_config": {}, + "os_name": {}, + "os_version": {}, + "persistentvolumeclaim": {}, + "pod_name": {}, + "pod_phase": {}, + "rancher_container": {}, + "rancher_service": {}, + "rancher_stack": {}, + "region": {}, + "service": {}, + "short_image": {}, + "swarm_namespace": {}, + "swarm_service": {}, + "task_name": {}, + "task_family": {}, + "task_version": {}, + "task_arn": {}, + "version": {}, + } + + // HTTPMappings defines the mapping between OpenTelemetry semantic conventions + // and Datadog Agent conventions for HTTP attributes. + HTTPMappings = map[string]string{ + semconv127.AttributeClientAddress: "http.client_ip", + semconv127.AttributeHTTPResponseBodySize: "http.response.content_length", + semconv127.AttributeHTTPResponseStatusCode: "http.status_code", + semconv127.AttributeHTTPRequestBodySize: "http.request.content_length", + "http.request.header.referrer": "http.referrer", + semconv127.AttributeHTTPRequestMethod: "http.method", + semconv127.AttributeHTTPRoute: "http.route", + semconv127.AttributeNetworkProtocolVersion: "http.version", + semconv127.AttributeServerAddress: "http.server_name", + semconv127.AttributeURLFull: "http.url", + semconv127.AttributeUserAgentOriginal: "http.useragent", + } +) + +// TagsFromAttributes converts a selected list of attributes +// to a tag list that can be added to metrics. 
+func TagsFromAttributes(attrs pcommon.Map) []string { + tags := make([]string, 0, attrs.Len()) + + var processAttributes processAttributes + var systemAttributes systemAttributes + + attrs.Range(func(key string, value pcommon.Value) bool { + switch key { + // Process attributes + case semconv127.AttributeProcessExecutableName: + processAttributes.ExecutableName = value.Str() + case semconv127.AttributeProcessExecutablePath: + processAttributes.ExecutablePath = value.Str() + case semconv127.AttributeProcessCommand: + processAttributes.Command = value.Str() + case semconv127.AttributeProcessCommandLine: + processAttributes.CommandLine = value.Str() + case semconv127.AttributeProcessPID: + processAttributes.PID = value.Int() + case semconv127.AttributeProcessOwner: + processAttributes.Owner = value.Str() + + // System attributes + case semconv127.AttributeOSType: + systemAttributes.OSType = value.Str() + } + + // core attributes mapping + if datadogKey, found := coreMapping[key]; found && value.Str() != "" { + tags = append(tags, fmt.Sprintf("%s:%s", datadogKey, value.Str())) + } + + // Kubernetes labels mapping + if datadogKey, found := kubernetesMapping[key]; found && value.Str() != "" { + tags = append(tags, fmt.Sprintf("%s:%s", datadogKey, value.Str())) + } + + // Kubernetes DD tags + if _, found := kubernetesDDTags[key]; found { + tags = append(tags, fmt.Sprintf("%s:%s", key, value.Str())) + } + return true + }) + + // Container Tag mappings + ctags := ContainerTagsFromResourceAttributes(attrs) + for key, val := range ctags { + tags = append(tags, fmt.Sprintf("%s:%s", key, val)) + } + + tags = append(tags, processAttributes.extractTags()...) + tags = append(tags, systemAttributes.extractTags()...) + + return tags +} + +// OriginIDFromAttributes gets the origin IDs from resource attributes. +// If not found, an empty string is returned for each of them. +func OriginIDFromAttributes(attrs pcommon.Map) (originID string) { + // originID is always empty. Container ID is preferred over Kubernetes pod UID. + // Prefixes come from pkg/util/kubernetes/kubelet and pkg/util/containers. + if containerID, ok := attrs.Get(conventions.AttributeContainerID); ok { + originID = "container_id://" + containerID.AsString() + } else if podUID, ok := attrs.Get(conventions.AttributeK8SPodUID); ok { + originID = "kubernetes_pod_uid://" + podUID.AsString() + } + return +} + +// ContainerTagFromResourceAttributes extracts container tags from the given +// set of resource attributes. Container tags are extracted via semantic +// conventions. Customer container tags are extracted via resource attributes +// prefixed by datadog.container.tag. Custom container tag values of a different type +// than ValueTypeStr will be ignored. +// In the case of duplicates between semantic conventions and custom resource attributes +// (e.g. container.id, datadog.container.tag.container_id) the semantic convention takes +// precedence. +func ContainerTagsFromResourceAttributes(attrs pcommon.Map) map[string]string { + ddtags := make(map[string]string) + attrs.Range(func(key string, value pcommon.Value) bool { + // Semantic Conventions + if datadogKey, found := ContainerMappings[key]; found && value.Str() != "" { + ddtags[datadogKey] = value.Str() + } + // Custom (datadog.container.tag namespace) + if strings.HasPrefix(key, customContainerTagPrefix) { + customKey := strings.TrimPrefix(key, customContainerTagPrefix) + if customKey != "" && value.Str() != "" { + // Do not replace if set via semantic conventions mappings. 
+ if _, found := ddtags[customKey]; !found { + ddtags[customKey] = value.Str() + } + } + } + return true + }) + return ddtags +} + +// ContainerTagFromAttributes extracts the value of _dd.tags.container from the given +// set of attributes. +// Deprecated: Deprecated in favor of ContainerTagFromResourceAttributes. +func ContainerTagFromAttributes(attr map[string]string) map[string]string { + ddtags := make(map[string]string) + for key, val := range attr { + datadogKey, found := ContainerMappings[key] + if !found { + continue + } + ddtags[datadogKey] = val + } + return ddtags +} diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/azure/azure.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/azure/azure.go new file mode 100644 index 00000000..1b52838e --- /dev/null +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/azure/azure.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + conventions "go.opentelemetry.io/collector/semconv/v1.6.1" +) + +const ( + // AttributeResourceGroupName is the Azure resource group name attribute + AttributeResourceGroupName = "azure.resourcegroup.name" +) + +// HostInfo has the Azure host information +type HostInfo struct { + HostAliases []string +} + +// HostnameFromAttrs gets the Azure hostname from attributes. +func HostnameFromAttrs(attrs pcommon.Map) (string, bool) { + if vmID, ok := attrs.Get(conventions.AttributeHostID); ok { + return vmID.Str(), true + } + + if hostname, ok := attrs.Get(conventions.AttributeHostName); ok { + return hostname.Str(), true + } + + return "", false +} + +// ClusterNameFromAttributes gets the Azure cluster name from attributes +func ClusterNameFromAttributes(attrs pcommon.Map) (string, bool) { + // Get cluster name from resource group from pkg/util/cloudprovider/azure:GetClusterName + if resourceGroup, ok := attrs.Get(AttributeResourceGroupName); ok { + splitAll := strings.Split(resourceGroup.Str(), "_") + if len(splitAll) < 4 || strings.ToLower(splitAll[0]) != "mc" { + return "", false // Failed to parse + } + return splitAll[len(splitAll)-2], true + } + + return "", false +} diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/ec2/ec2.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/ec2/ec2.go new file mode 100644 index 00000000..1aa80649 --- /dev/null +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/ec2/ec2.go @@ -0,0 +1,95 @@ +// Copyright OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ec2 + +import ( + "fmt" + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + conventions "go.opentelemetry.io/collector/semconv/v1.6.1" +) + +var ( + defaultPrefixes = [3]string{"ip-", "domu", "ec2amaz-"} + ec2TagPrefix = "ec2.tag." + clusterTagPrefix = ec2TagPrefix + "kubernetes.io/cluster/" +) + +// HostInfo holds the EC2 host information. +type HostInfo struct { + InstanceID string + EC2Hostname string + EC2Tags []string +} + +// isDefaultHostname checks if a hostname is an EC2 default +func isDefaultHostname(hostname string) bool { + for _, val := range defaultPrefixes { + if strings.HasPrefix(hostname, val) { + return true + } + } + + return false +} + +// HostnameFromAttrs gets a valid hostname from labels +// if available +func HostnameFromAttrs(attrs pcommon.Map) (string, bool) { + if hostID, ok := attrs.Get(conventions.AttributeHostID); ok { + return hostID.Str(), true + } + + return "", false +} + +// HostInfoFromAttributes gets EC2 host info from attributes following +// OpenTelemetry semantic conventions +func HostInfoFromAttributes(attrs pcommon.Map) (hostInfo *HostInfo) { + hostInfo = &HostInfo{} + + if hostID, ok := attrs.Get(conventions.AttributeHostID); ok { + hostInfo.InstanceID = hostID.Str() + } + + if hostName, ok := attrs.Get(conventions.AttributeHostName); ok { + hostInfo.EC2Hostname = hostName.Str() + } + + attrs.Range(func(k string, v pcommon.Value) bool { + if strings.HasPrefix(k, ec2TagPrefix) { + tag := fmt.Sprintf("%s:%s", strings.TrimPrefix(k, ec2TagPrefix), v.Str()) + hostInfo.EC2Tags = append(hostInfo.EC2Tags, tag) + } + return true + }) + + return +} + +// ClusterNameFromAttributes gets the AWS cluster name from attributes +func ClusterNameFromAttributes(attrs pcommon.Map) (clusterName string, ok bool) { + // Get cluster name from tag keys + // https://github.com/DataDog/datadog-agent/blob/1c94b11/pkg/util/ec2/ec2.go#L238 + attrs.Range(func(k string, _ pcommon.Value) bool { + if strings.HasPrefix(k, clusterTagPrefix) { + clusterName = strings.Split(k, "/")[2] + ok = true + } + return true + }) + return +} diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/gateway_usage.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/gateway_usage.go new file mode 100644 index 00000000..27d965f8 --- /dev/null +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/gateway_usage.go @@ -0,0 +1,59 @@ +// Copyright OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package attributes + +import "sync" + +// GatewayUsage is a HostFromAttributesHandler that detects if the setup is a gateway. +// If two attributes have different hostnames, then we consider the setup is a gateway. +type GatewayUsage struct { + firstHostname string + gatewayUsage bool + m sync.Mutex +} + +var _ HostFromAttributesHandler = (*GatewayUsage)(nil) + +// NewGatewayUsage returns a new GatewayUsage. +// If two attributes have different hostnames, then we consider the setup is a gateway. +func NewGatewayUsage() *GatewayUsage { + return &GatewayUsage{} +} + +// OnHost implements HostFromAttributesHandler. +func (g *GatewayUsage) OnHost(host string) { + g.m.Lock() + defer g.m.Unlock() + if g.firstHostname == "" { + g.firstHostname = host + } else if g.firstHostname != host { + g.gatewayUsage = true + } +} + +// GatewayUsage returns true if the GatewayUsage was detected. +func (g *GatewayUsage) GatewayUsage() bool { + g.m.Lock() + defer g.m.Unlock() + return g.gatewayUsage +} + +// Gauge returns 1 if the GatewayUsage was detected, 0 otherwise. +func (g *GatewayUsage) Gauge() float64 { + if g.GatewayUsage() { + return 1 + } + return 0 +} diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/gcp/gcp.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/gcp/gcp.go new file mode 100644 index 00000000..81b9cdfd --- /dev/null +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/gcp/gcp.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +import ( + "fmt" + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + conventions "go.opentelemetry.io/collector/semconv/v1.6.1" +) + +// HostInfo holds the GCP host information. +type HostInfo struct { + HostAliases []string + GCPTags []string +} + +// HostnameFromAttrs gets the GCP Integration hostname from attributes +// if available. +func HostnameFromAttrs(attrs pcommon.Map) (string, bool) { + hostName, ok := attrs.Get(conventions.AttributeHostName) + if !ok { + // We need the hostname. + return "", false + } + + name := hostName.Str() + if strings.Count(name, ".") >= 3 { + // Unless the host.name attribute has been tampered with, use the same logic as the Agent to + // extract the hostname: https://github.com/DataDog/datadog-agent/blob/7.36.0/pkg/util/cloudproviders/gce/gce.go#L106 + name = strings.SplitN(name, ".", 2)[0] + } + + cloudAccount, ok := attrs.Get(conventions.AttributeCloudAccountID) + if !ok { + // We need the project ID. 
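`GatewayUsage` is small enough to exercise directly: it records the first hostname it sees and flips to gateway mode as soon as a different one arrives. For example:

```go
package main

import (
	"fmt"

	"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
)

func main() {
	gu := attributes.NewGatewayUsage()

	gu.OnHost("host-a")
	gu.OnHost("host-a")
	fmt.Println(gu.Gauge()) // 0: only one distinct hostname so far

	gu.OnHost("host-b")
	fmt.Println(gu.Gauge()) // 1: two distinct hostnames, so a gateway setup is assumed
}
```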
+ return "", false + } + + alias := fmt.Sprintf("%s.%s", name, cloudAccount.Str()) + return alias, true +} + +// HostInfoFromAttrs gets GCP host info from attributes following +// OpenTelemetry semantic conventions +func HostInfoFromAttrs(attrs pcommon.Map) (hostInfo *HostInfo) { + hostInfo = &HostInfo{} + + if hostID, ok := attrs.Get(conventions.AttributeHostID); ok { + hostInfo.GCPTags = append(hostInfo.GCPTags, fmt.Sprintf("instance-id:%s", hostID.Str())) + } + + if cloudZone, ok := attrs.Get(conventions.AttributeCloudAvailabilityZone); ok { + hostInfo.GCPTags = append(hostInfo.GCPTags, fmt.Sprintf("zone:%s", cloudZone.Str())) + } + + if hostType, ok := attrs.Get(conventions.AttributeHostType); ok { + hostInfo.GCPTags = append(hostInfo.GCPTags, fmt.Sprintf("instance-type:%s", hostType.Str())) + } + + if cloudAccount, ok := attrs.Get(conventions.AttributeCloudAccountID); ok { + hostInfo.GCPTags = append(hostInfo.GCPTags, fmt.Sprintf("project:%s", cloudAccount.Str())) + } + + return +} diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/process.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/process.go new file mode 100644 index 00000000..29378e78 --- /dev/null +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/process.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attributes + +import ( + "fmt" + + conventions "go.opentelemetry.io/collector/semconv/v1.6.1" +) + +type processAttributes struct { + ExecutableName string + ExecutablePath string + Command string + CommandLine string + PID int64 + Owner string +} + +func (pattrs *processAttributes) extractTags() []string { + tags := make([]string, 0, 1) + + // According to OTel conventions: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/process.md, + // a process can be defined by any of the 4 following attributes: process.executable.name, process.executable.path, process.command or process.command_line + // (process.command_args isn't in the current attribute conventions: https://github.com/open-telemetry/opentelemetry-collector/blob/ecb27f49d4e26ae42d82e6ea18d57b08e252452d/model/semconv/opentelemetry.go#L58-L63) + // We go through them, and add the first available one as a tag to identify the process. + // We don't want to add all of them to avoid unnecessarily increasing the number of tags attached to a metric. + + // TODO: check if this order should be changed. 
+ switch { + case pattrs.ExecutableName != "": // otelcol + tags = append(tags, fmt.Sprintf("%s:%s", conventions.AttributeProcessExecutableName, pattrs.ExecutableName)) + case pattrs.ExecutablePath != "": // /usr/bin/cmd/otelcol + tags = append(tags, fmt.Sprintf("%s:%s", conventions.AttributeProcessExecutablePath, pattrs.ExecutablePath)) + case pattrs.Command != "": // cmd/otelcol + tags = append(tags, fmt.Sprintf("%s:%s", conventions.AttributeProcessCommand, pattrs.Command)) + case pattrs.CommandLine != "": // cmd/otelcol --config="/path/to/config.yaml" + tags = append(tags, fmt.Sprintf("%s:%s", conventions.AttributeProcessCommandLine, pattrs.CommandLine)) + } + + // For now, we don't care about the process ID nor the process owner. + + return tags +} diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source.go new file mode 100644 index 00000000..c145975f --- /dev/null +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source.go @@ -0,0 +1,168 @@ +// Copyright OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attributes + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/azure" + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/ec2" + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/gcp" + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source" +) + +const ( + // AttributeDatadogHostname the datadog host name attribute + AttributeDatadogHostname = "datadog.host.name" + // AttributeK8sNodeName the datadog k8s node name attribute + AttributeK8sNodeName = "k8s.node.name" + // Attribute host is a literal host tag. + // We check for this to avoid double tagging. + AttributeHost = "host" +) + +func getClusterName(attrs pcommon.Map) (string, bool) { + if k8sClusterName, ok := attrs.Get(conventions.AttributeK8SClusterName); ok { + return k8sClusterName.Str(), true + } + + cloudProvider, ok := attrs.Get(conventions.AttributeCloudProvider) + if ok && cloudProvider.Str() == conventions.AttributeCloudProviderAzure { + return azure.ClusterNameFromAttributes(attrs) + } else if ok && cloudProvider.Str() == conventions.AttributeCloudProviderAWS { + return ec2.ClusterNameFromAttributes(attrs) + } + + return "", false +} + +// hostnameFromAttributes tries to get a valid hostname from attributes by checking, in order: +// +// 1. the "host" attribute to avoid double tagging if present. +// +// 2. a custom Datadog hostname provided by the "datadog.host.name" attribute +// +// 3. cloud provider specific hostname for AWS, Azure or GCP, +// +// 4. the Kubernetes node name (and cluster name if available), +// +// 5. the cloud provider host ID and +// +// 6. the host.name attribute. 
+// +// It returns a boolean value indicated if any name was found +func hostnameFromAttributes(attrs pcommon.Map) (string, bool) { + // Check if the host is localhost or 0.0.0.0, if so discard it. + // We don't do the more strict validation done for metadata, + // to avoid breaking users existing invalid-but-accepted hostnames. + var invalidHosts = map[string]struct{}{ + "0.0.0.0": {}, + "127.0.0.1": {}, + "localhost": {}, + "localhost.localdomain": {}, + "localhost6.localdomain6": {}, + "ip6-localhost": {}, + } + + candidateHost, ok := unsanitizedHostnameFromAttributes(attrs) + if _, invalid := invalidHosts[candidateHost]; invalid { + return "", false + } + return candidateHost, ok +} + +func k8sHostnameFromAttributes(attrs pcommon.Map) (string, bool) { + node, ok := attrs.Get(AttributeK8sNodeName) + if !ok { + return "", false + } + + if cluster, ok := getClusterName(attrs); ok { + return node.Str() + "-" + cluster, true + } + return node.Str(), true +} + +func unsanitizedHostnameFromAttributes(attrs pcommon.Map) (string, bool) { + // Literal 'host' tag. Check and use to avoid double tagging. + if literalHost, ok := attrs.Get(AttributeHost); ok { + // Use even if not a string, so that we avoid double tagging if + // `resource_attributes_as_tags` is true and 'host' has a non-string value. + return literalHost.AsString(), true + } + + // Custom hostname: useful for overriding in k8s/cloud envs + if customHostname, ok := attrs.Get(AttributeDatadogHostname); ok { + return customHostname.Str(), true + } + + if launchType, ok := attrs.Get(conventions.AttributeAWSECSLaunchtype); ok && launchType.Str() == conventions.AttributeAWSECSLaunchtypeFargate { + // If on AWS ECS Fargate, we don't have a hostname + return "", false + } + + cloudProvider, ok := attrs.Get(conventions.AttributeCloudProvider) + switch { + case ok && cloudProvider.Str() == conventions.AttributeCloudProviderAWS: + return ec2.HostnameFromAttrs(attrs) + case ok && cloudProvider.Str() == conventions.AttributeCloudProviderGCP: + return gcp.HostnameFromAttrs(attrs) + case ok && cloudProvider.Str() == conventions.AttributeCloudProviderAzure: + return azure.HostnameFromAttrs(attrs) + } + + // Kubernetes: node-cluster if cluster name is available, else node + k8sName, k8sOk := k8sHostnameFromAttributes(attrs) + if k8sOk { + return k8sName, true + } + + // host id from cloud provider + if hostID, ok := attrs.Get(conventions.AttributeHostID); ok { + return hostID.Str(), true + } + + // hostname from cloud provider or OS + if hostName, ok := attrs.Get(conventions.AttributeHostName); ok { + return hostName.Str(), true + } + + return "", false +} + +// HostFromAttributesHandler calls OnHost when a hostname is extracted from attributes. +type HostFromAttributesHandler interface { + OnHost(string) +} + +// SourceFromAttrs gets a telemetry signal source from its attributes. +// Deprecated: Use Translator.ResourceToSource or Translator.AttributesToSource instead. 
+func SourceFromAttrs(attrs pcommon.Map, hostFromAttributesHandler HostFromAttributesHandler) (source.Source, bool) { + if launchType, ok := attrs.Get(conventions.AttributeAWSECSLaunchtype); ok && launchType.Str() == conventions.AttributeAWSECSLaunchtypeFargate { + if taskARN, ok := attrs.Get(conventions.AttributeAWSECSTaskARN); ok { + return source.Source{Kind: source.AWSECSFargateKind, Identifier: taskARN.Str()}, true + } + } + + if host, ok := hostnameFromAttributes(attrs); ok { + if hostFromAttributesHandler != nil { + hostFromAttributesHandler.OnHost(host) + } + return source.Source{Kind: source.HostnameKind, Identifier: host}, true + } + + return source.Source{}, false +} diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source/source_provider.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source/source_provider.go new file mode 100644 index 00000000..4f686f9b --- /dev/null +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source/source_provider.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package source + +import ( + "context" + "fmt" +) + +// Kind of source +type Kind string + +const ( + // InvalidKind is an invalid kind. It is the zero value of Kind. + InvalidKind Kind = "" + // HostnameKind is a host source. + HostnameKind Kind = "host" + // AWSECSFargateKind is a serverless source on AWS ECS Fargate. + AWSECSFargateKind Kind = "task_arn" +) + +// Source represents a telemetry source. +type Source struct { + // Kind of source (serverless v. host). + Kind Kind + // Identifier that uniquely determines the source. + Identifier string +} + +// Tag associated to a source. +func (s *Source) Tag() string { + return fmt.Sprintf("%s:%s", s.Kind, s.Identifier) +} + +// Provider identifies a source. +type Provider interface { + // Source gets the source from the current context. + Source(ctx context.Context) (Source, error) +} diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/system.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/system.go new file mode 100644 index 00000000..01b45b9d --- /dev/null +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/system.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
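Putting the pieces together, `SourceFromAttrs` walks the precedence list documented above: a custom `datadog.host.name` wins over cloud and Kubernetes attributes, and the resulting `Source` renders as a `kind:identifier` tag. A small sketch (attribute values are made up; the handler argument may be nil):

```go
package main

import (
	"fmt"

	"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	attrs := pcommon.NewMap()
	attrs.PutStr("datadog.host.name", "override-host") // custom hostname takes precedence
	attrs.PutStr("host.id", "i-0123456789abcdef0")     // would be used only as a fallback

	if src, ok := attributes.SourceFromAttrs(attrs, nil); ok {
		fmt.Println(src.Kind, src.Identifier) // host override-host
		fmt.Println(src.Tag())                // host:override-host
	}
}
```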
+ +package attributes + +import ( + "fmt" + + conventions "go.opentelemetry.io/collector/semconv/v1.6.1" +) + +type systemAttributes struct { + OSType string +} + +func (sattrs *systemAttributes) extractTags() []string { + tags := make([]string, 0, 1) + + // Add OS type, eg. WINDOWS, LINUX, etc. + if sattrs.OSType != "" { + tags = append(tags, fmt.Sprintf("%s:%s", conventions.AttributeOSType, sattrs.OSType)) + } + + return tags +} diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/translator.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/translator.go new file mode 100644 index 00000000..e10cda81 --- /dev/null +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/translator.go @@ -0,0 +1,71 @@ +// Copyright OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attributes + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source" +) + +const missingSourceMetricName string = "datadog.otlp_translator.resources.missing_source" + +// Translator of attributes. +type Translator struct { + missingSources metric.Int64Counter +} + +// NewTranslator returns a new attributes translator. +func NewTranslator(set component.TelemetrySettings) (*Translator, error) { + meter := set.MeterProvider.Meter("github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes") + missingSources, err := meter.Int64Counter( + missingSourceMetricName, + metric.WithDescription("OTLP resources that are missing a source (e.g. hostname)"), + metric.WithUnit("{resource}"), + ) + if err != nil { + return nil, fmt.Errorf("failed to build missing source counter: %w", err) + } + + return &Translator{ + missingSources: missingSources, + }, nil +} + +// ResourceToSource gets a telemetry signal source from its resource attributes. +func (p *Translator) ResourceToSource(ctx context.Context, res pcommon.Resource, set attribute.Set, hostFromAttributesHandler HostFromAttributesHandler) (source.Source, bool) { + src, ok := SourceFromAttrs(res.Attributes(), hostFromAttributesHandler) + if !ok { + p.missingSources.Add(ctx, 1, metric.WithAttributeSet(set)) + } + + return src, ok +} + +// AttributesToSource gets a telemetry signal source from a set of attributes. +// As opposed to ResourceToSource, this does not keep track of failed requests. +// +// NOTE: This method SHOULD NOT generally be used: it is only used in the logs implementation +// because of a fallback logic that will be removed. The attributes detected are resource attributes, +// not attributes from a telemetry signal. 
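`NewTranslator` only needs `component.TelemetrySettings` for its meter, so in a test or sketch the no-op settings from `componenttest` should suffice. A hedged usage example (resource attributes invented; `componenttest.NewNopTelemetrySettings` is assumed from the collector's test helpers):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
	"go.opentelemetry.io/collector/component/componenttest"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	tr, err := attributes.NewTranslator(componenttest.NewNopTelemetrySettings())
	if err != nil {
		log.Fatal(err)
	}

	res := pcommon.NewResource()
	res.Attributes().PutStr("host.name", "web-1.example.com")

	// An empty attribute set labels the missing-source counter if no source is found.
	src, ok := tr.ResourceToSource(context.Background(), res, attribute.NewSet(), nil)
	fmt.Println(ok, src.Tag()) // true host:web-1.example.com
}
```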
+func (p *Translator) AttributesToSource(_ context.Context, attrs pcommon.Map) (source.Source, bool) { + return SourceFromAttrs(attrs, nil) +} diff --git a/vendor/github.com/DataDog/sketches-go/LICENSE b/vendor/github.com/DataDog/sketches-go/LICENSE index 7d3693be..7882f2d9 100644 --- a/vendor/github.com/DataDog/sketches-go/LICENSE +++ b/vendor/github.com/DataDog/sketches-go/LICENSE @@ -1,13 +1,200 @@ -Copyright 2021 DataDog, Inc. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - http://www.apache.org/licenses/LICENSE-2.0 + 1. Definitions. -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go b/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go index 33a0ea5b..10445f05 100644 --- a/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go +++ b/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go @@ -173,7 +173,13 @@ func (s *DDSketch) GetValueAtQuantile(quantile float64) (float64, error) { return math.NaN(), errEmptySketch } - rank := quantile * (count - 1) + // Use an explicit floating point conversion (as per Go specification) to make sure that no + // "fused multiply and add" (FMA) operation is used in the following code subtracting values + // from `rank`. Not doing so can lead to inconsistent rounding and return value for this + // function, depending on the architecture and whether FMA operations are used or not by the + // compiler. + rank := float64(quantile * (count - 1)) + negativeValueCount := s.negativeValueStore.TotalCount() if rank < negativeValueCount { return -s.Value(s.negativeValueStore.KeyAtRank(negativeValueCount - 1 - rank)), nil @@ -313,6 +319,23 @@ func (s *DDSketch) ToProto() *sketchpb.DDSketch { } } +func (s *DDSketch) EncodeProto(w io.Writer) { + builder := sketchpb.NewDDSketchBuilder(w) + + builder.SetMapping(func(indexMappingBuilder *sketchpb.IndexMappingBuilder) { + s.IndexMapping.EncodeProto(indexMappingBuilder) + }) + + builder.SetZeroCount(s.zeroCount) + builder.SetNegativeValues(func(storeBuilder *sketchpb.StoreBuilder) { + s.negativeValueStore.EncodeProto(storeBuilder) + }) + + builder.SetPositiveValues(func(storeBuilder *sketchpb.StoreBuilder) { + s.positiveValueStore.EncodeProto(storeBuilder) + }) +} + // FromProto builds a new instance of DDSketch based on the provided protobuf representation, using a Dense store. 
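The new `EncodeProto` path streams the protobuf encoding straight to an `io.Writer` via the generated builders, avoiding the intermediate `sketchpb.DDSketch` that `ToProto` allocates. A minimal sketch (the 0.01 relative accuracy and sample values are arbitrary):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/DataDog/sketches-go/ddsketch"
)

func main() {
	sketch, err := ddsketch.NewDefaultDDSketch(0.01) // 1% relative accuracy
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range []float64{1, 2, 3, 4, 5} {
		if err := sketch.Add(v); err != nil {
			log.Fatal(err)
		}
	}

	var buf bytes.Buffer
	sketch.EncodeProto(&buf) // streams an equivalent encoding to proto.Marshal(sketch.ToProto())
	fmt.Println(buf.Len())   // size of the serialized sketch
}
```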
func FromProto(pb *sketchpb.DDSketch) (*DDSketch, error) { return FromProtoWithStoreProvider(pb, store.DenseStoreConstructor) diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/cubically_interpolated_mapping.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/cubically_interpolated_mapping.go index 933cdab6..477ded84 100644 --- a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/cubically_interpolated_mapping.go +++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/cubically_interpolated_mapping.go @@ -131,6 +131,12 @@ func (m *CubicallyInterpolatedMapping) ToProto() *sketchpb.IndexMapping { } } +func (m *CubicallyInterpolatedMapping) EncodeProto(builder *sketchpb.IndexMappingBuilder) { + builder.SetGamma(m.gamma) + builder.SetIndexOffset(m.indexOffset) + builder.SetInterpolation(uint64(sketchpb.IndexMapping_CUBIC)) +} + func (m *CubicallyInterpolatedMapping) Encode(b *[]byte) { enc.EncodeFlag(b, enc.FlagIndexMappingBaseCubic) enc.EncodeFloat64LE(b, m.gamma) diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/index_mapping.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/index_mapping.go index 88b92659..62262d2c 100644 --- a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/index_mapping.go +++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/index_mapping.go @@ -29,6 +29,7 @@ type IndexMapping interface { // MaxIndexableValue returns the maximum positive value that can be mapped to an index. MaxIndexableValue() float64 ToProto() *sketchpb.IndexMapping + EncodeProto(builder *sketchpb.IndexMappingBuilder) // Encode encodes a mapping and appends its content to the provided []byte. Encode(b *[]byte) } diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/linearly_interpolated_mapping.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/linearly_interpolated_mapping.go index d9b0b740..aa449917 100644 --- a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/linearly_interpolated_mapping.go +++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/linearly_interpolated_mapping.go @@ -119,6 +119,12 @@ func (m *LinearlyInterpolatedMapping) ToProto() *sketchpb.IndexMapping { } } +func (m *LinearlyInterpolatedMapping) EncodeProto(builder *sketchpb.IndexMappingBuilder) { + builder.SetGamma(m.gamma) + builder.SetIndexOffset(m.indexOffset) + builder.SetInterpolation(uint64(sketchpb.IndexMapping_LINEAR)) +} + func (m *LinearlyInterpolatedMapping) Encode(b *[]byte) { enc.EncodeFlag(b, enc.FlagIndexMappingBaseLinear) enc.EncodeFloat64LE(b, m.gamma) diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/logarithmic_mapping.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/logarithmic_mapping.go index 474e74d9..7ac2ac47 100644 --- a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/logarithmic_mapping.go +++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/logarithmic_mapping.go @@ -104,6 +104,12 @@ func (m *LogarithmicMapping) ToProto() *sketchpb.IndexMapping { } } +func (m *LogarithmicMapping) EncodeProto(builder *sketchpb.IndexMappingBuilder) { + builder.SetGamma(m.gamma) + builder.SetIndexOffset(m.indexOffset) + builder.SetInterpolation(uint64(sketchpb.IndexMapping_NONE)) +} + func (m *LogarithmicMapping) Encode(b *[]byte) { enc.EncodeFlag(b, enc.FlagIndexMappingBaseLogarithmic) enc.EncodeFloat64LE(b, m.gamma) diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.pb.go 
b/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.pb.go index 9dbd6f29..3bff93da 100644 --- a/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.pb.go +++ b/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.pb.go @@ -4,8 +4,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.34.2 +// protoc v4.25.1 // source: ddsketch.proto package sketchpb @@ -363,7 +363,7 @@ func file_ddsketch_proto_rawDescGZIP() []byte { var file_ddsketch_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_ddsketch_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_ddsketch_proto_goTypes = []interface{}{ +var file_ddsketch_proto_goTypes = []any{ (IndexMapping_Interpolation)(0), // 0: IndexMapping.Interpolation (*DDSketch)(nil), // 1: DDSketch (*IndexMapping)(nil), // 2: IndexMapping @@ -389,7 +389,7 @@ func file_ddsketch_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_ddsketch_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_ddsketch_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*DDSketch); i { case 0: return &v.state @@ -401,7 +401,7 @@ func file_ddsketch_proto_init() { return nil } } - file_ddsketch_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_ddsketch_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*IndexMapping); i { case 0: return &v.state @@ -413,7 +413,7 @@ func file_ddsketch_proto_init() { return nil } } - file_ddsketch_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_ddsketch_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Store); i { case 0: return &v.state diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.proto_builder.go b/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.proto_builder.go new file mode 100644 index 00000000..898ec332 --- /dev/null +++ b/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.proto_builder.go @@ -0,0 +1,161 @@ +// THIS IS A GENERATED FILE +// DO NOT EDIT +package sketchpb + +import ( + bytes "bytes" + protowire "google.golang.org/protobuf/encoding/protowire" + io "io" + math "math" +) + +type DDSketchBuilder struct { + writer io.Writer + buf bytes.Buffer + scratch []byte + indexMappingBuilder IndexMappingBuilder + storeBuilder StoreBuilder +} + +func NewDDSketchBuilder(writer io.Writer) *DDSketchBuilder { + return &DDSketchBuilder{ + writer: writer, + } +} +func (x *DDSketchBuilder) Reset(writer io.Writer) { + x.buf.Reset() + x.writer = writer +} +func (x *DDSketchBuilder) SetMapping(cb func(w *IndexMappingBuilder)) { + x.buf.Reset() + x.indexMappingBuilder.writer = &x.buf + x.indexMappingBuilder.scratch = x.scratch + cb(&x.indexMappingBuilder) + x.scratch = protowire.AppendVarint(x.scratch[:0], 0xa) + x.scratch = protowire.AppendVarint(x.scratch, uint64(x.buf.Len())) + x.writer.Write(x.scratch) + x.writer.Write(x.buf.Bytes()) +} +func (x *DDSketchBuilder) SetPositiveValues(cb func(w *StoreBuilder)) { + x.buf.Reset() + x.storeBuilder.writer = &x.buf + x.storeBuilder.scratch = x.scratch + cb(&x.storeBuilder) + x.scratch = protowire.AppendVarint(x.scratch[:0], 0x12) + x.scratch = protowire.AppendVarint(x.scratch, uint64(x.buf.Len())) + x.writer.Write(x.scratch) + x.writer.Write(x.buf.Bytes()) +} +func (x *DDSketchBuilder) SetNegativeValues(cb func(w *StoreBuilder)) { + x.buf.Reset() + x.storeBuilder.writer 
= &x.buf + x.storeBuilder.scratch = x.scratch + cb(&x.storeBuilder) + x.scratch = protowire.AppendVarint(x.scratch[:0], 0x1a) + x.scratch = protowire.AppendVarint(x.scratch, uint64(x.buf.Len())) + x.writer.Write(x.scratch) + x.writer.Write(x.buf.Bytes()) +} +func (x *DDSketchBuilder) SetZeroCount(v float64) { + x.scratch = protowire.AppendVarint(x.scratch[:0], 0x21) + x.scratch = protowire.AppendFixed64(x.scratch, math.Float64bits(v)) + x.writer.Write(x.scratch) +} + +type IndexMappingBuilder struct { + writer io.Writer + buf bytes.Buffer + scratch []byte +} + +func NewIndexMappingBuilder(writer io.Writer) *IndexMappingBuilder { + return &IndexMappingBuilder{ + writer: writer, + } +} +func (x *IndexMappingBuilder) Reset(writer io.Writer) { + x.buf.Reset() + x.writer = writer +} +func (x *IndexMappingBuilder) SetGamma(v float64) { + x.scratch = protowire.AppendVarint(x.scratch[:0], 0x9) + x.scratch = protowire.AppendFixed64(x.scratch, math.Float64bits(v)) + x.writer.Write(x.scratch) +} +func (x *IndexMappingBuilder) SetIndexOffset(v float64) { + x.scratch = protowire.AppendVarint(x.scratch[:0], 0x11) + x.scratch = protowire.AppendFixed64(x.scratch, math.Float64bits(v)) + x.writer.Write(x.scratch) +} +func (x *IndexMappingBuilder) SetInterpolation(v uint64) { + if v != 0 { + x.scratch = protowire.AppendVarint(x.scratch[:0], 0x18) + x.scratch = protowire.AppendVarint(x.scratch, v) + x.writer.Write(x.scratch) + } +} + +type StoreBuilder struct { + writer io.Writer + buf bytes.Buffer + scratch []byte + store_BinCountsEntryBuilder Store_BinCountsEntryBuilder +} + +func NewStoreBuilder(writer io.Writer) *StoreBuilder { + return &StoreBuilder{ + writer: writer, + } +} +func (x *StoreBuilder) Reset(writer io.Writer) { + x.buf.Reset() + x.writer = writer +} +func (x *StoreBuilder) AddBinCounts(cb func(w *Store_BinCountsEntryBuilder)) { + x.buf.Reset() + x.store_BinCountsEntryBuilder.writer = &x.buf + x.store_BinCountsEntryBuilder.scratch = x.scratch + cb(&x.store_BinCountsEntryBuilder) + x.scratch = protowire.AppendVarint(x.scratch[:0], 0xa) + x.scratch = protowire.AppendVarint(x.scratch, uint64(x.buf.Len())) + x.writer.Write(x.scratch) + x.writer.Write(x.buf.Bytes()) +} +func (x *StoreBuilder) AddContiguousBinCounts(v float64) { + x.scratch = protowire.AppendVarint(x.scratch[:0], 0x11) + x.scratch = protowire.AppendFixed64(x.scratch, math.Float64bits(v)) + x.writer.Write(x.scratch) +} +func (x *StoreBuilder) SetContiguousBinIndexOffset(v int32) { + x.scratch = x.scratch[:0] + x.scratch = protowire.AppendVarint(x.scratch, 0x18) + x.scratch = protowire.AppendVarint(x.scratch, protowire.EncodeZigZag(int64(v))) + x.writer.Write(x.scratch) +} + +type Store_BinCountsEntryBuilder struct { + writer io.Writer + buf bytes.Buffer + scratch []byte +} + +func NewStore_BinCountsEntryBuilder(writer io.Writer) *Store_BinCountsEntryBuilder { + return &Store_BinCountsEntryBuilder{ + writer: writer, + } +} +func (x *Store_BinCountsEntryBuilder) Reset(writer io.Writer) { + x.buf.Reset() + x.writer = writer +} +func (x *Store_BinCountsEntryBuilder) SetKey(v int32) { + x.scratch = x.scratch[:0] + x.scratch = protowire.AppendVarint(x.scratch, 0x8) + x.scratch = protowire.AppendVarint(x.scratch, protowire.EncodeZigZag(int64(v))) + x.writer.Write(x.scratch) +} +func (x *Store_BinCountsEntryBuilder) SetValue(v float64) { + x.scratch = protowire.AppendVarint(x.scratch[:0], 0x11) + x.scratch = protowire.AppendFixed64(x.scratch, math.Float64bits(v)) + x.writer.Write(x.scratch) +} diff --git 
a/vendor/github.com/DataDog/sketches-go/ddsketch/store/buffered_paginated.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/buffered_paginated.go index 11a56f91..a0a9e1e7 100644 --- a/vendor/github.com/DataDog/sketches-go/ddsketch/store/buffered_paginated.go +++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/buffered_paginated.go @@ -555,6 +555,20 @@ func (s *BufferedPaginatedStore) ToProto() *sketchpb.Store { } } +func (s *BufferedPaginatedStore) EncodeProto(builder *sketchpb.StoreBuilder) { + if s.IsEmpty() { + return + } + + s.ForEach(func(index int, count float64) (stop bool) { + builder.AddBinCounts(func(w *sketchpb.Store_BinCountsEntryBuilder) { + w.SetKey(int32(index)) + w.SetValue(count) + }) + return false + }) +} + func (s *BufferedPaginatedStore) Reweight(w float64) error { if w <= 0 { return errors.New("can't reweight by a negative factor") diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/dense_store.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/dense_store.go index 2c4a3d4a..817d36e0 100644 --- a/vendor/github.com/DataDog/sketches-go/ddsketch/store/dense_store.go +++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/dense_store.go @@ -250,6 +250,17 @@ func (s *DenseStore) ToProto() *sketchpb.Store { } } +func (s *DenseStore) EncodeProto(builder *sketchpb.StoreBuilder) { + if s.IsEmpty() { + return + } + + for i := s.minIndex - s.offset; i < s.maxIndex-s.offset+1; i++ { + builder.AddContiguousBinCounts(s.bins[i]) + } + builder.SetContiguousBinIndexOffset(int32(s.minIndex)) +} + func (s *DenseStore) Reweight(w float64) error { if w <= 0 { return errors.New("can't reweight by a negative factor") diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/sparse.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/sparse.go index 9a07836e..e0163f97 100644 --- a/vendor/github.com/DataDog/sketches-go/ddsketch/store/sparse.go +++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/sparse.go @@ -150,6 +150,16 @@ func (s *SparseStore) ToProto() *sketchpb.Store { return &sketchpb.Store{BinCounts: binCounts} } +func (s *SparseStore) EncodeProto(builder *sketchpb.StoreBuilder) { + + for index, count := range s.counts { + builder.AddBinCounts(func(w *sketchpb.Store_BinCountsEntryBuilder) { + w.SetKey(int32(index)) + w.SetValue(count) + }) + } +} + func (s *SparseStore) Reweight(w float64) error { if w <= 0 { return errors.New("can't reweight by a negative factor") diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/store.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/store.go index 64a5e3d5..ca4fd92f 100644 --- a/vendor/github.com/DataDog/sketches-go/ddsketch/store/store.go +++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/store.go @@ -7,7 +7,6 @@ package store import ( "errors" - enc "github.com/DataDog/sketches-go/ddsketch/encoding" "github.com/DataDog/sketches-go/ddsketch/pb/sketchpb" ) @@ -54,6 +53,7 @@ type Store interface { KeyAtRank(rank float64) int MergeWith(store Store) ToProto() *sketchpb.Store + EncodeProto(builder *sketchpb.StoreBuilder) // Reweight multiplies all values from the store by w, but keeps the same global distribution. 
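Each store picks the wire shape that matches its layout: `DenseStore` emits a run of contiguous bin counts plus a starting index offset, while the sparse and buffered stores emit individual `(key, value)` bin entries. A sketch writing the dense form by hand with the generated builder (indexes chosen arbitrarily):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
)

func main() {
	var buf bytes.Buffer
	b := sketchpb.NewStoreBuilder(&buf)

	// Dense layout: counts for consecutive bin indexes 42, 43, 44...
	b.AddContiguousBinCounts(3)
	b.AddContiguousBinCounts(0)
	b.AddContiguousBinCounts(1)
	b.SetContiguousBinIndexOffset(42) // ...anchored at index 42

	fmt.Println(buf.Len()) // raw Store message bytes, ready to embed in a DDSketch
}
```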
Reweight(w float64) error // Encode encodes the bins of the store and appends its content to the diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 00000000..6b061e61 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 00000000..fbc63325 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,27 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - misspell + - govet + - staticcheck + - errcheck + - unparam + - ineffassign + - nakedret + - gocyclo + - dupl + - goimports + - revive + - gosec + - gosimple + - typecheck + - unused + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 600 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 00000000..f95a504f --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,242 @@ +# Changelog + +## 3.3.0 (2024-08-27) + +### Added + +- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser) +- #213: nil version equality checking (thanks @KnutZuidema) + +### Changed + +- #241: Simplify StrictNewVersion parsing (thanks @grosser) +- Testing support up through Go 1.23 +- Minimum version set to 1.21 as this is what's tested now +- Fuzz testing now supports caching + +## 3.2.1 (2023-04-10) + +### Changed + +- #198: Improved testing around pre-release names +- #200: Improved code scanning with addition of CodeQL +- #201: Testing now includes Go 1.20. Go 1.17 has been dropped +- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily +- #203: Docs updated for security details + +### Fixed + +- #199: Fixed issue with range transformations + +## 3.2.0 (2022-11-28) + +### Added + +- #190: Added text marshaling and unmarshaling +- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) +- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) +- #179: Added New() version constructor (thanks @kazhuravlev) + +### Changed + +- #182/#183: Updated CI testing setup + +### Fixed + +- #186: Fixing issue where validation of constraint section gave false positives +- #176: Fix constraints check with *-0 (thanks @mtt0) +- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) +- #161: Fixed godoc (thanks @afirth) + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. 
The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. + +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. 
See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt new file mode 100644 index 00000000..9ff7da9c --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 00000000..9ca87a2c --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,31 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: + @echo "==> Running Fuzz Tests" + go env GOCACHE + go test -fuzz=FuzzNewVersion -fuzztime=15s . + go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . + go test -fuzz=FuzzNewConstraint -fuzztime=15s . + +$(GOLANGCI_LINT): + # Install golangci-lint. The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2 diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 00000000..ed569360 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,258 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +## Package Versions + +Note, import `github.com/Masterminds/semver/v3` to use the latest version. + +There are three major versions of the `semver` package. + +* 3.x.x is the stable and active version. This version is focused on constraint + compatibility for range handling in other tools from other languages. It has + a similar API to the v1 releases. The development of this version is on the master + branch. The documentation for this version is below. +* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are + no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). + There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). +* 1.x.x is the original release. It is no longer maintained. You should use the + v3 release instead. You can read the documentation for the 1.x.x release + [here](https://github.com/Masterminds/semver/blob/release-1/README.md). + +## Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it.
For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+	v, err := semver.NewVersion(r)
+	if err != nil {
+		log.Fatalf("Error parsing version: %s", err)
+	}
+
+	vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include pre-releases
+   within the comparison. It will provide an answer that is valid with the
+   comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering pre-releases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+	// Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+	// Handle version not being parsable.
+}
+// Check if the version meets the constraints. The variable a will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
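+
+As an illustrative sketch (not part of the upstream README), the OR'd
+constraint above can be checked like this:
+
+```go
+c, err := semver.NewConstraint(">= 1.2 < 3.0.0 || >= 4.2.3")
+if err != nil {
+	// Handle constraint not being parsable.
+}
+
+fmt.Println(c.Check(semver.MustParse("2.5.0"))) // true: satisfies the first range
+fmt.Println(c.Check(semver.MustParse("3.1.0"))) // false: satisfies neither range
+fmt.Println(c.Check(semver.MustParse("4.2.3"))) // true: satisfies the second range
+```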
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+### Working With Prerelease Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, pre-releases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification, pre-releases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer's comparisons using constraints without a pre-release comparator will skip
+pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order
+(see an [ASCII Table](http://www.asciitable.com/)).
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+### Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
+parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
+
+### Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+### Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+### Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful for comparing API versions, as a
+major change is API breaking.
For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+* `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+	// Handle constraint not being parseable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+	// Handle version not being parseable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+	fmt.Println(m)
+
+	// Loops over the errors which would read
+	// "1.3 is greater than 1.2.3"
+	// "1.3 is less than 1.4"
+}
+```
+
+## Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
+
+## Security
+
+Security is an important consideration for this project. The project currently
+uses the following tools to help discover security issues:
+
+* [CodeQL](https://github.com/Masterminds/semver)
+* [gosec](https://github.com/securego/gosec)
+* Daily Fuzz testing
+
+If you believe you have found a security vulnerability you can privately disclose
+it through the [GitHub security page](https://github.com/Masterminds/semver/security).
diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md
new file mode 100644
index 00000000..a30a66b1
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+The following versions of semver are currently supported:
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 3.x     | :white_check_mark: |
+| 2.x     | :x:                |
+| 1.x     | :x:                |
+
+Fixes are only released for the latest minor version in the form of a patch release.
+
+## Reporting a Vulnerability
+
+You can privately disclose a vulnerability through GitHub's
+[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories)
+mechanism.
diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go
new file mode 100644
index 00000000..a7823589
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection: the number of Version instances
+// in the slice.
+func (c Collection) Len() int {
+	return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. It checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+	return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to replace the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go
new file mode 100644
index 00000000..8461c7ed
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -0,0 +1,594 @@
+package semver
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// Constraints is one or more constraints that a semantic version can be
+// checked against.
+type Constraints struct {
+	constraints [][]*constraint
+}
+
+// NewConstraint returns a Constraints instance that a Version instance can
+// be checked against. If there is a parse error it will be returned.
+func NewConstraint(c string) (*Constraints, error) {
+
+	// Rewrite - ranges into a comparison operation.
+	c = rewriteRange(c)
+
+	ors := strings.Split(c, "||")
+	or := make([][]*constraint, len(ors))
+	for k, v := range ors {
+
+		// TODO: Find a way to validate and fetch all the constraints in a simpler form
+
+		// Validate the segment
+		if !validConstraintRegex.MatchString(v) {
+			return nil, fmt.Errorf("improper constraint: %s", v)
+		}
+
+		cs := findConstraintRegex.FindAllString(v, -1)
+		if cs == nil {
+			cs = append(cs, v)
+		}
+		result := make([]*constraint, len(cs))
+		for i, s := range cs {
+			pc, err := parseConstraint(s)
+			if err != nil {
+				return nil, err
+			}
+
+			result[i] = pc
+		}
+		or[k] = result
+	}
+
+	o := &Constraints{constraints: or}
+	return o, nil
+}
+
+// Check tests if a version satisfies the constraints.
+func (cs Constraints) Check(v *Version) bool {
+	// TODO(mattfarina): For v4 of this library consolidate the Check and Validate
+	// functions as the underlying functions make that possible now.
+	// loop over the ORs and check the inner ANDs
+	for _, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			if check, _ := c.check(v); !check {
+				joy = false
+				break
+			}
+		}
+
+		if joy {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Validate checks if a version satisfies a constraint. If not, a slice of
+// reasons for the failure is returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+	// loop over the ORs and check the inner ANDs
+	var e []error
+
+	// Capture the prerelease message only once. When it happens the first time
+	// this var is marked.
+	var prerelease bool
+	for _, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			// Before running the check handle the case where the version is
+			// a prerelease and the check is not searching for prereleases.
+			if c.con.pre == "" && v.pre != "" {
+				if !prerelease {
+					em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+					e = append(e, em)
+					prerelease = true
+				}
+				joy = false
+
+			} else {
+
+				if _, err := c.check(v); err != nil {
+					e = append(e, err)
+					joy = false
+				}
+			}
+		}
+
+		if joy {
+			return true, []error{}
+		}
+	}
+
+	return false, e
+}
+
+func (cs Constraints) String() string {
+	buf := make([]string, len(cs.constraints))
+	var tmp bytes.Buffer
+
+	for k, v := range cs.constraints {
+		tmp.Reset()
+		vlen := len(v)
+		for kk, c := range v {
+			tmp.WriteString(c.string())
+
+			// Space separate the AND conditions
+			if vlen > 1 && kk < vlen-1 {
+				tmp.WriteString(" ")
+			}
+		}
+		buf[k] = tmp.String()
+	}
+
+	return strings.Join(buf, " || ")
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
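+// This allows a Constraints value to be decoded directly from textual formats
+// (for example, a JSON string field) by packages that honor
+// encoding.TextUnmarshaler.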
+func (cs *Constraints) UnmarshalText(text []byte) error {
+	temp, err := NewConstraint(string(text))
+	if err != nil {
+		return err
+	}
+
+	*cs = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (cs Constraints) MarshalText() ([]byte, error) {
+	return []byte(cs.String()), nil
+}
+
+var constraintOps map[string]cfunc
+var constraintRegex *regexp.Regexp
+var constraintRangeRegex *regexp.Regexp
+
+// Used to find individual constraints within a multi-constraint string
+var findConstraintRegex *regexp.Regexp
+
+// Used to validate that a segment of ANDs is valid
+var validConstraintRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+func init() {
+	constraintOps = map[string]cfunc{
+		"":   constraintTildeOrEqual,
+		"=":  constraintTildeOrEqual,
+		"!=": constraintNotEqual,
+		">":  constraintGreaterThan,
+		"<":  constraintLessThan,
+		">=": constraintGreaterThanEqual,
+		"=>": constraintGreaterThanEqual,
+		"<=": constraintLessThanEqual,
+		"=<": constraintLessThanEqual,
+		"~":  constraintTilde,
+		"~>": constraintTilde,
+		"^":  constraintCaret,
+	}
+
+	ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
+
+	constraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`^\s*(%s)\s*(%s)\s*$`,
+		ops,
+		cvRegex))
+
+	constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+		`\s*(%s)\s+-\s+(%s)\s*`,
+		cvRegex, cvRegex))
+
+	findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`(%s)\s*(%s)`,
+		ops,
+		cvRegex))
+
+	// The first time a constraint shows up will look slightly different from
+	// future times it shows up due to a leading space or comma in a given
+	// string.
+	validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`,
+		ops,
+		cvRegex,
+		ops,
+		cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+	// The version used in the constraint check. For example, if a constraint
+	// is '<= 2.0.0' then con is a Version instance representing 2.0.0.
+	con *Version
+
+	// The original parsed version (e.g., 4.x from != 4.x)
+	orig string
+
+	// The original operator for the constraint
+	origfunc string
+
+	// When an x is used as part of the version (e.g., 1.x)
+	minorDirty bool
+	dirty      bool
+	patchDirty bool
+}
+
+// Check if a version meets the constraint
+func (c *constraint) check(v *Version) (bool, error) {
+	return constraintOps[c.origfunc](v, c)
+}
+
+// String prints an individual constraint into a string
+func (c *constraint) string() string {
+	return c.origfunc + c.orig
+}
+
+type cfunc func(v *Version, c *constraint) (bool, error)
+
+func parseConstraint(c string) (*constraint, error) {
+	if len(c) > 0 {
+		m := constraintRegex.FindStringSubmatch(c)
+		if m == nil {
+			return nil, fmt.Errorf("improper constraint: %s", c)
+		}
+
+		cs := &constraint{
+			orig:     m[2],
+			origfunc: m[1],
+		}
+
+		ver := m[2]
+		minorDirty := false
+		patchDirty := false
+		dirty := false
+		if isX(m[3]) || m[3] == "" {
+			ver = fmt.Sprintf("0.0.0%s", m[6])
+			dirty = true
+		} else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
+			minorDirty = true
+			dirty = true
+			ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+		} else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" {
+			dirty = true
+			patchDirty = true
+			ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+		}
+
+		con, err := NewVersion(ver)
+		if err != nil {
+
+			// The constraintRegex should catch any regex parsing errors. So,
+			// we should never get here.
+			return nil, errors.New("constraint Parser Error")
+		}
+
+		cs.con = con
+		cs.minorDirty = minorDirty
+		cs.patchDirty = patchDirty
+		cs.dirty = dirty
+
+		return cs, nil
+	}
+
+	// The rest is the special case where an empty string was passed in which
+	// is equivalent to * or >=0.0.0
+	con, err := StrictNewVersion("0.0.0")
+	if err != nil {
+
+		// The constraintRegex should catch any regex parsing errors. So,
+		// we should never get here.
+		return nil, errors.New("constraint Parser Error")
+	}
+
+	cs := &constraint{
+		con:        con,
+		orig:       c,
+		origfunc:   "",
+		minorDirty: false,
+		patchDirty: false,
+		dirty:      true,
+	}
+	return cs, nil
+}
+
+// Constraint functions
+func constraintNotEqual(v *Version, c *constraint) (bool, error) {
+	if c.dirty {
+
+		// If there is a pre-release on the version but the constraint isn't looking
+		// for them assume that pre-releases are not compatible. See issue 21 for
+		// more details.
+		if v.Prerelease() != "" && c.con.Prerelease() == "" {
+			return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+		}
+
+		if c.con.Major() != v.Major() {
+			return true, nil
+		}
+		if c.con.Minor() != v.Minor() && !c.minorDirty {
+			return true, nil
+		} else if c.minorDirty {
+			return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+		} else if c.con.Patch() != v.Patch() && !c.patchDirty {
+			return true, nil
+		} else if c.patchDirty {
+			// Need to handle prereleases if present
+			if v.Prerelease() != "" || c.con.Prerelease() != "" {
+				eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0
+				if eq {
+					return true, nil
+				}
+				return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+			}
+			return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+		}
+	}
+
+	eq := v.Equal(c.con)
+	if eq {
+		return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+func constraintGreaterThan(v *Version, c *constraint) (bool, error) {
+
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	var eq bool
+
+	if !c.dirty {
+		eq = v.Compare(c.con) == 1
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	if v.Major() > c.con.Major() {
+		return true, nil
+	} else if v.Major() < c.con.Major() {
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.minorDirty {
+		// This is a range case such as >11. When the version is something like
+		// 11.1.0 it is not > 11. For that we would need 12 or higher.
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.patchDirty {
+		// This is for ranges such as >11.1. A version of 11.1.1 is not greater,
+		// while one of 11.2.1 is.
+		eq = v.Minor() > c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	// If we have gotten here we are not comparing pre-releases and can use the
+	// Compare function to accomplish that.
+	eq = v.Compare(c.con) == 1
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+}
+
+func constraintLessThan(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	eq := v.Compare(c.con) < 0
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
+}
+
+func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) {
+
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	eq := v.Compare(c.con) >= 0
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is less than %s", v, c.orig)
+}
+
+func constraintLessThanEqual(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	var eq bool
+
+	if !c.dirty {
+		eq = v.Compare(c.con) <= 0
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	}
+
+	if v.Major() > c.con.Major() {
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	} else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty {
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+// ~*, ~>* --> >= 0.0.0 (any)
+// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x, ~>2.x.x --> >=2.0.0, <3.0.0
+// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
+// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
+// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
+// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
+func constraintTilde(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	if v.LessThan(c.con) {
+		return false, fmt.Errorf("%s is less than %s", v, c.orig)
+	}
+
+	// ~0.0.0 is a special case where all versions are accepted. It's
+	// equivalent to >= 0.0.0.
+	if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
+		!c.minorDirty && !c.patchDirty {
+		return true, nil
+	}
+
+	if v.Major() != c.con.Major() {
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+
+	if v.Minor() != c.con.Minor() && !c.minorDirty {
+		return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+// When there is a .x (dirty) status it automatically opts in to ~.
Otherwise
+// it's a straight =
+func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	if c.dirty {
+		return constraintTilde(v, c)
+	}
+
+	eq := v.Equal(c.con)
+	if eq {
+		return true, nil
+	}
+
+	return false, fmt.Errorf("%s is not equal to %s", v, c.orig)
+}
+
+// ^* --> (any)
+// ^1.2.3 --> >=1.2.3 <2.0.0
+// ^1.2 --> >=1.2.0 <2.0.0
+// ^1 --> >=1.0.0 <2.0.0
+// ^0.2.3 --> >=0.2.3 <0.3.0
+// ^0.2 --> >=0.2.0 <0.3.0
+// ^0.0.3 --> >=0.0.3 <0.0.4
+// ^0.0 --> >=0.0.0 <0.1.0
+// ^0 --> >=0.0.0 <1.0.0
+func constraintCaret(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	// This less than handles prereleases
+	if v.LessThan(c.con) {
+		return false, fmt.Errorf("%s is less than %s", v, c.orig)
+	}
+
+	var eq bool
+
+	// ^ when the major is > 0 is >=x.y.z <x+1
+	if c.con.Major() > 0 || c.minorDirty {
+
+		// ^ has to be within a major range for > 0. Everything less than was
+		// filtered out with the LessThan call above. This filters out those
+		// that are greater but not within the same major range.
+		eq = v.Major() == c.con.Major()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+
+	// ^ when the major is 0 and minor > 0 is >=0.y.z <0.y+1
+	if c.con.Major() == 0 && v.Major() > 0 {
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+	// If the con Minor is > 0 it is not dirty
+	if c.con.Minor() > 0 || c.patchDirty {
+		eq = v.Minor() == c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
+	}
+	// ^ when the constraint minor is 0 only allows =0.0.z, so a version
+	// minor > 0 fails.
+	if c.con.Minor() == 0 && v.Minor() > 0 {
+		return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig)
+	}
+
+	// At this point the major is 0 and the minor is 0 and not dirty. The patch
+	// is not dirty so we need to check if they are equal. If they are not equal
+	// the constraint is not met.
+	eq = c.con.Patch() == v.Patch()
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s does not equal %s. Expected version and constraint to be equal when major and minor versions are 0", v, c.orig)
+}
+
+func isX(x string) bool {
+	switch x {
+	case "x", "*", "X":
+		return true
+	default:
+		return false
+	}
+}
+
+func rewriteRange(i string) string {
+	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+	if m == nil {
+		return i
+	}
+	o := i
+	for _, v := range m {
+		t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11])
+		o = strings.Replace(o, v[0], t, 1)
+	}
+
+	return o
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go
new file mode 100644
index 00000000..74f97caa
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/doc.go
@@ -0,0 +1,184 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+  - Parse semantic versions
+  - Sort semantic versions
+  - Check if a semantic version fits within a set of constraints
+  - Optionally work with a `v` prefix
+
+# Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+	v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+	raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+	vs := make([]*semver.Version, len(raw))
+	for i, r := range raw {
+		v, err := semver.NewVersion(r)
+		if err != nil {
+			log.Fatalf("Error parsing version: %s", err)
+		}
+
+		vs[i] = v
+	}
+
+	sort.Sort(semver.Collection(vs))
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+    and others it will follow the specification and always include prereleases
+    within the comparison. It will provide an answer valid with the comparison
+    spec section at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation it will follow a
+    different set of rules that are common for ranges with tools like npm/js
+    and Rust/Cargo. This includes considering prereleases to be invalid if the
+    range does not include one. If you want to have it include pre-releases a
+    simple solution is to include `-0` in your range (see the example after
+    this list).
+ 3. Constraint ranges can have some complex rules including the shorthand use of
+    ~ and ^. For more details on those see the options below.
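+
+For example, an illustrative sketch of the `-0` trick from item 2 (this
+example is not part of the original docs):
+
+	c, _ := semver.NewConstraint(">=1.2.3-0")
+	c.Check(semver.MustParse("1.2.4-alpha")) // true; a plain >=1.2.3 would skip this prerelease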
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+	c, err := semver.NewConstraint(">= 1.2.3")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parsable.
+	}
+	// Check if the version meets the constraints. The variable a will be true.
+	a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`.
+
+The basic comparisons are:
+
+  - `=`: equal (aliased to no operator)
+  - `!=`: not equal
+  - `>`: greater than
+  - `<`: less than
+  - `>=`: greater than or equal to
+  - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+  - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+  - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+  - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `>= 1.2.x` is equivalent to `>= 1.2.0`
+  - `<= 2.x` is equivalent to `< 3`
+  - `*` is equivalent to `>= 0.0.0`
+
+# Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+  - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+  - `~1` is equivalent to `>= 1, < 2`
+  - `~2.3` is equivalent to `>= 2.3 < 2.4`
+  - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `~1.x` is equivalent to `>= 1 < 2`
+
+# Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful for comparing API versions, as a
+major change is API breaking.
For example,
+
+  - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+  - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+  - `^2.3` is equivalent to `>= 2.3, < 3`
+  - `^2.x` is equivalent to `>= 2.0.0, < 3`
+  - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+  - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+  - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+  - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+  - `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+	c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+	if err != nil {
+		// Handle constraint not being parseable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parseable.
+	}
+
+	// Validate a version against a constraint.
+	a, msgs := c.Validate(v)
+	// a is false
+	for _, m := range msgs {
+		fmt.Println(m)
+
+		// Loops over the errors which would read
+		// "1.3 is greater than 1.2.3"
+		// "1.3 is less than 1.4"
+	}
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
new file mode 100644
index 00000000..304edc34
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -0,0 +1,645 @@
+package semver
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+
+var (
+	// ErrInvalidSemVer is returned when a version is found to be invalid
+	// during parsing.
+	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
+
+	// ErrEmptyString is returned when an empty string is passed in for parsing.
+	ErrEmptyString = errors.New("Version string empty")
+
+	// ErrInvalidCharacters is returned when invalid characters are found as
+	// part of a version
+	ErrInvalidCharacters = errors.New("Invalid characters in version")
+
+	// ErrSegmentStartsZero is returned when a version segment starts with 0.
+	// This is invalid in SemVer.
+	ErrSegmentStartsZero = errors.New("Version segment starts with 0")
+
+	// ErrInvalidMetadata is returned when the metadata is an invalid format
+	ErrInvalidMetadata = errors.New("Invalid Metadata string")
+
+	// ErrInvalidPrerelease is returned when the pre-release is an invalid format
+	ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
+)
+
+// semVerRegex is the regular expression used to parse a semantic version.
+// This is not the official regex from the semver spec. It has been modified to allow for loose handling
+// where versions like 2.1 are detected.
+const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
+	`(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
+	`(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
+
+// Version represents a single semantic version.
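+// Fields are unexported; use the accessor methods (Major, Minor, Patch,
+// Prerelease, Metadata, and Original) to read the individual parts.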
+type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") +} + +const ( + num string = "0123456789" + allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num +) + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x +// releases of semver did, use the NewVersion() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // Extract build metadata + if strings.Contains(parts[2], "+") { + extra := strings.SplitN(parts[2], "+", 2) + sv.metadata = extra[1] + parts[2] = extra[0] + if err := validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + // Extract build prerelease + if strings.Contains(parts[2], "-") { + extra := strings.SplitN(parts[2], "-", 2) + sv.pre = extra[1] + parts[2] = extra[0] + if err := validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. + for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract major, minor, and patch + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[5], + pre: m[4], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(m[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(m[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. 
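+	// (validatePrerelease and validateMetadata below enforce the SemVer
+	// identifier rules for these optional parts.)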
+ + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. 
+// Increments major number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMajor() Version {
+	vNext := v
+	vNext.metadata = ""
+	vNext.pre = ""
+	vNext.patch = 0
+	vNext.minor = 0
+	vNext.major = v.major + 1
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// SetPrerelease defines the prerelease value.
+// Value must not include the required 'hyphen' prefix.
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+	vNext := v
+	if len(prerelease) > 0 {
+		if err := validatePrerelease(prerelease); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.pre = prerelease
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+	vNext := v
+	if len(metadata) > 0 {
+		if err := validateMetadata(metadata); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.metadata = metadata
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// LessThanEqual tests if one version is less than or equal to another one.
+func (v *Version) LessThanEqual(o *Version) bool {
+	return v.Compare(o) <= 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// GreaterThanEqual tests if one version is greater than or equal to another one.
+func (v *Version) GreaterThanEqual(o *Version) bool {
+	return v.Compare(o) >= 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+	if v == o {
+		return true
+	}
+	if v == nil || o == nil {
+		return false
+	}
+	return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// the version is smaller than, equal to, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. A prerelease
+// version is lower than the same version without a prerelease. Compare always
+// takes prereleases into account. If you want to work with ranges using typical
+// range syntaxes that skip prereleases if the range is not looking for them,
+// use constraints.
+func (v *Version) Compare(o *Version) int {
+	// Compare the major, minor, and patch version for differences. If a
+	// difference is found return the comparison.
+	if d := compareSegment(v.Major(), o.Major()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+		return d
+	}
+
+	// At this point the major, minor, and patch versions are the same.
+	ps := v.pre
+	po := o.Prerelease()
+
+	if ps == "" && po == "" {
+		return 0
+	}
+	if ps == "" {
+		return 1
+	}
+	if po == "" {
+		return -1
+	}
+
+	return comparePrerelease(ps, po)
+}
+
+// UnmarshalJSON implements JSON.Unmarshaler interface.
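+// This lets a Version be decoded from a JSON string such as "1.2.3".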
+func (v *Version) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// MarshalJSON implements JSON.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (v *Version) UnmarshalText(text []byte) error {
+	temp, err := NewVersion(string(text))
+	if err != nil {
+		return err
+	}
+
+	*v = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (v Version) MarshalText() ([]byte, error) {
+	return []byte(v.String()), nil
+}
+
+// Scan implements the SQL.Scanner interface.
+func (v *Version) Scan(value interface{}) error {
+	var s string
+	s, _ = value.(string)
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// Value implements the Driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+	return v.String(), nil
+}
+
+func compareSegment(v, o uint64) int {
+	if v < o {
+		return -1
+	}
+	if v > o {
+		return 1
+	}
+
+	return 0
+}
+
+func comparePrerelease(v, o string) int {
+	// split the prerelease versions by their parts. The separator, per the spec,
+	// is a .
+	sparts := strings.Split(v, ".")
+	oparts := strings.Split(o, ".")
+
+	// Find the longer length of the parts to know how many loop iterations to
+	// go through.
+	slen := len(sparts)
+	olen := len(oparts)
+
+	l := slen
+	if olen > slen {
+		l = olen
+	}
+
+	// Iterate over each part of the prereleases to compare the differences.
+	for i := 0; i < l; i++ {
+		// Since the length of the parts can be different we need to create
+		// a placeholder. This is to avoid out of bounds issues.
+		stemp := ""
+		if i < slen {
+			stemp = sparts[i]
+		}
+
+		otemp := ""
+		if i < olen {
+			otemp = oparts[i]
+		}
+
+		d := comparePrePart(stemp, otemp)
+		if d != 0 {
+			return d
+		}
+	}
+
+	// Reaching here means two versions are of equal value but have different
+	// metadata (the part following a +). They are not identical in string form
+	// but the version comparison finds them to be equal.
+	return 0
+}
+
+func comparePrePart(s, o string) int {
+	// Fastpath if they are equal
+	if s == o {
+		return 0
+	}
+
+	// When s or o are empty we can use the other in an attempt to determine
+	// the response.
+	if s == "" {
+		if o != "" {
+			return -1
+		}
+		return 1
+	}
+
+	if o == "" {
+		if s != "" {
+			return 1
+		}
+		return -1
+	}
+
+	// When comparing strings lexicographically, "99" is greater than "103". To
+	// handle cases like this we need to detect numbers and compare them. According
+	// to the semver spec, numbers are always positive. If there is a - at the
+	// start like -99 this is to be evaluated as an alphanum. Numbers always
+	// have precedence over alphanum. Parsing as Uints because negative numbers
+	// are ignored.
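+	// (A leading '-' makes ParseUint fail, so such identifiers fall through
+	// to the alphanumeric comparison path below.)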
+
+	oi, n1 := strconv.ParseUint(o, 10, 64)
+	si, n2 := strconv.ParseUint(s, 10, 64)
+
+	// When both are strings, compare the strings
+	if n1 != nil && n2 != nil {
+		if s > o {
+			return 1
+		}
+		return -1
+	} else if n1 != nil {
+		// o is a string and s is a number
+		return -1
+	} else if n2 != nil {
+		// s is a string and o is a number
+		return 1
+	}
+	// Both are numbers
+	if si > oi {
+		return 1
+	}
+	return -1
+}
+
+// Like strings.ContainsAny but checks that the string contains only runes
+// from comp instead of any of them.
+func containsOnly(s string, comp string) bool {
+	return strings.IndexFunc(s, func(r rune) bool {
+		return !strings.ContainsRune(comp, r)
+	}) == -1
+}
+
+// From the spec, "Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty.
+// Numeric identifiers MUST NOT include leading zeroes.". These segments can
+// be dot separated.
+func validatePrerelease(p string) error {
+	eparts := strings.Split(p, ".")
+	for _, p := range eparts {
+		if p == "" {
+			return ErrInvalidPrerelease
+		} else if containsOnly(p, num) {
+			if len(p) > 1 && p[0] == '0' {
+				return ErrSegmentStartsZero
+			}
+		} else if !containsOnly(p, allowed) {
+			return ErrInvalidPrerelease
+		}
+	}
+
+	return nil
+}
+
+// From the spec, "Build metadata MAY be denoted by
+// appending a plus sign and a series of dot separated identifiers immediately
+// following the patch or pre-release version. Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."
+func validateMetadata(m string) error {
+	eparts := strings.Split(m, ".")
+	for _, p := range eparts {
+		if p == "" {
+			return ErrInvalidMetadata
+		} else if !containsOnly(p, allowed) {
+			return ErrInvalidMetadata
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/cenkalti/backoff/v5/.gitignore b/vendor/github.com/cenkalti/backoff/v5/.gitignore
new file mode 100644
index 00000000..50d95c54
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+# IDEs
+.idea/
diff --git a/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md
new file mode 100644
index 00000000..658c3743
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md
@@ -0,0 +1,29 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [5.0.0] - 2024-12-19
+
+### Added
+
+- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry.
+
+### Changed
+
+- Retry function now accepts additional options for specifying max number of tries and max elapsed time.
+- Retry function now accepts a context.Context.
+- Operation function signature changed to return result (any type) and error.
+
+### Removed
+
+- RetryNotify* and RetryWithData functions. Only single Retry function remains.
+- Optional arguments from ExponentialBackoff constructor.
+- Clock and Timer interfaces.
+
+### Fixed
+
+- The original error is returned from Retry if there's a PermanentError. (#144)
+- The Retry function respects the wrapped PermanentError. (#140)
diff --git a/vendor/github.com/cenkalti/backoff/v5/LICENSE b/vendor/github.com/cenkalti/backoff/v5/LICENSE
new file mode 100644
index 00000000..89b81799
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cenkalti/backoff/v5/README.md b/vendor/github.com/cenkalti/backoff/v5/README.md
new file mode 100644
index 00000000..4611b1d1
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/README.md
@@ -0,0 +1,31 @@
+# Exponential Backoff [![GoDoc][godoc image]][godoc]
+
+This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
+
+[Exponential backoff][exponential backoff wiki]
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The retries exponentially increase and stop increasing when a certain threshold is met.
+
+## Usage
+
+Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end.
+
+For most cases, use the `Retry` function. See [example_test.go][example] for an example.
+
+If you have specific needs, copy the `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed.
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If the proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go
+[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go
diff --git a/vendor/github.com/cenkalti/backoff/v5/backoff.go b/vendor/github.com/cenkalti/backoff/v5/backoff.go
new file mode 100644
index 00000000..dd2b24ca
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use the Retry function for retrying operations that may fail.
+// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // backoff.Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff() + // if duration == backoff.Stop { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v5/error.go b/vendor/github.com/cenkalti/backoff/v5/error.go new file mode 100644 index 00000000..beb2b38a --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/error.go @@ -0,0 +1,46 @@ +package backoff + +import ( + "fmt" + "time" +) + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} + +// Error returns a string representation of the Permanent error. +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Unwrap returns the wrapped error. +func (e *PermanentError) Unwrap() error { + return e.Err +} + +// RetryAfterError signals that the operation should be retried after the given duration. +type RetryAfterError struct { + Duration time.Duration +} + +// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying. +func RetryAfter(seconds int) error { + return &RetryAfterError{Duration: time.Duration(seconds) * time.Second} +} + +// Error returns a string representation of the RetryAfter error. 
+func (e *RetryAfterError) Error() string { + return fmt.Sprintf("retry after %s", e.Duration) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go new file mode 100644 index 00000000..79d425e8 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go @@ -0,0 +1,118 @@ +package backoff + +import ( + "math/rand/v2" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +Example: Given the following default arguments, for 9 tries the sequence will be: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + + currentInterval time.Duration +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + return &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + } +} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval +} + +// NextBackOff calculates the next backoff interval using the formula: +// +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + if b.currentInterval == 0 { + b.currentInterval = b.InitialInterval + } + + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + return next +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. 
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go new file mode 100644 index 00000000..32a7f988 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/retry.go @@ -0,0 +1,139 @@ +package backoff + +import ( + "context" + "errors" + "time" +) + +// DefaultMaxElapsedTime sets a default limit for the total retry duration. +const DefaultMaxElapsedTime = 15 * time.Minute + +// Operation is a function that attempts an operation and may be retried. +type Operation[T any] func() (T, error) + +// Notify is a function called on operation error with the error and backoff duration. +type Notify func(error, time.Duration) + +// retryOptions holds configuration settings for the retry mechanism. +type retryOptions struct { + BackOff BackOff // Strategy for calculating backoff periods. + Timer timer // Timer to manage retry delays. + Notify Notify // Optional function to notify on each retry error. + MaxTries uint // Maximum number of retry attempts. + MaxElapsedTime time.Duration // Maximum total time for all retries. +} + +type RetryOption func(*retryOptions) + +// WithBackOff configures a custom backoff strategy. +func WithBackOff(b BackOff) RetryOption { + return func(args *retryOptions) { + args.BackOff = b + } +} + +// withTimer sets a custom timer for managing delays between retries. +func withTimer(t timer) RetryOption { + return func(args *retryOptions) { + args.Timer = t + } +} + +// WithNotify sets a notification function to handle retry errors. +func WithNotify(n Notify) RetryOption { + return func(args *retryOptions) { + args.Notify = n + } +} + +// WithMaxTries limits the number of all attempts. +func WithMaxTries(n uint) RetryOption { + return func(args *retryOptions) { + args.MaxTries = n + } +} + +// WithMaxElapsedTime limits the total duration for retry attempts. +func WithMaxElapsedTime(d time.Duration) RetryOption { + return func(args *retryOptions) { + args.MaxElapsedTime = d + } +} + +// Retry attempts the operation until success, a permanent error, or backoff completion. +// It ensures the operation is executed at least once. +// +// Returns the operation result or error if retries are exhausted or context is cancelled. +func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) { + // Initialize default retry options. 
+ args := &retryOptions{ + BackOff: NewExponentialBackOff(), + Timer: &defaultTimer{}, + MaxElapsedTime: DefaultMaxElapsedTime, + } + + // Apply user-provided options to the default settings. + for _, opt := range opts { + opt(args) + } + + defer args.Timer.Stop() + + startedAt := time.Now() + args.BackOff.Reset() + for numTries := uint(1); ; numTries++ { + // Execute the operation. + res, err := operation() + if err == nil { + return res, nil + } + + // Stop retrying if maximum tries exceeded. + if args.MaxTries > 0 && numTries >= args.MaxTries { + return res, err + } + + // Handle permanent errors without retrying. + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, permanent.Unwrap() + } + + // Stop retrying if context is cancelled. + if cerr := context.Cause(ctx); cerr != nil { + return res, cerr + } + + // Calculate next backoff duration. + next := args.BackOff.NextBackOff() + if next == Stop { + return res, err + } + + // Reset backoff if RetryAfterError is encountered. + var retryAfter *RetryAfterError + if errors.As(err, &retryAfter) { + next = retryAfter.Duration + args.BackOff.Reset() + } + + // Stop retrying if maximum elapsed time exceeded. + if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime { + return res, err + } + + // Notify on error if a notifier function is provided. + if args.Notify != nil { + args.Notify(err, next) + } + + // Wait for the next backoff period or context cancellation. + args.Timer.Start(next) + select { + case <-args.Timer.C(): + case <-ctx.Done(): + return res, context.Cause(ctx) + } + } +} diff --git a/vendor/github.com/cenkalti/backoff/v5/ticker.go b/vendor/github.com/cenkalti/backoff/v5/ticker.go new file mode 100644 index 00000000..f0d4b2ae --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/ticker.go @@ -0,0 +1,83 @@ +package backoff + +import ( + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + timer timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + timer: &defaultTimer{}, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. 
+ return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v5/timer.go b/vendor/github.com/cenkalti/backoff/v5/timer.go new file mode 100644 index 00000000..a8953097 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. +func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cihub/seelog/LICENSE.txt b/vendor/github.com/cihub/seelog/LICENSE.txt new file mode 100644 index 00000000..8c706814 --- /dev/null +++ b/vendor/github.com/cihub/seelog/LICENSE.txt @@ -0,0 +1,24 @@ +Copyright (c) 2012, Cloud Instruments Co., Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Cloud Instruments Co., Ltd. nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cihub/seelog/README.markdown b/vendor/github.com/cihub/seelog/README.markdown new file mode 100644 index 00000000..7dd1ab35 --- /dev/null +++ b/vendor/github.com/cihub/seelog/README.markdown @@ -0,0 +1,116 @@ +Seelog +======= + +Seelog is a powerful and easy-to-learn logging framework that provides functionality for flexible dispatching, filtering, and formatting log messages. +It is natively written in the [Go](http://golang.org/) programming language. 
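Before the seelog files continue, a usage sketch for the `Ticker` and timer pair vendored just above (the `attempt` helper and the five-try cutoff are assumptions for illustration):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

func attempt() error { return errors.New("not ready yet") } // hypothetical operation

func main() {
	// NewTicker ticks at the intervals reported by the BackOff policy
	// and is guaranteed to tick at least once.
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	tries := 0
	for range ticker.C {
		tries++
		if err := attempt(); err != nil && tries < 5 {
			fmt.Println("attempt failed, waiting for next tick:", err)
			continue
		}
		break // success, or giving up after five tries
	}
}
```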
+ +[![Build Status](https://drone.io/github.com/cihub/seelog/status.png)](https://drone.io/github.com/cihub/seelog/latest) + +Features +------------------ + +* Xml configuring to be able to change logger parameters without recompilation +* Changing configurations on the fly without app restart +* Possibility to set different log configurations for different project files and functions +* Adjustable message formatting +* Simultaneous log output to multiple streams +* Choosing logger priority strategy to minimize performance hit +* Different output writers + * Console writer + * File writer + * Buffered writer (Chunk writer) + * Rolling log writer (Logging with rotation) + * SMTP writer + * Others... (See [Wiki](https://github.com/cihub/seelog/wiki)) +* Log message wrappers (JSON, XML, etc.) +* Global variables and functions for easy usage in standalone apps +* Functions for flexible usage in libraries + +Quick-start +----------- + +```go +package main + +import log "github.com/cihub/seelog" + +func main() { + defer log.Flush() + log.Info("Hello from Seelog!") +} +``` + +Installation +------------ + +If you don't have the Go development environment installed, visit the +[Getting Started](http://golang.org/doc/install.html) document and follow the instructions. Once you're ready, execute the following command: + +``` +go get -u github.com/cihub/seelog +``` + +*IMPORTANT*: If you are not using the latest release version of Go, check out this [wiki page](https://github.com/cihub/seelog/wiki/Notes-on-'go-get') + +Documentation +--------------- + +Seelog has github wiki pages, which contain detailed how-tos references: https://github.com/cihub/seelog/wiki + +Examples +--------------- + +Seelog examples can be found here: [seelog-examples](https://github.com/cihub/seelog-examples) + +Issues +--------------- + +Feel free to push issues that could make Seelog better: https://github.com/cihub/seelog/issues + +Changelog +--------------- +* **v2.6** : Config using code and custom formatters + * Configuration using code in addition to xml (All internal receiver/dispatcher/logger types are now exported). + * Custom formatters. Check [wiki](https://github.com/cihub/seelog/wiki/Custom-formatters) + * Bugfixes and internal improvements. +* **v2.5** : Interaction with other systems. Part 2: custom receivers + * Finished custom receivers feature. Check [wiki](https://github.com/cihub/seelog/wiki/custom-receivers) + * Added 'LoggerFromCustomReceiver' + * Added 'LoggerFromWriterWithMinLevelAndFormat' + * Added 'LoggerFromCustomReceiver' + * Added 'LoggerFromParamConfigAs...' +* **v2.4** : Interaction with other systems. Part 1: wrapping seelog + * Added configurable caller stack skip logic + * Added 'SetAdditionalStackDepth' to 'LoggerInterface' +* **v2.3** : Rethinking 'rolling' receiver + * Reimplemented 'rolling' receiver + * Added 'Max rolls' feature for 'rolling' receiver with type='date' + * Fixed 'rolling' receiver issue: renaming on Windows +* **v2.2** : go1.0 compatibility point [go1.0 tag] + * Fixed internal bugs + * Added 'ANSI n [;k]' format identifier: %EscN + * Made current release go1 compatible +* **v2.1** : Some new features + * Rolling receiver archiving option. + * Added format identifier: %Line + * Smtp: added paths to PEM files directories + * Added format identifier: %FuncShort + * Warn, Error and Critical methods now return an error +* **v2.0** : Second major release. BREAKING CHANGES. 
+ * Support of binaries with stripped symbols + * Added log strategy: adaptive + * Critical message now forces Flush() + * Added predefined formats: xml-debug, xml-debug-short, xml, xml-short, json-debug, json-debug-short, json, json-short, debug, debug-short, fast + * Added receiver: conn (network connection writer) + * BREAKING CHANGE: added Tracef, Debugf, Infof, etc. to satisfy the print/printf principle + * Bug fixes +* **v1.0** : Initial release. Features: + * Xml config + * Changing configurations on the fly without app restart + * Contraints and exceptions + * Formatting + * Log strategies: sync, async loop, async timer + * Receivers: buffered, console, file, rolling, smtp + + + diff --git a/vendor/github.com/cihub/seelog/archive/archive.go b/vendor/github.com/cihub/seelog/archive/archive.go new file mode 100644 index 00000000..923036f2 --- /dev/null +++ b/vendor/github.com/cihub/seelog/archive/archive.go @@ -0,0 +1,198 @@ +package archive + +import ( + "archive/tar" + "archive/zip" + "fmt" + "io" + "io/ioutil" + "os" + "time" + + "github.com/cihub/seelog/archive/gzip" +) + +// Reader is the interface for reading files from an archive. +type Reader interface { + NextFile() (name string, err error) + io.Reader +} + +// ReadCloser is the interface that groups Reader with the Close method. +type ReadCloser interface { + Reader + io.Closer +} + +// Writer is the interface for writing files to an archived format. +type Writer interface { + NextFile(name string, fi os.FileInfo) error + io.Writer +} + +// WriteCloser is the interface that groups Writer with the Close method. +type WriteCloser interface { + Writer + io.Closer +} + +type nopCloser struct{ Reader } + +func (nopCloser) Close() error { return nil } + +// NopCloser returns a ReadCloser with a no-op Close method wrapping the +// provided Reader r. +func NopCloser(r Reader) ReadCloser { + return nopCloser{r} +} + +// Copy copies from src to dest until either EOF is reached on src or an error +// occurs. +// +// When the archive format of src matches that of dst, Copy streams the files +// directly into dst. Otherwise, copy buffers the contents to disk to compute +// headers before writing to dst. 
+func Copy(dst Writer, src Reader) error { + switch src := src.(type) { + case tarReader: + if dst, ok := dst.(tarWriter); ok { + return copyTar(dst, src) + } + case zipReader: + if dst, ok := dst.(zipWriter); ok { + return copyZip(dst, src) + } + // Switch on concrete type because gzip has no special methods + case *gzip.Reader: + if dst, ok := dst.(*gzip.Writer); ok { + _, err := io.Copy(dst, src) + return err + } + } + + return copyBuffer(dst, src) +} + +func copyBuffer(dst Writer, src Reader) (err error) { + const defaultFileMode = 0666 + + buf, err := ioutil.TempFile("", "archive_copy_buffer") + if err != nil { + return err + } + defer os.Remove(buf.Name()) // Do not care about failure removing temp + defer buf.Close() // Do not care about failure closing temp + for { + // Handle the next file + name, err := src.NextFile() + switch err { + case io.EOF: // Done copying + return nil + default: // Failed to write: bail out + return err + case nil: // Proceed below + } + + // Buffer the file + if _, err := io.Copy(buf, src); err != nil { + return fmt.Errorf("buffer to disk: %v", err) + } + + // Seek to the start of the file for full file copy + if _, err := buf.Seek(0, os.SEEK_SET); err != nil { + return err + } + + // Set desired file permissions + if err := os.Chmod(buf.Name(), defaultFileMode); err != nil { + return err + } + fi, err := buf.Stat() + if err != nil { + return err + } + + // Write the buffered file + if err := dst.NextFile(name, fi); err != nil { + return err + } + if _, err := io.Copy(dst, buf); err != nil { + return fmt.Errorf("copy to dst: %v", err) + } + if err := buf.Truncate(0); err != nil { + return err + } + if _, err := buf.Seek(0, os.SEEK_SET); err != nil { + return err + } + } +} + +type tarReader interface { + Next() (*tar.Header, error) + io.Reader +} + +type tarWriter interface { + WriteHeader(hdr *tar.Header) error + io.Writer +} + +type zipReader interface { + Files() []*zip.File +} + +type zipWriter interface { + CreateHeader(fh *zip.FileHeader) (io.Writer, error) +} + +func copyTar(w tarWriter, r tarReader) error { + for { + hdr, err := r.Next() + switch err { + case io.EOF: + return nil + default: // Handle error + return err + case nil: // Proceed below + } + + info := hdr.FileInfo() + // Skip directories + if info.IsDir() { + continue + } + if err := w.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(w, r); err != nil { + return err + } + } +} + +func copyZip(zw zipWriter, r zipReader) error { + for _, f := range r.Files() { + if err := copyZipFile(zw, f); err != nil { + return err + } + } + return nil +} + +func copyZipFile(zw zipWriter, f *zip.File) error { + rc, err := f.Open() + if err != nil { + return err + } + defer rc.Close() // Read-only + + hdr := f.FileHeader + hdr.SetModTime(time.Now()) + w, err := zw.CreateHeader(&hdr) + if err != nil { + return err + } + _, err = io.Copy(w, rc) + return err +} diff --git a/vendor/github.com/cihub/seelog/archive/gzip/gzip.go b/vendor/github.com/cihub/seelog/archive/gzip/gzip.go new file mode 100644 index 00000000..ea121018 --- /dev/null +++ b/vendor/github.com/cihub/seelog/archive/gzip/gzip.go @@ -0,0 +1,64 @@ +// Package gzip implements reading and writing of gzip format compressed files. +// See the compress/gzip package for more details. +package gzip + +import ( + "compress/gzip" + "fmt" + "io" + "os" +) + +// Reader is an io.Reader that can be read to retrieve uncompressed data from a +// gzip-format compressed file. 
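As a concrete illustration of the `Copy` fast path shown in archive.go above, a hypothetical tar-to-tar copy (file names are assumptions; when source and destination formats match, `Copy` streams entries directly instead of buffering them through a temp file):

```go
package main

import (
	"os"

	"github.com/cihub/seelog/archive"
	"github.com/cihub/seelog/archive/tar"
)

func main() {
	src, err := os.Open("in.tar") // hypothetical input archive
	if err != nil {
		panic(err)
	}
	defer src.Close()

	dst, err := os.Create("out.tar") // hypothetical output archive
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	w := tar.NewWriter(dst)
	defer w.Close()

	// Formats match, so this takes the direct tar-to-tar path.
	if err := archive.Copy(w, tar.NewReader(src)); err != nil {
		panic(err)
	}
}
```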
+type Reader struct { + gzip.Reader + name string + isEOF bool +} + +// NewReader creates a new Reader reading the given reader. +func NewReader(r io.Reader, name string) (*Reader, error) { + gr, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &Reader{ + Reader: *gr, + name: name, + }, nil +} + +// NextFile returns the file name. Calls subsequent to the first call will +// return EOF. +func (r *Reader) NextFile() (name string, err error) { + if r.isEOF { + return "", io.EOF + } + + r.isEOF = true + return r.name, nil +} + +// Writer is an io.WriteCloser. Writes to a Writer are compressed and written to w. +type Writer struct { + gzip.Writer + name string + noMoreFiles bool +} + +// NextFile never returns a next file, and should not be called more than once. +func (w *Writer) NextFile(name string, _ os.FileInfo) error { + if w.noMoreFiles { + return fmt.Errorf("gzip: only accepts one file: already received %q and now %q", w.name, name) + } + w.noMoreFiles = true + w.name = name + return nil +} + +// NewWriter returns a new Writer. Writes to the returned writer are compressed +// and written to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{Writer: *gzip.NewWriter(w)} +} diff --git a/vendor/github.com/cihub/seelog/archive/tar/tar.go b/vendor/github.com/cihub/seelog/archive/tar/tar.go new file mode 100644 index 00000000..8dd87f57 --- /dev/null +++ b/vendor/github.com/cihub/seelog/archive/tar/tar.go @@ -0,0 +1,72 @@ +package tar + +import ( + "archive/tar" + "io" + "os" +) + +// Reader provides sequential access to the contents of a tar archive. +type Reader struct { + tar.Reader +} + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { + return &Reader{Reader: *tar.NewReader(r)} +} + +// NextFile advances to the next file in the tar archive. +func (r *Reader) NextFile() (name string, err error) { + hdr, err := r.Next() + if err != nil { + return "", err + } + return hdr.Name, nil +} + +// Writer provides sequential writing of a tar archive in POSIX.1 format. +type Writer struct { + tar.Writer + closers []io.Closer +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{Writer: *tar.NewWriter(w)} +} + +// NewWriteMultiCloser creates a new Writer writing to w that also closes all +// closers in order on close. +func NewWriteMultiCloser(w io.WriteCloser, closers ...io.Closer) *Writer { + return &Writer{ + Writer: *tar.NewWriter(w), + closers: closers, + } +} + +// NextFile computes and writes a header and prepares to accept the file's +// contents. +func (w *Writer) NextFile(name string, fi os.FileInfo) error { + if name == "" { + name = fi.Name() + } + hdr, err := tar.FileInfoHeader(fi, name) + if err != nil { + return err + } + hdr.Name = name + return w.WriteHeader(hdr) +} + +// Close closes the tar archive and all other closers, flushing any unwritten +// data to the underlying writer. +func (w *Writer) Close() error { + err := w.Writer.Close() + for _, c := range w.closers { + if cerr := c.Close(); cerr != nil && err == nil { + err = cerr + } + } + return err +} diff --git a/vendor/github.com/cihub/seelog/archive/zip/zip.go b/vendor/github.com/cihub/seelog/archive/zip/zip.go new file mode 100644 index 00000000..4210b03b --- /dev/null +++ b/vendor/github.com/cihub/seelog/archive/zip/zip.go @@ -0,0 +1,89 @@ +package zip + +import ( + "archive/zip" + "io" + "os" +) + +// Reader provides sequential access to the contents of a zip archive. 
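A short sketch of the one-file contract of the gzip adapter above (file names are illustrative; `NextFile` ignores its `os.FileInfo` argument, so `nil` is passed here):

```go
package main

import (
	"fmt"
	"os"

	"github.com/cihub/seelog/archive/gzip"
)

func main() {
	out, err := os.Create("app.log.gz") // hypothetical output file
	if err != nil {
		panic(err)
	}
	defer out.Close()

	w := gzip.NewWriter(out)
	defer w.Close()

	if err := w.NextFile("app.log", nil); err != nil { // first file: accepted
		panic(err)
	}
	fmt.Fprintln(w, "hello")

	// A second NextFile fails: a gzip stream holds exactly one file.
	if err := w.NextFile("other.log", nil); err != nil {
		fmt.Println(err)
	}
}
```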
+type Reader struct { + zip.Reader + unread []*zip.File + rc io.ReadCloser +} + +// NewReader returns a new Reader reading from r, which is assumed to have the +// given size in bytes. +func NewReader(r io.ReaderAt, size int64) (*Reader, error) { + zr, err := zip.NewReader(r, size) + if err != nil { + return nil, err + } + return &Reader{Reader: *zr}, nil +} + +// NextFile advances to the next file in the zip archive. +func (r *Reader) NextFile() (name string, err error) { + // Initialize unread + if r.unread == nil { + r.unread = r.Files()[:] + } + + // Close previous file + if r.rc != nil { + r.rc.Close() // Read-only + } + + if len(r.unread) == 0 { + return "", io.EOF + } + + // Open and return next unread + f := r.unread[0] + name, r.unread = f.Name, r.unread[1:] + r.rc, err = f.Open() + if err != nil { + return "", err + } + return name, nil +} + +func (r *Reader) Read(p []byte) (n int, err error) { + return r.rc.Read(p) +} + +// Files returns the full list of files in the zip archive. +func (r *Reader) Files() []*zip.File { + return r.File +} + +// Writer provides sequential writing of a zip archive.1 format. +type Writer struct { + zip.Writer + w io.Writer +} + +// NewWriter returns a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{Writer: *zip.NewWriter(w)} +} + +// NextFile computes and writes a header and prepares to accept the file's +// contents. +func (w *Writer) NextFile(name string, fi os.FileInfo) error { + if name == "" { + name = fi.Name() + } + hdr, err := zip.FileInfoHeader(fi) + if err != nil { + return err + } + hdr.Name = name + w.w, err = w.CreateHeader(hdr) + return err +} + +func (w *Writer) Write(p []byte) (n int, err error) { + return w.w.Write(p) +} diff --git a/vendor/github.com/cihub/seelog/behavior_adaptivelogger.go b/vendor/github.com/cihub/seelog/behavior_adaptivelogger.go new file mode 100644 index 00000000..0c640cae --- /dev/null +++ b/vendor/github.com/cihub/seelog/behavior_adaptivelogger.go @@ -0,0 +1,129 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
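A read-side sketch for the zip adapter above (archive name is an assumption): `NextFile` walks the entries in order, closing the previous one as it goes, and returns `io.EOF` once the archive is exhausted.

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/cihub/seelog/archive/zip"
)

func main() {
	f, err := os.Open("logs.zip") // hypothetical archive
	if err != nil {
		panic(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}

	r, err := zip.NewReader(f, fi.Size())
	if err != nil {
		panic(err)
	}

	for {
		name, err := r.NextFile()
		if err == io.EOF {
			break // all entries consumed
		}
		if err != nil {
			panic(err)
		}
		fmt.Println("entry:", name)
		io.Copy(io.Discard, r) // Read() yields the current entry's contents
	}
}
```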
+ +package seelog + +import ( + "errors" + "fmt" + "math" + "time" +) + +var ( + adaptiveLoggerMaxInterval = time.Minute + adaptiveLoggerMaxCriticalMsgCount = uint32(1000) +) + +// asyncAdaptiveLogger represents asynchronous adaptive logger which acts like +// an async timer logger, but its interval depends on the current message count +// in the queue. +// +// Interval = I, minInterval = m, maxInterval = M, criticalMsgCount = C, msgCount = c: +// I = m + (C - Min(c, C)) / C * (M - m) +type asyncAdaptiveLogger struct { + asyncLogger + minInterval time.Duration + criticalMsgCount uint32 + maxInterval time.Duration +} + +// NewAsyncLoopLogger creates a new asynchronous adaptive logger +func NewAsyncAdaptiveLogger( + config *logConfig, + minInterval time.Duration, + maxInterval time.Duration, + criticalMsgCount uint32) (*asyncAdaptiveLogger, error) { + + if minInterval <= 0 { + return nil, errors.New("async adaptive logger min interval should be > 0") + } + + if maxInterval > adaptiveLoggerMaxInterval { + return nil, fmt.Errorf("async adaptive logger max interval should be <= %s", + adaptiveLoggerMaxInterval) + } + + if criticalMsgCount <= 0 { + return nil, errors.New("async adaptive logger critical msg count should be > 0") + } + + if criticalMsgCount > adaptiveLoggerMaxCriticalMsgCount { + return nil, fmt.Errorf("async adaptive logger critical msg count should be <= %s", + adaptiveLoggerMaxInterval) + } + + asnAdaptiveLogger := new(asyncAdaptiveLogger) + + asnAdaptiveLogger.asyncLogger = *newAsyncLogger(config) + asnAdaptiveLogger.minInterval = minInterval + asnAdaptiveLogger.maxInterval = maxInterval + asnAdaptiveLogger.criticalMsgCount = criticalMsgCount + + go asnAdaptiveLogger.processQueue() + + return asnAdaptiveLogger, nil +} + +func (asnAdaptiveLogger *asyncAdaptiveLogger) processItem() (closed bool, itemCount int) { + asnAdaptiveLogger.queueHasElements.L.Lock() + defer asnAdaptiveLogger.queueHasElements.L.Unlock() + + for asnAdaptiveLogger.msgQueue.Len() == 0 && !asnAdaptiveLogger.Closed() { + asnAdaptiveLogger.queueHasElements.Wait() + } + + if asnAdaptiveLogger.Closed() { + return true, asnAdaptiveLogger.msgQueue.Len() + } + + asnAdaptiveLogger.processQueueElement() + return false, asnAdaptiveLogger.msgQueue.Len() - 1 +} + +// I = m + (C - Min(c, C)) / C * (M - m) => +// I = m + cDiff * mDiff, +// cDiff = (C - Min(c, C)) / C) +// mDiff = (M - m) +func (asnAdaptiveLogger *asyncAdaptiveLogger) calcAdaptiveInterval(msgCount int) time.Duration { + critCountF := float64(asnAdaptiveLogger.criticalMsgCount) + cDiff := (critCountF - math.Min(float64(msgCount), critCountF)) / critCountF + mDiff := float64(asnAdaptiveLogger.maxInterval - asnAdaptiveLogger.minInterval) + + return asnAdaptiveLogger.minInterval + time.Duration(cDiff*mDiff) +} + +func (asnAdaptiveLogger *asyncAdaptiveLogger) processQueue() { + for !asnAdaptiveLogger.Closed() { + closed, itemCount := asnAdaptiveLogger.processItem() + + if closed { + break + } + + interval := asnAdaptiveLogger.calcAdaptiveInterval(itemCount) + + <-time.After(interval) + } +} diff --git a/vendor/github.com/cihub/seelog/behavior_asynclogger.go b/vendor/github.com/cihub/seelog/behavior_asynclogger.go new file mode 100644 index 00000000..75231067 --- /dev/null +++ b/vendor/github.com/cihub/seelog/behavior_asynclogger.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. 
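To make the adaptive-interval formula above concrete with assumed sample values m = 100ms, M = 1s, C = 1000: an empty queue (c = 0) gives I = m + (1000/1000)(M - m) = 1s, the maximum wait; a quarter-full queue (c = 250) gives I = 100ms + 0.75 * 900ms = 775ms; and at or beyond the critical count (c >= 1000) the interval collapses to the minimum, I = m = 100ms. The busier the queue, the more aggressively it is drained.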
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "container/list" + "fmt" + "sync" +) + +// MaxQueueSize is the critical number of messages in the queue that result in an immediate flush. +const ( + MaxQueueSize = 10000 +) + +type msgQueueItem struct { + level LogLevel + context LogContextInterface + message fmt.Stringer +} + +// asyncLogger represents common data for all asynchronous loggers +type asyncLogger struct { + commonLogger + msgQueue *list.List + queueHasElements *sync.Cond +} + +// newAsyncLogger creates a new asynchronous logger +func newAsyncLogger(config *logConfig) *asyncLogger { + asnLogger := new(asyncLogger) + + asnLogger.msgQueue = list.New() + asnLogger.queueHasElements = sync.NewCond(new(sync.Mutex)) + + asnLogger.commonLogger = *newCommonLogger(config, asnLogger) + + return asnLogger +} + +func (asnLogger *asyncLogger) innerLog( + level LogLevel, + context LogContextInterface, + message fmt.Stringer) { + + asnLogger.addMsgToQueue(level, context, message) +} + +func (asnLogger *asyncLogger) Close() { + asnLogger.m.Lock() + defer asnLogger.m.Unlock() + + if !asnLogger.Closed() { + asnLogger.flushQueue(true) + asnLogger.config.RootDispatcher.Flush() + + if err := asnLogger.config.RootDispatcher.Close(); err != nil { + reportInternalError(err) + } + + asnLogger.closedM.Lock() + asnLogger.closed = true + asnLogger.closedM.Unlock() + asnLogger.queueHasElements.Broadcast() + } +} + +func (asnLogger *asyncLogger) Flush() { + asnLogger.m.Lock() + defer asnLogger.m.Unlock() + + if !asnLogger.Closed() { + asnLogger.flushQueue(true) + asnLogger.config.RootDispatcher.Flush() + } +} + +func (asnLogger *asyncLogger) flushQueue(lockNeeded bool) { + if lockNeeded { + asnLogger.queueHasElements.L.Lock() + defer asnLogger.queueHasElements.L.Unlock() + } + + for asnLogger.msgQueue.Len() > 0 { + asnLogger.processQueueElement() + } +} + +func (asnLogger *asyncLogger) processQueueElement() { + if asnLogger.msgQueue.Len() > 0 { + backElement := asnLogger.msgQueue.Front() + msg, _ := backElement.Value.(msgQueueItem) + asnLogger.processLogMsg(msg.level, msg.message, msg.context) + asnLogger.msgQueue.Remove(backElement) + } +} + +func (asnLogger *asyncLogger) addMsgToQueue( + level LogLevel, + context LogContextInterface, + 
message fmt.Stringer) { + + if !asnLogger.Closed() { + asnLogger.queueHasElements.L.Lock() + defer asnLogger.queueHasElements.L.Unlock() + + if asnLogger.msgQueue.Len() >= MaxQueueSize { + fmt.Printf("Seelog queue overflow: more than %v messages in the queue. Flushing.\n", MaxQueueSize) + asnLogger.flushQueue(false) + } + + queueItem := msgQueueItem{level, context, message} + + asnLogger.msgQueue.PushBack(queueItem) + asnLogger.queueHasElements.Broadcast() + } else { + err := fmt.Errorf("queue closed! Cannot process element: %d %#v", level, message) + reportInternalError(err) + } +} diff --git a/vendor/github.com/cihub/seelog/behavior_asynclooplogger.go b/vendor/github.com/cihub/seelog/behavior_asynclooplogger.go new file mode 100644 index 00000000..972467b3 --- /dev/null +++ b/vendor/github.com/cihub/seelog/behavior_asynclooplogger.go @@ -0,0 +1,69 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package seelog + +// asyncLoopLogger represents asynchronous logger which processes the log queue in +// a 'for' loop +type asyncLoopLogger struct { + asyncLogger +} + +// NewAsyncLoopLogger creates a new asynchronous loop logger +func NewAsyncLoopLogger(config *logConfig) *asyncLoopLogger { + + asnLoopLogger := new(asyncLoopLogger) + + asnLoopLogger.asyncLogger = *newAsyncLogger(config) + + go asnLoopLogger.processQueue() + + return asnLoopLogger +} + +func (asnLoopLogger *asyncLoopLogger) processItem() (closed bool) { + asnLoopLogger.queueHasElements.L.Lock() + defer asnLoopLogger.queueHasElements.L.Unlock() + + for asnLoopLogger.msgQueue.Len() == 0 && !asnLoopLogger.Closed() { + asnLoopLogger.queueHasElements.Wait() + } + + if asnLoopLogger.Closed() { + return true + } + + asnLoopLogger.processQueueElement() + return false +} + +func (asnLoopLogger *asyncLoopLogger) processQueue() { + for !asnLoopLogger.Closed() { + closed := asnLoopLogger.processItem() + + if closed { + break + } + } +} diff --git a/vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go b/vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go new file mode 100644 index 00000000..8118f205 --- /dev/null +++ b/vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go @@ -0,0 +1,82 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package seelog + +import ( + "errors" + "time" +) + +// asyncTimerLogger represents asynchronous logger which processes the log queue each +// 'duration' nanoseconds +type asyncTimerLogger struct { + asyncLogger + interval time.Duration +} + +// NewAsyncLoopLogger creates a new asynchronous loop logger +func NewAsyncTimerLogger(config *logConfig, interval time.Duration) (*asyncTimerLogger, error) { + + if interval <= 0 { + return nil, errors.New("async logger interval should be > 0") + } + + asnTimerLogger := new(asyncTimerLogger) + + asnTimerLogger.asyncLogger = *newAsyncLogger(config) + asnTimerLogger.interval = interval + + go asnTimerLogger.processQueue() + + return asnTimerLogger, nil +} + +func (asnTimerLogger *asyncTimerLogger) processItem() (closed bool) { + asnTimerLogger.queueHasElements.L.Lock() + defer asnTimerLogger.queueHasElements.L.Unlock() + + for asnTimerLogger.msgQueue.Len() == 0 && !asnTimerLogger.Closed() { + asnTimerLogger.queueHasElements.Wait() + } + + if asnTimerLogger.Closed() { + return true + } + + asnTimerLogger.processQueueElement() + return false +} + +func (asnTimerLogger *asyncTimerLogger) processQueue() { + for !asnTimerLogger.Closed() { + closed := asnTimerLogger.processItem() + + if closed { + break + } + + <-time.After(asnTimerLogger.interval) + } +} diff --git a/vendor/github.com/cihub/seelog/behavior_synclogger.go b/vendor/github.com/cihub/seelog/behavior_synclogger.go new file mode 100644 index 00000000..5a022ebc --- /dev/null +++ b/vendor/github.com/cihub/seelog/behavior_synclogger.go @@ -0,0 +1,75 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "fmt" +) + +// syncLogger performs logging in the same goroutine where 'Trace/Debug/...' 
+// func was called +type syncLogger struct { + commonLogger +} + +// NewSyncLogger creates a new synchronous logger +func NewSyncLogger(config *logConfig) *syncLogger { + syncLogger := new(syncLogger) + + syncLogger.commonLogger = *newCommonLogger(config, syncLogger) + + return syncLogger +} + +func (syncLogger *syncLogger) innerLog( + level LogLevel, + context LogContextInterface, + message fmt.Stringer) { + + syncLogger.processLogMsg(level, message, context) +} + +func (syncLogger *syncLogger) Close() { + syncLogger.m.Lock() + defer syncLogger.m.Unlock() + + if !syncLogger.Closed() { + if err := syncLogger.config.RootDispatcher.Close(); err != nil { + reportInternalError(err) + } + syncLogger.closedM.Lock() + syncLogger.closed = true + syncLogger.closedM.Unlock() + } +} + +func (syncLogger *syncLogger) Flush() { + syncLogger.m.Lock() + defer syncLogger.m.Unlock() + + if !syncLogger.Closed() { + syncLogger.config.RootDispatcher.Flush() + } +} diff --git a/vendor/github.com/cihub/seelog/cfg_config.go b/vendor/github.com/cihub/seelog/cfg_config.go new file mode 100644 index 00000000..76554fca --- /dev/null +++ b/vendor/github.com/cihub/seelog/cfg_config.go @@ -0,0 +1,212 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "os" +) + +// LoggerFromConfigAsFile creates logger with config from file. File should contain valid seelog xml. +func LoggerFromConfigAsFile(fileName string) (LoggerInterface, error) { + file, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer file.Close() + + conf, err := configFromReader(file) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromConfigAsBytes creates a logger with config from bytes stream. Bytes should contain valid seelog xml. +func LoggerFromConfigAsBytes(data []byte) (LoggerInterface, error) { + conf, err := configFromReader(bytes.NewBuffer(data)) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromConfigAsString creates a logger with config from a string. String should contain valid seelog xml. 
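A minimal sketch of the config-as-string entry point, assuming the documented seelog XML schema (the config itself is a hypothetical example):

```go
package main

import log "github.com/cihub/seelog"

// Hypothetical minimal config: synchronous logging to the console at
// info level and above, with an explicit message format.
const config = `
<seelog type="sync" minlevel="info">
    <outputs formatid="main">
        <console/>
    </outputs>
    <formats>
        <format id="main" format="%Date %Time [%LEVEL] %Msg%n"/>
    </formats>
</seelog>`

func main() {
	logger, err := log.LoggerFromConfigAsString(config)
	if err != nil {
		panic(err)
	}
	// Install as the package-level logger so log.Info etc. use it.
	if err := log.ReplaceLogger(logger); err != nil {
		panic(err)
	}
	defer log.Flush()

	log.Info("configured from an XML string")
}
```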
+func LoggerFromConfigAsString(data string) (LoggerInterface, error) { + return LoggerFromConfigAsBytes([]byte(data)) +} + +// LoggerFromParamConfigAsFile does the same as LoggerFromConfigAsFile, but includes special parser options. +// See 'CfgParseParams' comments. +func LoggerFromParamConfigAsFile(fileName string, parserParams *CfgParseParams) (LoggerInterface, error) { + file, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer file.Close() + + conf, err := configFromReaderWithConfig(file, parserParams) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromParamConfigAsBytes does the same as LoggerFromConfigAsBytes, but includes special parser options. +// See 'CfgParseParams' comments. +func LoggerFromParamConfigAsBytes(data []byte, parserParams *CfgParseParams) (LoggerInterface, error) { + conf, err := configFromReaderWithConfig(bytes.NewBuffer(data), parserParams) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromParamConfigAsString does the same as LoggerFromConfigAsString, but includes special parser options. +// See 'CfgParseParams' comments. +func LoggerFromParamConfigAsString(data string, parserParams *CfgParseParams) (LoggerInterface, error) { + return LoggerFromParamConfigAsBytes([]byte(data), parserParams) +} + +// LoggerFromWriterWithMinLevel is shortcut for LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat) +func LoggerFromWriterWithMinLevel(output io.Writer, minLevel LogLevel) (LoggerInterface, error) { + return LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat) +} + +// LoggerFromWriterWithMinLevelAndFormat creates a proxy logger that uses io.Writer as the +// receiver with minimal level = minLevel and with specified format. +// +// All messages with level more or equal to minLevel will be written to output and +// formatted using the default seelog format. +// +// Can be called for usage with non-Seelog systems +func LoggerFromWriterWithMinLevelAndFormat(output io.Writer, minLevel LogLevel, format string) (LoggerInterface, error) { + constraints, err := NewMinMaxConstraints(minLevel, CriticalLvl) + if err != nil { + return nil, err + } + formatter, err := NewFormatter(format) + if err != nil { + return nil, err + } + dispatcher, err := NewSplitDispatcher(formatter, []interface{}{output}) + if err != nil { + return nil, err + } + + conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromXMLDecoder creates logger with config from a XML decoder starting from a specific node. +// It should contain valid seelog xml, except for root node name. +func LoggerFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (LoggerInterface, error) { + conf, err := configFromXMLDecoder(xmlParser, rootNode) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromCustomReceiver creates a proxy logger that uses a CustomReceiver as the +// receiver. +// +// All messages will be sent to the specified custom receiver without additional +// formatting ('%Msg' format is used). +// +// Check CustomReceiver, RegisterReceiver for additional info. +// +// NOTE 1: CustomReceiver.AfterParse is only called when a receiver is instantiated +// by the config parser while parsing config. 
So, if you are not planning to use the +// same CustomReceiver for both proxying (via LoggerFromCustomReceiver call) and +// loading from config, just leave AfterParse implementation empty. +// +// NOTE 2: Unlike RegisterReceiver, LoggerFromCustomReceiver takes an already initialized +// instance that implements CustomReceiver. So, fill it with data and perform any initialization +// logic before calling this func and it won't be lost. +// +// So: +// * RegisterReceiver takes value just to get the reflect.Type from it and then +// instantiate it as many times as config is reloaded. +// +// * LoggerFromCustomReceiver takes value and uses it without modification and +// reinstantiation, directy passing it to the dispatcher tree. +func LoggerFromCustomReceiver(receiver CustomReceiver) (LoggerInterface, error) { + constraints, err := NewMinMaxConstraints(TraceLvl, CriticalLvl) + if err != nil { + return nil, err + } + + output, err := NewCustomReceiverDispatcherByValue(msgonlyformatter, receiver, "user-proxy", CustomReceiverInitArgs{}) + if err != nil { + return nil, err + } + dispatcher, err := NewSplitDispatcher(msgonlyformatter, []interface{}{output}) + if err != nil { + return nil, err + } + + conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +func CloneLogger(logger LoggerInterface) (LoggerInterface, error) { + switch logger := logger.(type) { + default: + return nil, fmt.Errorf("unexpected type %T", logger) + case *asyncAdaptiveLogger: + clone, err := NewAsyncAdaptiveLogger(logger.commonLogger.config, logger.minInterval, logger.maxInterval, logger.criticalMsgCount) + if err != nil { + return nil, err + } + return clone, nil + case *asyncLoopLogger: + return NewAsyncLoopLogger(logger.commonLogger.config), nil + case *asyncTimerLogger: + clone, err := NewAsyncTimerLogger(logger.commonLogger.config, logger.interval) + if err != nil { + return nil, err + } + return clone, nil + case *syncLogger: + return NewSyncLogger(logger.commonLogger.config), nil + } +} diff --git a/vendor/github.com/cihub/seelog/cfg_errors.go b/vendor/github.com/cihub/seelog/cfg_errors.go new file mode 100644 index 00000000..c1fb4d10 --- /dev/null +++ b/vendor/github.com/cihub/seelog/cfg_errors.go @@ -0,0 +1,61 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "errors" +) + +var ( + errNodeMustHaveChildren = errors.New("node must have children") + errNodeCannotHaveChildren = errors.New("node cannot have children") +) + +type unexpectedChildElementError struct { + baseError +} + +func newUnexpectedChildElementError(msg string) *unexpectedChildElementError { + custmsg := "Unexpected child element: " + msg + return &unexpectedChildElementError{baseError{message: custmsg}} +} + +type missingArgumentError struct { + baseError +} + +func newMissingArgumentError(nodeName, attrName string) *missingArgumentError { + custmsg := "Output '" + nodeName + "' has no '" + attrName + "' attribute" + return &missingArgumentError{baseError{message: custmsg}} +} + +type unexpectedAttributeError struct { + baseError +} + +func newUnexpectedAttributeError(nodeName, attr string) *unexpectedAttributeError { + custmsg := nodeName + " has unexpected attribute: " + attr + return &unexpectedAttributeError{baseError{message: custmsg}} +} diff --git a/vendor/github.com/cihub/seelog/cfg_logconfig.go b/vendor/github.com/cihub/seelog/cfg_logconfig.go new file mode 100644 index 00000000..6ba6f9a9 --- /dev/null +++ b/vendor/github.com/cihub/seelog/cfg_logconfig.go @@ -0,0 +1,141 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
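The proxy constructors added above are easiest to see end to end in a short program. A minimal sketch, not part of the vendored diff; the import alias, writer, level, and messages are all illustrative:

package main

import (
	"os"

	log "github.com/cihub/seelog"
)

func main() {
	// Proxy everything at Info level and above to stderr, using the
	// default seelog message format (DefaultMsgFormat).
	logger, err := log.LoggerFromWriterWithMinLevel(os.Stderr, log.InfoLvl)
	if err != nil {
		panic(err)
	}
	defer logger.Flush()

	logger.Info("webhook received") // passes the [Info, Critical] constraint
	logger.Trace("parsing payload") // dropped: Trace < Info
}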
+
+package seelog
+
+import (
+	"errors"
+)
+
+type loggerTypeFromString uint8
+
+const (
+	syncloggerTypeFromString = iota
+	asyncLooploggerTypeFromString
+	asyncTimerloggerTypeFromString
+	adaptiveLoggerTypeFromString
+	defaultloggerTypeFromString = asyncLooploggerTypeFromString
+)
+
+const (
+	syncloggerTypeFromStringStr       = "sync"
+	asyncloggerTypeFromStringStr      = "asyncloop"
+	asyncTimerloggerTypeFromStringStr = "asynctimer"
+	adaptiveLoggerTypeFromStringStr   = "adaptive"
+)
+
+// asyncTimerLoggerData represents specific data for async timer logger
+type asyncTimerLoggerData struct {
+	AsyncInterval uint32
+}
+
+// adaptiveLoggerData represents specific data for adaptive timer logger
+type adaptiveLoggerData struct {
+	MinInterval      uint32
+	MaxInterval      uint32
+	CriticalMsgCount uint32
+}
+
+var loggerTypeToStringRepresentations = map[loggerTypeFromString]string{
+	syncloggerTypeFromString:       syncloggerTypeFromStringStr,
+	asyncLooploggerTypeFromString:  asyncloggerTypeFromStringStr,
+	asyncTimerloggerTypeFromString: asyncTimerloggerTypeFromStringStr,
+	adaptiveLoggerTypeFromString:   adaptiveLoggerTypeFromStringStr,
+}
+
+// getLoggerTypeFromString parses a string and returns a corresponding logger type, if successful.
+func getLoggerTypeFromString(logTypeString string) (level loggerTypeFromString, found bool) {
+	for logType, logTypeStr := range loggerTypeToStringRepresentations {
+		if logTypeStr == logTypeString {
+			return logType, true
+		}
+	}
+
+	return 0, false
+}
+
+// logConfig stores logging configuration. Contains messages dispatcher, allowed log level rules
+// (general constraints and exceptions)
+type logConfig struct {
+	Constraints    logLevelConstraints  // General log level rules (>min and <max, or set of allowed levels)
+	Exceptions     []*LogLevelException // Exceptions to general rules for specific function or file names
+	RootDispatcher dispatcherInterface  // Root of output tree
+}
+
+func (config *logConfig) IsAllowed(level LogLevel, context LogContextInterface) bool {
+	allowed := config.Constraints.IsAllowed(level) // General rule
+
+	// Exceptions:
+	if context.IsValid() {
+		for _, exception := range config.Exceptions {
+			if exception.MatchesContext(context) {
+				return exception.IsAllowed(level)
+			}
+		}
+	}
+
+	return allowed
+}
+
+// configForParsing is used when parsing a config from file: the logger type is deduced from a string and
+// logger-specific params are converted from attributes and passed to the specific logger constructor.
+type configForParsing struct {
+	logConfig
+	LogType    loggerTypeFromString
+	LoggerData interface{}
+	Params     *CfgParseParams // Check cfg_parser: CfgParseParams
+}
+
+func newFullLoggerConfig(
+	constraints logLevelConstraints,
+	exceptions []*LogLevelException,
+	rootDispatcher dispatcherInterface,
+	logType loggerTypeFromString,
+	logData interface{},
+	cfgParams *CfgParseParams) (*configForParsing, error) {
+	if constraints == nil {
+		return nil, errors.New("constraints can not be nil")
+	}
+	if rootDispatcher == nil {
+		return nil, errors.New("rootDispatcher can not be nil")
+	}
+
+	config := new(configForParsing)
+	config.Constraints = constraints
+	config.Exceptions = exceptions
+	config.RootDispatcher = rootDispatcher
+	config.LogType = logType
+	config.LoggerData = logData
+	config.Params = cfgParams
+
+	return config, nil
+}
diff --git a/vendor/github.com/cihub/seelog/cfg_parser.go b/vendor/github.com/cihub/seelog/cfg_parser.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/cfg_parser.go
+
+package seelog
+
+// CfgParseParams represent specific parse options or flags used by the parser. It is used if the seelog
+// parser needs some special directives or additional info to correctly parse a config.
+type CfgParseParams struct {
+	// CustomReceiverProducers expose the same functionality as the RegisterReceiver func,
+	// but in the scope (context) of the config parse func instead of the global package scope.
+	//
+	// A producer func is called when the config parser processes a '<custom>' element. It takes the 'name' attribute
+	// of the element and tries to find a match in two places:
+	// 1) CfgParseParams.CustomReceiverProducers map
+	// 2) Global type map, filled by RegisterReceiver
+	//
+	// If a match is found in the CustomReceiverProducers map, the parser calls the corresponding producer func,
+	// passing the init args to it. The func takes exactly the same args as CustomReceiver.AfterParse.
+	// The producer func must return a correct receiver or an error. In case of error, seelog will behave
+	// in the same way as with any other config error.
+	//
+	// You may use this param to set custom producers in case you need to pass some context when instantiating
+	// a custom receiver, or if you frequently change custom receivers with different parameters, or in any other
+	// situation where package-level registering (RegisterReceiver) is not an option for you.
+	CustomReceiverProducers map[string]CustomReceiverProducer
+}
+
+func (cfg *CfgParseParams) String() string {
+	return fmt.Sprintf("CfgParams: {custom_recs=%d}", len(cfg.CustomReceiverProducers))
+}
+
+type elementMapEntry struct {
+	constructor func(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error)
+}
+
+var elementMap map[string]elementMapEntry
+var predefinedFormats map[string]*formatter
+
+func init() {
+	elementMap = map[string]elementMapEntry{
+		fileWriterID:         {createfileWriter},
+		splitterDispatcherID: {createSplitter},
+		customReceiverID:     {createCustomReceiver},
+		filterDispatcherID:   {createFilter},
+		consoleWriterID:      {createConsoleWriter},
+		rollingfileWriterID:  {createRollingFileWriter},
+		bufferedWriterID:     {createbufferedWriter},
+		smtpWriterID:         {createSMTPWriter},
+		connWriterID:         {createconnWriter},
+	}
+
+	err := fillPredefinedFormats()
+	if err != nil {
+		panic(fmt.Sprintf("Seelog couldn't start: predefined formats creation failed. Error: %s", err.Error()))
+	}
+}
+
+func fillPredefinedFormats() error {
+	predefinedFormatsWithoutPrefix := map[string]string{
+		"xml-debug":       `<time>%Ns</time><lev>%Lev</lev><msg>%Msg</msg><path>%RelFile</path><func>%Func</func><line>%Line</line>`,
+		"xml-debug-short": `<t>%Ns</t><l>%l</l><m>%Msg</m><p>%RelFile</p><f>%Func</f>`,
+		"xml":             `<time>%Ns</time><lev>%Lev</lev><msg>%Msg</msg>`,
+		"xml-short":       `<t>%Ns</t><l>%l</l><m>%Msg</m>`,
+
+		"json-debug":       `{"time":%Ns,"lev":"%Lev","msg":"%Msg","path":"%RelFile","func":"%Func","line":"%Line"}`,
+		"json-debug-short": `{"t":%Ns,"l":"%Lev","m":"%Msg","p":"%RelFile","f":"%Func"}`,
+		"json":             `{"time":%Ns,"lev":"%Lev","msg":"%Msg"}`,
+		"json-short":       `{"t":%Ns,"l":"%Lev","m":"%Msg"}`,
+
+		"debug":       `[%LEVEL] %RelFile:%Func.%Line %Date %Time %Msg%n`,
+		"debug-short": `[%LEVEL] %Date %Time %Msg%n`,
+		"fast":        `%Ns %l %Msg%n`,
+	}
+
+	predefinedFormats = make(map[string]*formatter)
+
+	for formatKey, format := range predefinedFormatsWithoutPrefix {
+		formatter, err := NewFormatter(format)
+		if err != nil {
+			return err
+		}
+
+		predefinedFormats[predefinedPrefix+formatKey] = formatter
+	}
+
+	return nil
+}
+
+// configFromXMLDecoder parses data from a given XML decoder.
+// Returns a parsed config which can be used to create a logger if no errors occurred.
+// Returns an error if the format is incorrect or parsing fails for any other reason.
+func configFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (*configForParsing, error) {
+	return configFromXMLDecoderWithConfig(xmlParser, rootNode, nil)
+}
+
+// configFromXMLDecoderWithConfig parses data from a given XML decoder.
+// Returns a parsed config which can be used to create a logger if no errors occurred.
+// Returns an error if the format is incorrect or parsing fails for any other reason.
+func configFromXMLDecoderWithConfig(xmlParser *xml.Decoder, rootNode xml.Token, cfg *CfgParseParams) (*configForParsing, error) {
+	_, ok := rootNode.(xml.StartElement)
+	if !ok {
+		return nil, errors.New("rootNode must be XML startElement")
+	}
+
+	config, err := unmarshalNode(xmlParser, rootNode)
+	if err != nil {
+		return nil, err
+	}
+	if config == nil {
+		return nil, errors.New("xml has no content")
+	}
+
+	return configFromXMLNodeWithConfig(config, cfg)
+}
+
+// configFromReader parses data from a given reader.
+// Returns a parsed config which can be used to create a logger if no errors occurred.
+// Returns an error if the format is incorrect or parsing fails for any other reason.
+func configFromReader(reader io.Reader) (*configForParsing, error) {
+	return configFromReaderWithConfig(reader, nil)
+}
+
+// configFromReaderWithConfig parses data from a given reader.
+// Returns a parsed config which can be used to create a logger if no errors occurred.
+// Returns an error if the format is incorrect or parsing fails for any other reason.
+func configFromReaderWithConfig(reader io.Reader, cfg *CfgParseParams) (*configForParsing, error) { + config, err := unmarshalConfig(reader) + if err != nil { + return nil, err + } + + if config.name != seelogConfigID { + return nil, errors.New("root xml tag must be '" + seelogConfigID + "'") + } + + return configFromXMLNodeWithConfig(config, cfg) +} + +func configFromXMLNodeWithConfig(config *xmlNode, cfg *CfgParseParams) (*configForParsing, error) { + err := checkUnexpectedAttribute( + config, + minLevelID, + maxLevelID, + levelsID, + loggerTypeFromStringAttr, + asyncLoggerIntervalAttr, + adaptLoggerMinIntervalAttr, + adaptLoggerMaxIntervalAttr, + adaptLoggerCriticalMsgCountAttr, + ) + if err != nil { + return nil, err + } + + err = checkExpectedElements(config, optionalElement(outputsID), optionalElement(formatsID), optionalElement(exceptionsID)) + if err != nil { + return nil, err + } + + constraints, err := getConstraints(config) + if err != nil { + return nil, err + } + + exceptions, err := getExceptions(config) + if err != nil { + return nil, err + } + err = checkDistinctExceptions(exceptions) + if err != nil { + return nil, err + } + + formats, err := getFormats(config) + if err != nil { + return nil, err + } + + dispatcher, err := getOutputsTree(config, formats, cfg) + if err != nil { + // If we open several files, but then fail to parse the config, we should close + // those files before reporting that config is invalid. + if dispatcher != nil { + dispatcher.Close() + } + + return nil, err + } + + loggerType, logData, err := getloggerTypeFromStringData(config) + if err != nil { + return nil, err + } + + return newFullLoggerConfig(constraints, exceptions, dispatcher, loggerType, logData, cfg) +} + +func getConstraints(node *xmlNode) (logLevelConstraints, error) { + minLevelStr, isMinLevel := node.attributes[minLevelID] + maxLevelStr, isMaxLevel := node.attributes[maxLevelID] + levelsStr, isLevels := node.attributes[levelsID] + + if isLevels && (isMinLevel && isMaxLevel) { + return nil, errors.New("for level declaration use '" + levelsID + "'' OR '" + minLevelID + + "', '" + maxLevelID + "'") + } + + offString := LogLevel(Off).String() + + if (isLevels && strings.TrimSpace(levelsStr) == offString) || + (isMinLevel && !isMaxLevel && minLevelStr == offString) { + + return NewOffConstraints() + } + + if isLevels { + levels, err := parseLevels(levelsStr) + if err != nil { + return nil, err + } + return NewListConstraints(levels) + } + + var minLevel = LogLevel(TraceLvl) + if isMinLevel { + found := true + minLevel, found = LogLevelFromString(minLevelStr) + if !found { + return nil, errors.New("declared " + minLevelID + " not found: " + minLevelStr) + } + } + + var maxLevel = LogLevel(CriticalLvl) + if isMaxLevel { + found := true + maxLevel, found = LogLevelFromString(maxLevelStr) + if !found { + return nil, errors.New("declared " + maxLevelID + " not found: " + maxLevelStr) + } + } + + return NewMinMaxConstraints(minLevel, maxLevel) +} + +func parseLevels(str string) ([]LogLevel, error) { + levelsStrArr := strings.Split(strings.Replace(str, " ", "", -1), ",") + var levels []LogLevel + for _, levelStr := range levelsStrArr { + level, found := LogLevelFromString(levelStr) + if !found { + return nil, errors.New("declared level not found: " + levelStr) + } + + levels = append(levels, level) + } + + return levels, nil +} + +func getExceptions(config *xmlNode) ([]*LogLevelException, error) { + var exceptions []*LogLevelException + + var exceptionsNode *xmlNode + for _, child := range 
config.children { + if child.name == exceptionsID { + exceptionsNode = child + break + } + } + + if exceptionsNode == nil { + return exceptions, nil + } + + err := checkUnexpectedAttribute(exceptionsNode) + if err != nil { + return nil, err + } + + err = checkExpectedElements(exceptionsNode, multipleMandatoryElements("exception")) + if err != nil { + return nil, err + } + + for _, exceptionNode := range exceptionsNode.children { + if exceptionNode.name != exceptionID { + return nil, errors.New("incorrect nested element in exceptions section: " + exceptionNode.name) + } + + err := checkUnexpectedAttribute(exceptionNode, minLevelID, maxLevelID, levelsID, funcPatternID, filePatternID) + if err != nil { + return nil, err + } + + constraints, err := getConstraints(exceptionNode) + if err != nil { + return nil, errors.New("incorrect " + exceptionsID + " node: " + err.Error()) + } + + funcPattern, isFuncPattern := exceptionNode.attributes[funcPatternID] + filePattern, isFilePattern := exceptionNode.attributes[filePatternID] + if !isFuncPattern { + funcPattern = "*" + } + if !isFilePattern { + filePattern = "*" + } + + exception, err := NewLogLevelException(funcPattern, filePattern, constraints) + if err != nil { + return nil, errors.New("incorrect exception node: " + err.Error()) + } + + exceptions = append(exceptions, exception) + } + + return exceptions, nil +} + +func checkDistinctExceptions(exceptions []*LogLevelException) error { + for i, exception := range exceptions { + for j, exception1 := range exceptions { + if i == j { + continue + } + + if exception.FuncPattern() == exception1.FuncPattern() && + exception.FilePattern() == exception1.FilePattern() { + + return fmt.Errorf("there are two or more duplicate exceptions. Func: %v, file %v", + exception.FuncPattern(), exception.FilePattern()) + } + } + } + + return nil +} + +func getFormats(config *xmlNode) (map[string]*formatter, error) { + formats := make(map[string]*formatter, 0) + + var formatsNode *xmlNode + for _, child := range config.children { + if child.name == formatsID { + formatsNode = child + break + } + } + + if formatsNode == nil { + return formats, nil + } + + err := checkUnexpectedAttribute(formatsNode) + if err != nil { + return nil, err + } + + err = checkExpectedElements(formatsNode, multipleMandatoryElements("format")) + if err != nil { + return nil, err + } + + for _, formatNode := range formatsNode.children { + if formatNode.name != formatID { + return nil, errors.New("incorrect nested element in " + formatsID + " section: " + formatNode.name) + } + + err := checkUnexpectedAttribute(formatNode, formatKeyAttrID, formatID) + if err != nil { + return nil, err + } + + id, isID := formatNode.attributes[formatKeyAttrID] + formatStr, isFormat := formatNode.attributes[formatAttrID] + if !isID { + return nil, errors.New("format has no '" + formatKeyAttrID + "' attribute") + } + if !isFormat { + return nil, errors.New("format[" + id + "] has no '" + formatAttrID + "' attribute") + } + + formatter, err := NewFormatter(formatStr) + if err != nil { + return nil, err + } + + formats[id] = formatter + } + + return formats, nil +} + +func getloggerTypeFromStringData(config *xmlNode) (logType loggerTypeFromString, logData interface{}, err error) { + logTypeStr, loggerTypeExists := config.attributes[loggerTypeFromStringAttr] + + if !loggerTypeExists { + return defaultloggerTypeFromString, nil, nil + } + + logType, found := getLoggerTypeFromString(logTypeStr) + + if !found { + return 0, nil, fmt.Errorf("unknown logger type: %s", 
+			logTypeStr)
+	}
+
+	if logType == asyncTimerloggerTypeFromString {
+		intervalStr, intervalExists := config.attributes[asyncLoggerIntervalAttr]
+		if !intervalExists {
+			return 0, nil, newMissingArgumentError(config.name, asyncLoggerIntervalAttr)
+		}
+
+		interval, err := strconv.ParseUint(intervalStr, 10, 32)
+		if err != nil {
+			return 0, nil, err
+		}
+
+		logData = asyncTimerLoggerData{uint32(interval)}
+	} else if logType == adaptiveLoggerTypeFromString {
+
+		// Min interval
+		minIntStr, minIntExists := config.attributes[adaptLoggerMinIntervalAttr]
+		if !minIntExists {
+			return 0, nil, newMissingArgumentError(config.name, adaptLoggerMinIntervalAttr)
+		}
+		minInterval, err := strconv.ParseUint(minIntStr, 10, 32)
+		if err != nil {
+			return 0, nil, err
+		}
+
+		// Max interval
+		maxIntStr, maxIntExists := config.attributes[adaptLoggerMaxIntervalAttr]
+		if !maxIntExists {
+			return 0, nil, newMissingArgumentError(config.name, adaptLoggerMaxIntervalAttr)
+		}
+		maxInterval, err := strconv.ParseUint(maxIntStr, 10, 32)
+		if err != nil {
+			return 0, nil, err
+		}
+
+		// Critical msg count
+		criticalMsgCountStr, criticalMsgCountExists := config.attributes[adaptLoggerCriticalMsgCountAttr]
+		if !criticalMsgCountExists {
+			return 0, nil, newMissingArgumentError(config.name, adaptLoggerCriticalMsgCountAttr)
+		}
+		criticalMsgCount, err := strconv.ParseUint(criticalMsgCountStr, 10, 32)
+		if err != nil {
+			return 0, nil, err
+		}
+
+		logData = adaptiveLoggerData{uint32(minInterval), uint32(maxInterval), uint32(criticalMsgCount)}
+	}
+
+	return logType, logData, nil
+}
+
+func getOutputsTree(config *xmlNode, formats map[string]*formatter, cfg *CfgParseParams) (dispatcherInterface, error) {
+	var outputsNode *xmlNode
+	for _, child := range config.children {
+		if child.name == outputsID {
+			outputsNode = child
+			break
+		}
+	}
+
+	if outputsNode != nil {
+		err := checkUnexpectedAttribute(outputsNode, outputFormatID)
+		if err != nil {
+			return nil, err
+		}
+
+		formatter, err := getCurrentFormat(outputsNode, DefaultFormatter, formats)
+		if err != nil {
+			return nil, err
+		}
+
+		output, err := createSplitter(outputsNode, formatter, formats, cfg)
+		if err != nil {
+			return nil, err
+		}
+
+		dispatcher, ok := output.(dispatcherInterface)
+		if ok {
+			return dispatcher, nil
+		}
+	}
+
+	console, err := NewConsoleWriter()
+	if err != nil {
+		return nil, err
+	}
+	return NewSplitDispatcher(DefaultFormatter, []interface{}{console})
+}
+
+func getCurrentFormat(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter) (*formatter, error) {
+	formatID, isFormatID := node.attributes[outputFormatID]
+	if !isFormatID {
+		return formatFromParent, nil
+	}
+
+	format, ok := formats[formatID]
+	if ok {
+		return format, nil
+	}
+
+	// Test for a predefined format match
+	pdFormat, pdOk := predefinedFormats[formatID]
+
+	if !pdOk {
+		return nil, errors.New("formatid = '" + formatID + "' doesn't exist")
+	}
+
+	return pdFormat, nil
+}
+
+func createInnerReceivers(node *xmlNode, format *formatter, formats map[string]*formatter, cfg *CfgParseParams) ([]interface{}, error) {
+	var outputs []interface{}
+	for _, childNode := range node.children {
+		entry, ok := elementMap[childNode.name]
+		if !ok {
+			return nil, errors.New("unknown tag '" + childNode.name + "' in outputs section")
+		}
+
+		output, err := entry.constructor(childNode, format, formats, cfg)
+		if err != nil {
+			return nil, err
+		}
+
+		outputs = append(outputs, output)
+	}
+
+	return outputs, nil
+}
+
+func createSplitter(node *xmlNode, formatFromParent *formatter,
formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + err := checkUnexpectedAttribute(node, outputFormatID) + if err != nil { + return nil, err + } + + if !node.hasChildren() { + return nil, errNodeMustHaveChildren + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + receivers, err := createInnerReceivers(node, currentFormat, formats, cfg) + if err != nil { + return nil, err + } + + return NewSplitDispatcher(currentFormat, receivers) +} + +func createCustomReceiver(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + dataCustomPrefixes := make(map[string]string) + // Expecting only 'formatid', 'name' and 'data-' attrs + for attr, attrval := range node.attributes { + isExpected := false + if attr == outputFormatID || + attr == customNameAttrID { + isExpected = true + } + if strings.HasPrefix(attr, customNameDataAttrPrefix) { + dataCustomPrefixes[attr[len(customNameDataAttrPrefix):]] = attrval + isExpected = true + } + if !isExpected { + return nil, newUnexpectedAttributeError(node.name, attr) + } + } + + if node.hasChildren() { + return nil, errNodeCannotHaveChildren + } + customName, hasCustomName := node.attributes[customNameAttrID] + if !hasCustomName { + return nil, newMissingArgumentError(node.name, customNameAttrID) + } + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + args := CustomReceiverInitArgs{ + XmlCustomAttrs: dataCustomPrefixes, + } + + if cfg != nil && cfg.CustomReceiverProducers != nil { + if prod, ok := cfg.CustomReceiverProducers[customName]; ok { + rec, err := prod(args) + if err != nil { + return nil, err + } + creceiver, err := NewCustomReceiverDispatcherByValue(currentFormat, rec, customName, args) + if err != nil { + return nil, err + } + err = rec.AfterParse(args) + if err != nil { + return nil, err + } + return creceiver, nil + } + } + + return NewCustomReceiverDispatcher(currentFormat, customName, args) +} + +func createFilter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + err := checkUnexpectedAttribute(node, outputFormatID, filterLevelsAttrID) + if err != nil { + return nil, err + } + + if !node.hasChildren() { + return nil, errNodeMustHaveChildren + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + levelsStr, isLevels := node.attributes[filterLevelsAttrID] + if !isLevels { + return nil, newMissingArgumentError(node.name, filterLevelsAttrID) + } + + levels, err := parseLevels(levelsStr) + if err != nil { + return nil, err + } + + receivers, err := createInnerReceivers(node, currentFormat, formats, cfg) + if err != nil { + return nil, err + } + + return NewFilterDispatcher(currentFormat, receivers, levels...) 
+} + +func createfileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + err := checkUnexpectedAttribute(node, outputFormatID, pathID) + if err != nil { + return nil, err + } + + if node.hasChildren() { + return nil, errNodeCannotHaveChildren + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + path, isPath := node.attributes[pathID] + if !isPath { + return nil, newMissingArgumentError(node.name, pathID) + } + + fileWriter, err := NewFileWriter(path) + if err != nil { + return nil, err + } + + return NewFormattedWriter(fileWriter, currentFormat) +} + +// Creates new SMTP writer if encountered in the config file. +func createSMTPWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + err := checkUnexpectedAttribute(node, outputFormatID, senderaddressID, senderNameID, hostNameID, hostPortID, userNameID, userPassID, subjectID) + if err != nil { + return nil, err + } + // Node must have children. + if !node.hasChildren() { + return nil, errNodeMustHaveChildren + } + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + senderAddress, ok := node.attributes[senderaddressID] + if !ok { + return nil, newMissingArgumentError(node.name, senderaddressID) + } + senderName, ok := node.attributes[senderNameID] + if !ok { + return nil, newMissingArgumentError(node.name, senderNameID) + } + // Process child nodes scanning for recipient email addresses and/or CA certificate paths. + var recipientAddresses []string + var caCertDirPaths []string + var mailHeaders []string + for _, childNode := range node.children { + switch childNode.name { + // Extract recipient address from child nodes. + case recipientID: + address, ok := childNode.attributes[addressID] + if !ok { + return nil, newMissingArgumentError(childNode.name, addressID) + } + recipientAddresses = append(recipientAddresses, address) + // Extract CA certificate file path from child nodes. + case cACertDirpathID: + path, ok := childNode.attributes[pathID] + if !ok { + return nil, newMissingArgumentError(childNode.name, pathID) + } + caCertDirPaths = append(caCertDirPaths, path) + + // Extract email headers from child nodes. + case mailHeaderID: + headerName, ok := childNode.attributes[mailHeaderNameID] + if !ok { + return nil, newMissingArgumentError(childNode.name, mailHeaderNameID) + } + + headerValue, ok := childNode.attributes[mailHeaderValueID] + if !ok { + return nil, newMissingArgumentError(childNode.name, mailHeaderValueID) + } + + // Build header line + mailHeaders = append(mailHeaders, fmt.Sprintf("%s: %s", headerName, headerValue)) + default: + return nil, newUnexpectedChildElementError(childNode.name) + } + } + hostName, ok := node.attributes[hostNameID] + if !ok { + return nil, newMissingArgumentError(node.name, hostNameID) + } + + hostPort, ok := node.attributes[hostPortID] + if !ok { + return nil, newMissingArgumentError(node.name, hostPortID) + } + + // Check if the string can really be converted into int. 
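+	// For reference, the <smtp> element this constructor parses has roughly the
+	// following shape (a sketch; attribute values are illustrative, and the child
+	// element names correspond to the recipient/cacertdirpath/header IDs used above):
+	//
+	//   <smtp senderaddress="ci@example.com" sendername="CI" hostname="smtp.example.com"
+	//         hostport="587" username="ci" password="secret" subject="build failed">
+	//       <recipient address="dev@example.com"/>
+	//   </smtp>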
+ if _, err := strconv.Atoi(hostPort); err != nil { + return nil, errors.New("invalid host port number") + } + + userName, ok := node.attributes[userNameID] + if !ok { + return nil, newMissingArgumentError(node.name, userNameID) + } + + userPass, ok := node.attributes[userPassID] + if !ok { + return nil, newMissingArgumentError(node.name, userPassID) + } + + // subject is optionally set by configuration. + // default value is defined by DefaultSubjectPhrase constant in the writers_smtpwriter.go + var subjectPhrase = DefaultSubjectPhrase + + subject, ok := node.attributes[subjectID] + if ok { + subjectPhrase = subject + } + + smtpWriter := NewSMTPWriter( + senderAddress, + senderName, + recipientAddresses, + hostName, + hostPort, + userName, + userPass, + caCertDirPaths, + subjectPhrase, + mailHeaders, + ) + + return NewFormattedWriter(smtpWriter, currentFormat) +} + +func createConsoleWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + err := checkUnexpectedAttribute(node, outputFormatID) + if err != nil { + return nil, err + } + + if node.hasChildren() { + return nil, errNodeCannotHaveChildren + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + consoleWriter, err := NewConsoleWriter() + if err != nil { + return nil, err + } + + return NewFormattedWriter(consoleWriter, currentFormat) +} + +func createconnWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + if node.hasChildren() { + return nil, errNodeCannotHaveChildren + } + + err := checkUnexpectedAttribute(node, outputFormatID, connWriterAddrAttr, connWriterNetAttr, connWriterReconnectOnMsgAttr, connWriterUseTLSAttr, connWriterInsecureSkipVerifyAttr) + if err != nil { + return nil, err + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + addr, isAddr := node.attributes[connWriterAddrAttr] + if !isAddr { + return nil, newMissingArgumentError(node.name, connWriterAddrAttr) + } + + net, isNet := node.attributes[connWriterNetAttr] + if !isNet { + return nil, newMissingArgumentError(node.name, connWriterNetAttr) + } + + reconnectOnMsg := false + reconnectOnMsgStr, isReconnectOnMsgStr := node.attributes[connWriterReconnectOnMsgAttr] + if isReconnectOnMsgStr { + if reconnectOnMsgStr == "true" { + reconnectOnMsg = true + } else if reconnectOnMsgStr == "false" { + reconnectOnMsg = false + } else { + return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterReconnectOnMsgAttr + "' attribute value") + } + } + + useTLS := false + useTLSStr, isUseTLSStr := node.attributes[connWriterUseTLSAttr] + if isUseTLSStr { + if useTLSStr == "true" { + useTLS = true + } else if useTLSStr == "false" { + useTLS = false + } else { + return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterUseTLSAttr + "' attribute value") + } + if useTLS { + insecureSkipVerify := false + insecureSkipVerifyStr, isInsecureSkipVerify := node.attributes[connWriterInsecureSkipVerifyAttr] + if isInsecureSkipVerify { + if insecureSkipVerifyStr == "true" { + insecureSkipVerify = true + } else if insecureSkipVerifyStr == "false" { + insecureSkipVerify = false + } else { + return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterInsecureSkipVerifyAttr + "' attribute value") + } + } + config := tls.Config{InsecureSkipVerify: 
insecureSkipVerify} + connWriter := newTLSWriter(net, addr, reconnectOnMsg, &config) + return NewFormattedWriter(connWriter, currentFormat) + } + } + + connWriter := NewConnWriter(net, addr, reconnectOnMsg) + + return NewFormattedWriter(connWriter, currentFormat) +} + +func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + if node.hasChildren() { + return nil, errNodeCannotHaveChildren + } + + rollingTypeStr, isRollingType := node.attributes[rollingFileTypeAttr] + if !isRollingType { + return nil, newMissingArgumentError(node.name, rollingFileTypeAttr) + } + + rollingType, ok := rollingTypeFromString(rollingTypeStr) + if !ok { + return nil, errors.New("unknown rolling file type: " + rollingTypeStr) + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + path, isPath := node.attributes[rollingFilePathAttr] + if !isPath { + return nil, newMissingArgumentError(node.name, rollingFilePathAttr) + } + + rollingArchiveStr, archiveAttrExists := node.attributes[rollingFileArchiveAttr] + + var rArchiveType rollingArchiveType + var rArchivePath string + var rArchiveExploded bool = false + if !archiveAttrExists { + rArchiveType = rollingArchiveNone + rArchivePath = "" + } else { + rArchiveType, ok = rollingArchiveTypeFromString(rollingArchiveStr) + if !ok { + return nil, errors.New("unknown rolling archive type: " + rollingArchiveStr) + } + + if rArchiveType == rollingArchiveNone { + rArchivePath = "" + } else { + if rArchiveExplodedAttr, ok := node.attributes[rollingFileArchiveExplodedAttr]; ok { + if rArchiveExploded, err = strconv.ParseBool(rArchiveExplodedAttr); err != nil { + return nil, fmt.Errorf("archive exploded should be true or false, but was %v", + rArchiveExploded) + } + } + + rArchivePath, ok = node.attributes[rollingFileArchivePathAttr] + if ok { + if rArchivePath == "" { + return nil, fmt.Errorf("empty archive path is not supported") + } + } else { + if rArchiveExploded { + rArchivePath = rollingArchiveDefaultExplodedName + + } else { + rArchivePath, err = rollingArchiveTypeDefaultName(rArchiveType, false) + if err != nil { + return nil, err + } + } + } + } + } + + nameMode := rollingNameMode(rollingNameModePostfix) + nameModeStr, ok := node.attributes[rollingFileNameModeAttr] + if ok { + mode, found := rollingNameModeFromString(nameModeStr) + if !found { + return nil, errors.New("unknown rolling filename mode: " + nameModeStr) + } else { + nameMode = mode + } + } + + if rollingType == rollingTypeSize { + err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr, + rollingFileMaxSizeAttr, rollingFileMaxRollsAttr, rollingFileArchiveAttr, + rollingFileArchivePathAttr, rollingFileArchiveExplodedAttr, rollingFileNameModeAttr) + if err != nil { + return nil, err + } + + maxSizeStr, ok := node.attributes[rollingFileMaxSizeAttr] + if !ok { + return nil, newMissingArgumentError(node.name, rollingFileMaxSizeAttr) + } + + maxSize, err := strconv.ParseInt(maxSizeStr, 10, 64) + if err != nil { + return nil, err + } + + maxRolls := 0 + maxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr] + if ok { + maxRolls, err = strconv.Atoi(maxRollsStr) + if err != nil { + return nil, err + } + } + + rollingWriter, err := NewRollingFileWriterSize(path, rArchiveType, rArchivePath, maxSize, maxRolls, nameMode, rArchiveExploded) + if err != nil { + return nil, err + } + + return NewFormattedWriter(rollingWriter, 
+		currentFormat)
+
+	} else if rollingType == rollingTypeTime {
+		err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr,
+			rollingFileDataPatternAttr, rollingFileArchiveAttr, rollingFileMaxRollsAttr,
+			rollingFileArchivePathAttr, rollingFileArchiveExplodedAttr, rollingFileNameModeAttr,
+			rollingFileFullNameAttr)
+		if err != nil {
+			return nil, err
+		}
+
+		maxRolls := 0
+		maxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr]
+		if ok {
+			maxRolls, err = strconv.Atoi(maxRollsStr)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		fullName := false
+		fn, ok := node.attributes[rollingFileFullNameAttr]
+		if ok {
+			if fn == "true" {
+				fullName = true
+			} else if fn == "false" {
+				fullName = false
+			} else {
+				return nil, errors.New("node '" + node.name + "' has incorrect '" + rollingFileFullNameAttr + "' attribute value")
+			}
+		}
+
+		dataPattern, ok := node.attributes[rollingFileDataPatternAttr]
+		if !ok {
+			return nil, newMissingArgumentError(node.name, rollingFileDataPatternAttr)
+		}
+
+		rollingWriter, err := NewRollingFileWriterTime(path, rArchiveType, rArchivePath, maxRolls, dataPattern, nameMode, rArchiveExploded, fullName)
+		if err != nil {
+			return nil, err
+		}
+
+		return NewFormattedWriter(rollingWriter, currentFormat)
+	}
+
+	return nil, errors.New("incorrect rolling writer type " + rollingTypeStr)
+}
+
+func createbufferedWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {
+	err := checkUnexpectedAttribute(node, outputFormatID, bufferedSizeAttr, bufferedFlushPeriodAttr)
+	if err != nil {
+		return nil, err
+	}
+
+	if !node.hasChildren() {
+		return nil, errNodeMustHaveChildren
+	}
+
+	currentFormat, err := getCurrentFormat(node, formatFromParent, formats)
+	if err != nil {
+		return nil, err
+	}
+
+	sizeStr, isSize := node.attributes[bufferedSizeAttr]
+	if !isSize {
+		return nil, newMissingArgumentError(node.name, bufferedSizeAttr)
+	}
+
+	size, err := strconv.Atoi(sizeStr)
+	if err != nil {
+		return nil, err
+	}
+
+	flushPeriod := 0
+	flushPeriodStr, isFlushPeriod := node.attributes[bufferedFlushPeriodAttr]
+	if isFlushPeriod {
+		flushPeriod, err = strconv.Atoi(flushPeriodStr)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// An inner writer can't have its own format, so we pass 'currentFormat' as its parent format...
+	receivers, err := createInnerReceivers(node, currentFormat, formats, cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	formattedWriter, ok := receivers[0].(*formattedWriter)
+	if !ok {
+		return nil, errors.New("buffered writer's child is not a writer")
+	}
+
+	// ... and then we check that it hasn't changed
+	if formattedWriter.Format() != currentFormat {
+		return nil, errors.New("inner writer cannot have its own format")
+	}
+
+	bufferedWriter, err := NewBufferedWriter(formattedWriter.Writer(), size, time.Duration(flushPeriod))
+	if err != nil {
+		return nil, err
+	}
+
+	return NewFormattedWriter(bufferedWriter, currentFormat)
+}
+
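The producer mechanism described in the CfgParseParams comment earlier can be exercised roughly like this. A sketch, not part of the vendored diff; consoleReceiver, its prefix, and the config are illustrative, and the method set follows the seelog CustomReceiver interface:

package main

import (
	"fmt"

	log "github.com/cihub/seelog"
)

// consoleReceiver is a hypothetical CustomReceiver implementation.
type consoleReceiver struct{ prefix string }

func (r *consoleReceiver) ReceiveMessage(msg string, lvl log.LogLevel, ctx log.LogContextInterface) error {
	fmt.Print(r.prefix, msg)
	return nil
}
func (r *consoleReceiver) AfterParse(args log.CustomReceiverInitArgs) error { return nil }
func (r *consoleReceiver) Flush()                                           {}
func (r *consoleReceiver) Close() error                                     { return nil }

func main() {
	cfg := `<seelog><outputs><custom name="myreceiver"/></outputs></seelog>`

	params := &log.CfgParseParams{
		CustomReceiverProducers: map[string]log.CustomReceiverProducer{
			// Called when the parser hits <custom name="myreceiver" ...>.
			"myreceiver": func(args log.CustomReceiverInitArgs) (log.CustomReceiver, error) {
				return &consoleReceiver{prefix: "[hook] "}, nil
			},
		},
	}

	logger, err := log.LoggerFromParamConfigAsString(cfg, params)
	if err != nil {
		panic(err)
	}
	defer logger.Flush()
	logger.Info("delivered")
}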
+// Returns an error if node has any attributes not listed in expectedAttrs.
+func checkUnexpectedAttribute(node *xmlNode, expectedAttrs ...string) error {
+	for attr := range node.attributes {
+		isExpected := false
+		for _, expected := range expectedAttrs {
+			if attr == expected {
+				isExpected = true
+				break
+			}
+		}
+		if !isExpected {
+			return newUnexpectedAttributeError(node.name, attr)
+		}
+	}
+
+	return nil
+}
+
+type expectedElementInfo struct {
+	name      string
+	mandatory bool
+	multiple  bool
+}
+
+func optionalElement(name string) expectedElementInfo {
+	return expectedElementInfo{name, false, false}
+}
+func mandatoryElement(name string) expectedElementInfo {
+	return expectedElementInfo{name, true, false}
+}
+func multipleElements(name string) expectedElementInfo {
+	return expectedElementInfo{name, false, true}
+}
+func multipleMandatoryElements(name string) expectedElementInfo {
+	return expectedElementInfo{name, true, true}
+}
+
+func checkExpectedElements(node *xmlNode, elements ...expectedElementInfo) error {
+	for _, element := range elements {
+		count := 0
+		for _, child := range node.children {
+			if child.name == element.name {
+				count++
+			}
+		}
+
+		if count == 0 && element.mandatory {
+			return errors.New(node.name + " does not have mandatory subnode - " + element.name)
+		}
+		if count > 1 && !element.multiple {
+			return errors.New(node.name + " has more than one subnode - " + element.name)
+		}
+	}
+
+	for _, child := range node.children {
+		isExpected := false
+		for _, element := range elements {
+			if child.name == element.name {
+				isExpected = true
+			}
+		}
+
+		if !isExpected {
+			return errors.New(node.name + " has unexpected child: " + child.name)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/cihub/seelog/common_closer.go b/vendor/github.com/cihub/seelog/common_closer.go
new file mode 100644
index 00000000..1319c221
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/common_closer.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
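With cfg_parser.go in place, the predefined formats it registers can be referenced from a config by prefixed id. A minimal sketch, not part of the vendored diff; the prefix is assumed to be "std:" (the usual value of the predefinedPrefix constant, which is not visible in this excerpt):

package main

import log "github.com/cihub/seelog"

func main() {
	// "std:json" resolves to the "json" entry of predefinedFormatsWithoutPrefix,
	// so no <formats> section is needed.
	cfg := `
<seelog minlevel="info">
    <outputs formatid="std:json">
        <console/>
    </outputs>
</seelog>`

	logger, err := log.LoggerFromConfigAsString(cfg)
	if err != nil {
		panic(err)
	}
	defer logger.Flush()
	logger.Info("hello")
}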
+ +package seelog diff --git a/vendor/github.com/cihub/seelog/common_constraints.go b/vendor/github.com/cihub/seelog/common_constraints.go new file mode 100644 index 00000000..7ec2fe5b --- /dev/null +++ b/vendor/github.com/cihub/seelog/common_constraints.go @@ -0,0 +1,162 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "errors" + "fmt" + "strings" +) + +// Represents constraints which form a general rule for log levels selection +type logLevelConstraints interface { + IsAllowed(level LogLevel) bool +} + +// A minMaxConstraints represents constraints which use minimal and maximal allowed log levels. +type minMaxConstraints struct { + min LogLevel + max LogLevel +} + +// NewMinMaxConstraints creates a new minMaxConstraints struct with the specified min and max levels. +func NewMinMaxConstraints(min LogLevel, max LogLevel) (*minMaxConstraints, error) { + if min > max { + return nil, fmt.Errorf("min level can't be greater than max. Got min: %d, max: %d", min, max) + } + if min < TraceLvl || min > CriticalLvl { + return nil, fmt.Errorf("min level can't be less than Trace or greater than Critical. Got min: %d", min) + } + if max < TraceLvl || max > CriticalLvl { + return nil, fmt.Errorf("max level can't be less than Trace or greater than Critical. Got max: %d", max) + } + + return &minMaxConstraints{min, max}, nil +} + +// IsAllowed returns true, if log level is in [min, max] range (inclusive). +func (minMaxConstr *minMaxConstraints) IsAllowed(level LogLevel) bool { + return level >= minMaxConstr.min && level <= minMaxConstr.max +} + +func (minMaxConstr *minMaxConstraints) String() string { + return fmt.Sprintf("Min: %s. Max: %s", minMaxConstr.min, minMaxConstr.max) +} + +//======================================================= + +// A listConstraints represents constraints which use allowed log levels list. +type listConstraints struct { + allowedLevels map[LogLevel]bool +} + +// NewListConstraints creates a new listConstraints struct with the specified allowed levels. 
+func NewListConstraints(allowList []LogLevel) (*listConstraints, error) { + if allowList == nil { + return nil, errors.New("list can't be nil") + } + + allowLevels, err := createMapFromList(allowList) + if err != nil { + return nil, err + } + err = validateOffLevel(allowLevels) + if err != nil { + return nil, err + } + + return &listConstraints{allowLevels}, nil +} + +func (listConstr *listConstraints) String() string { + allowedList := "List: " + + listLevel := make([]string, len(listConstr.allowedLevels)) + + var logLevel LogLevel + i := 0 + for logLevel = TraceLvl; logLevel <= Off; logLevel++ { + if listConstr.allowedLevels[logLevel] { + listLevel[i] = logLevel.String() + i++ + } + } + + allowedList += strings.Join(listLevel, ",") + + return allowedList +} + +func createMapFromList(allowedList []LogLevel) (map[LogLevel]bool, error) { + allowedLevels := make(map[LogLevel]bool, 0) + for _, level := range allowedList { + if level < TraceLvl || level > Off { + return nil, fmt.Errorf("level can't be less than Trace or greater than Critical. Got level: %d", level) + } + allowedLevels[level] = true + } + return allowedLevels, nil +} +func validateOffLevel(allowedLevels map[LogLevel]bool) error { + if _, ok := allowedLevels[Off]; ok && len(allowedLevels) > 1 { + return errors.New("logLevel Off cant be mixed with other levels") + } + + return nil +} + +// IsAllowed returns true, if log level is in allowed log levels list. +// If the list contains the only item 'common.Off' then IsAllowed will always return false for any input values. +func (listConstr *listConstraints) IsAllowed(level LogLevel) bool { + for l := range listConstr.allowedLevels { + if l == level && level != Off { + return true + } + } + + return false +} + +// AllowedLevels returns allowed levels configuration as a map. +func (listConstr *listConstraints) AllowedLevels() map[LogLevel]bool { + return listConstr.allowedLevels +} + +//======================================================= + +type offConstraints struct { +} + +func NewOffConstraints() (*offConstraints, error) { + return &offConstraints{}, nil +} + +func (offConstr *offConstraints) IsAllowed(level LogLevel) bool { + return false +} + +func (offConstr *offConstraints) String() string { + return "Off constraint" +} diff --git a/vendor/github.com/cihub/seelog/common_context.go b/vendor/github.com/cihub/seelog/common_context.go new file mode 100644 index 00000000..230a76ca --- /dev/null +++ b/vendor/github.com/cihub/seelog/common_context.go @@ -0,0 +1,234 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" +) + +var ( + workingDir = "/" + stackCache map[uintptr]*logContext + stackCacheLock sync.RWMutex +) + +func init() { + wd, err := os.Getwd() + if err == nil { + workingDir = filepath.ToSlash(wd) + "/" + } + stackCache = make(map[uintptr]*logContext) +} + +// Represents runtime caller context. +type LogContextInterface interface { + // Caller's function name. + Func() string + // Caller's line number. + Line() int + // Caller's file short path (in slashed form). + ShortPath() string + // Caller's file full path (in slashed form). + FullPath() string + // Caller's file name (without path). + FileName() string + // True if the context is correct and may be used. + // If false, then an error in context evaluation occurred and + // all its other data may be corrupted. + IsValid() bool + // Time when log function was called. + CallTime() time.Time + // Custom context that can be set by calling logger.SetContext + CustomContext() interface{} +} + +// Returns context of the caller +func currentContext(custom interface{}) (LogContextInterface, error) { + return specifyContext(1, custom) +} + +func extractCallerInfo(skip int) (*logContext, error) { + var stack [1]uintptr + if runtime.Callers(skip+1, stack[:]) != 1 { + return nil, errors.New("error during runtime.Callers") + } + pc := stack[0] + + // do we have a cache entry? + stackCacheLock.RLock() + ctx, ok := stackCache[pc] + stackCacheLock.RUnlock() + if ok { + return ctx, nil + } + + // look up the details of the given caller + funcInfo := runtime.FuncForPC(pc) + if funcInfo == nil { + return nil, errors.New("error during runtime.FuncForPC") + } + + var shortPath string + fullPath, line := funcInfo.FileLine(pc) + if strings.HasPrefix(fullPath, workingDir) { + shortPath = fullPath[len(workingDir):] + } else { + shortPath = fullPath + } + funcName := funcInfo.Name() + if strings.HasPrefix(funcName, workingDir) { + funcName = funcName[len(workingDir):] + } + + ctx = &logContext{ + funcName: funcName, + line: line, + shortPath: shortPath, + fullPath: fullPath, + fileName: filepath.Base(fullPath), + } + + // save the details in the cache; note that it's possible we might + // have written an entry into the map in between the test above and + // this section, but the behaviour is still correct + stackCacheLock.Lock() + stackCache[pc] = ctx + stackCacheLock.Unlock() + return ctx, nil +} + +// Returns context of the function with placed "skip" stack frames of the caller +// If skip == 0 then behaves like currentContext +// Context is returned in any situation, even if error occurs. But, if an error +// occurs, the returned context is an error context, which contains no paths +// or names, but states that they can't be extracted. 
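The per-PC caching that extractCallerInfo implements above (resolve runtime.FuncForPC once per program counter, then serve from an RWMutex-guarded map) is a generally useful pattern. Here is the same technique reduced to a standalone sketch, independent of seelog; all names are ours:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

type callerInfo struct {
	funcName string
	file     string
	line     int
}

var (
	cache   = map[uintptr]*callerInfo{}
	cacheMu sync.RWMutex
)

// caller resolves the calling frame, caching by program counter so the
// comparatively expensive runtime lookups run once per call site.
func caller(skip int) (*callerInfo, bool) {
	var pcs [1]uintptr
	// skip+2: one frame for runtime.Callers itself, one for this function.
	if runtime.Callers(skip+2, pcs[:]) != 1 {
		return nil, false
	}
	pc := pcs[0]

	cacheMu.RLock()
	ci, ok := cache[pc]
	cacheMu.RUnlock()
	if ok {
		return ci, true
	}

	fn := runtime.FuncForPC(pc)
	if fn == nil {
		return nil, false
	}
	file, line := fn.FileLine(pc)
	ci = &callerInfo{funcName: fn.Name(), file: file, line: line}

	// A racing goroutine may have stored the same entry already;
	// overwriting it with identical data is still correct.
	cacheMu.Lock()
	cache[pc] = ci
	cacheMu.Unlock()
	return ci, true
}

func main() {
	if ci, ok := caller(0); ok {
		fmt.Printf("%s %s:%d\n", ci.funcName, ci.file, ci.line)
	}
}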
+func specifyContext(skip int, custom interface{}) (LogContextInterface, error) { + callTime := time.Now() + if skip < 0 { + err := fmt.Errorf("can not skip negative stack frames") + return &errorContext{callTime, err}, err + } + caller, err := extractCallerInfo(skip + 2) + if err != nil { + return &errorContext{callTime, err}, err + } + ctx := new(logContext) + *ctx = *caller + ctx.callTime = callTime + ctx.custom = custom + return ctx, nil +} + +// Represents a normal runtime caller context. +type logContext struct { + funcName string + line int + shortPath string + fullPath string + fileName string + callTime time.Time + custom interface{} +} + +func (context *logContext) IsValid() bool { + return true +} + +func (context *logContext) Func() string { + return context.funcName +} + +func (context *logContext) Line() int { + return context.line +} + +func (context *logContext) ShortPath() string { + return context.shortPath +} + +func (context *logContext) FullPath() string { + return context.fullPath +} + +func (context *logContext) FileName() string { + return context.fileName +} + +func (context *logContext) CallTime() time.Time { + return context.callTime +} + +func (context *logContext) CustomContext() interface{} { + return context.custom +} + +// Represents an error context +type errorContext struct { + errorTime time.Time + err error +} + +func (errContext *errorContext) getErrorText(prefix string) string { + return fmt.Sprintf("%s() error: %s", prefix, errContext.err) +} + +func (errContext *errorContext) IsValid() bool { + return false +} + +func (errContext *errorContext) Line() int { + return -1 +} + +func (errContext *errorContext) Func() string { + return errContext.getErrorText("Func") +} + +func (errContext *errorContext) ShortPath() string { + return errContext.getErrorText("ShortPath") +} + +func (errContext *errorContext) FullPath() string { + return errContext.getErrorText("FullPath") +} + +func (errContext *errorContext) FileName() string { + return errContext.getErrorText("FileName") +} + +func (errContext *errorContext) CallTime() time.Time { + return errContext.errorTime +} + +func (errContext *errorContext) CustomContext() interface{} { + return nil +} diff --git a/vendor/github.com/cihub/seelog/common_exception.go b/vendor/github.com/cihub/seelog/common_exception.go new file mode 100644 index 00000000..9acc2750 --- /dev/null +++ b/vendor/github.com/cihub/seelog/common_exception.go @@ -0,0 +1,194 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// Used in rules creation to validate input file and func filters
+var (
+	fileFormatValidator = regexp.MustCompile(`[a-zA-Z0-9\\/ _\*\.]*`)
+	funcFormatValidator = regexp.MustCompile(`[a-zA-Z0-9_\*\.]*`)
+)
+
+// LogLevelException represents an exceptional case used when you need some specific files or funcs to
+// override general constraints and to use their own.
+type LogLevelException struct {
+	funcPatternParts []string
+	filePatternParts []string
+
+	funcPattern string
+	filePattern string
+
+	constraints logLevelConstraints
+}
+
+// NewLogLevelException creates a new exception.
+func NewLogLevelException(funcPattern string, filePattern string, constraints logLevelConstraints) (*LogLevelException, error) {
+	if constraints == nil {
+		return nil, errors.New("constraints can not be nil")
+	}
+
+	exception := new(LogLevelException)
+
+	err := exception.initFuncPatternParts(funcPattern)
+	if err != nil {
+		return nil, err
+	}
+	exception.funcPattern = strings.Join(exception.funcPatternParts, "")
+
+	err = exception.initFilePatternParts(filePattern)
+	if err != nil {
+		return nil, err
+	}
+	exception.filePattern = strings.Join(exception.filePatternParts, "")
+
+	exception.constraints = constraints
+
+	return exception, nil
+}
+
+// MatchesContext returns true if context matches the patterns of this LogLevelException
+func (logLevelEx *LogLevelException) MatchesContext(context LogContextInterface) bool {
+	return logLevelEx.match(context.Func(), context.FullPath())
+}
+
+// IsAllowed returns true if log level is allowed according to the constraints of this LogLevelException
+func (logLevelEx *LogLevelException) IsAllowed(level LogLevel) bool {
+	return logLevelEx.constraints.IsAllowed(level)
+}
+
+// FuncPattern returns the function pattern of an exception
+func (logLevelEx *LogLevelException) FuncPattern() string {
+	return logLevelEx.funcPattern
+}
+
+// FilePattern returns the file pattern of an exception
+func (logLevelEx *LogLevelException) FilePattern() string {
+	return logLevelEx.filePattern
+}
+
+// initFuncPatternParts checks whether the func filter has a correct format and splits funcPattern into parts
+func (logLevelEx *LogLevelException) initFuncPatternParts(funcPattern string) (err error) {
+
+	if funcFormatValidator.FindString(funcPattern) != funcPattern {
+		return errors.New("func path \"" + funcPattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 _ * . allowed)")
+	}
+
+	logLevelEx.funcPatternParts = splitPattern(funcPattern)
+	return nil
+}
+
+// Checks whether the file filter has a correct format and splits file patterns using splitPattern.
+func (logLevelEx *LogLevelException) initFilePatternParts(filePattern string) (err error) {
+
+	if fileFormatValidator.FindString(filePattern) != filePattern {
+		return errors.New("file path \"" + filePattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 \\ / _ * .
allowed)") + } + + logLevelEx.filePatternParts = splitPattern(filePattern) + return err +} + +func (logLevelEx *LogLevelException) match(funcPath string, filePath string) bool { + if !stringMatchesPattern(logLevelEx.funcPatternParts, funcPath) { + return false + } + return stringMatchesPattern(logLevelEx.filePatternParts, filePath) +} + +func (logLevelEx *LogLevelException) String() string { + str := fmt.Sprintf("Func: %s File: %s", logLevelEx.funcPattern, logLevelEx.filePattern) + + if logLevelEx.constraints != nil { + str += fmt.Sprintf("Constr: %s", logLevelEx.constraints) + } else { + str += "nil" + } + + return str +} + +// splitPattern splits pattern into strings and asterisks. Example: "ab*cde**f" -> ["ab", "*", "cde", "*", "f"] +func splitPattern(pattern string) []string { + var patternParts []string + var lastChar rune + for _, char := range pattern { + if char == '*' { + if lastChar != '*' { + patternParts = append(patternParts, "*") + } + } else { + if len(patternParts) != 0 && lastChar != '*' { + patternParts[len(patternParts)-1] += string(char) + } else { + patternParts = append(patternParts, string(char)) + } + } + lastChar = char + } + + return patternParts +} + +// stringMatchesPattern check whether testString matches pattern with asterisks. +// Standard regexp functionality is not used here because of performance issues. +func stringMatchesPattern(patternparts []string, testString string) bool { + if len(patternparts) == 0 { + return len(testString) == 0 + } + + part := patternparts[0] + if part != "*" { + index := strings.Index(testString, part) + if index == 0 { + return stringMatchesPattern(patternparts[1:], testString[len(part):]) + } + } else { + if len(patternparts) == 1 { + return true + } + + newTestString := testString + part = patternparts[1] + for { + index := strings.Index(newTestString, part) + if index == -1 { + break + } + + newTestString = newTestString[index+len(part):] + result := stringMatchesPattern(patternparts[2:], newTestString) + if result { + return true + } + } + } + return false +} diff --git a/vendor/github.com/cihub/seelog/common_flusher.go b/vendor/github.com/cihub/seelog/common_flusher.go new file mode 100644 index 00000000..0ef077c8 --- /dev/null +++ b/vendor/github.com/cihub/seelog/common_flusher.go @@ -0,0 +1,31 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
diff --git a/vendor/github.com/cihub/seelog/common_flusher.go b/vendor/github.com/cihub/seelog/common_flusher.go
new file mode 100644
index 00000000..0ef077c8
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/common_flusher.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+// flusherInterface represents all objects that have to do cleanup
+// at certain moments of time (e.g. before app shutdown to avoid data loss).
+type flusherInterface interface {
+	Flush()
+}
diff --git a/vendor/github.com/cihub/seelog/common_loglevel.go b/vendor/github.com/cihub/seelog/common_loglevel.go
new file mode 100644
index 00000000..d54ecf27
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/common_loglevel.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+// Log level type
+type LogLevel uint8
+
+// Log levels
+const (
+	TraceLvl = iota
+	DebugLvl
+	InfoLvl
+	WarnLvl
+	ErrorLvl
+	CriticalLvl
+	Off
+)
+
+// Log level string representations (used in configuration files)
+const (
+	TraceStr    = "trace"
+	DebugStr    = "debug"
+	InfoStr     = "info"
+	WarnStr     = "warn"
+	ErrorStr    = "error"
+	CriticalStr = "critical"
+	OffStr      = "off"
+)
+
+var levelToStringRepresentations = map[LogLevel]string{
+	TraceLvl:    TraceStr,
+	DebugLvl:    DebugStr,
+	InfoLvl:     InfoStr,
+	WarnLvl:     WarnStr,
+	ErrorLvl:    ErrorStr,
+	CriticalLvl: CriticalStr,
+	Off:         OffStr,
+}
+
+// LogLevelFromString parses a string and returns a corresponding log level, if successful.
+func LogLevelFromString(levelStr string) (level LogLevel, found bool) {
+	for lvl, lvlStr := range levelToStringRepresentations {
+		if lvlStr == levelStr {
+			return lvl, true
+		}
+	}
+
+	return 0, false
+}
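A one-line usage sketch for the parser above and the String method below:

	if lvl, found := seelog.LogLevelFromString("warn"); found {
		fmt.Println(lvl == seelog.WarnLvl) // true
		fmt.Println(lvl)                   // prints "warn" via LogLevel.String
	}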
+
+// String returns the seelog string representation for a specified level. Returns "" for invalid log levels.
+func (level LogLevel) String() string {
+	levelStr, ok := levelToStringRepresentations[level]
+	if ok {
+		return levelStr
+	}
+
+	return ""
+}
diff --git a/vendor/github.com/cihub/seelog/dispatch_custom.go b/vendor/github.com/cihub/seelog/dispatch_custom.go
new file mode 100644
index 00000000..383a7705
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/dispatch_custom.go
@@ -0,0 +1,242 @@
+// Copyright (c) 2013 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+)
+
+var registeredReceivers = make(map[string]reflect.Type)
+
+// RegisterReceiver records a custom receiver type, identified by a value
+// of that type (second argument), under the specified name. Registered
+// names can be used in the "name" attribute of '<custom>' config items.
+//
+// RegisterReceiver takes the type of the receiver argument, without taking
+// the value into account. So do NOT put any data into the second argument;
+// only call it like:
+//	RegisterReceiver("somename", &MyReceiverType{})
+//
+// After that, when a '<custom>' config tag with this name is used,
+// a receiver of the specified type will be instantiated. Check
+// CustomReceiver comments for interface details.
+//
+// NOTE 1: RegisterReceiver fails if you attempt to register different types
+// with the same name.
+//
+// NOTE 2: RegisterReceiver registers those receivers that must be used in
+// the configuration files ('<custom>' items). Basically it is just the way
+// you tell the seelog config parser what it should do when it meets a '<custom>'
+// tag with a specific name and data attributes.
+//
+// But if you are only using seelog as a proxy to an already instantiated
+// CustomReceiver (via the LoggerFromCustomReceiver func), you should not call RegisterReceiver.
+func RegisterReceiver(name string, receiver CustomReceiver) {
+	newType := reflect.TypeOf(reflect.ValueOf(receiver).Elem().Interface())
+	if t, ok := registeredReceivers[name]; ok && t != newType {
+		panic(fmt.Sprintf("duplicate types for %s: %s != %s", name, t, newType))
+	}
+	registeredReceivers[name] = newType
+}
+
+func customReceiverByName(name string) (creceiver CustomReceiver, err error) {
+	rt, ok := registeredReceivers[name]
+	if !ok {
+		return nil, fmt.Errorf("custom receiver name not registered: '%s'", name)
+	}
+	v, ok := reflect.New(rt).Interface().(CustomReceiver)
+	if !ok {
+		return nil, fmt.Errorf("cannot instantiate receiver with name='%s'", name)
+	}
+	return v, nil
+}
+
+// CustomReceiverInitArgs represents arguments passed to the CustomReceiver.AfterParse
+// func when a custom receiver is being initialized.
+type CustomReceiverInitArgs struct {
+	// XmlCustomAttrs represent '<custom>' xml config item attributes that
+	// start with "data-". Map keys will be the attribute names without the "data-".
+	// Map values will be those attribute values.
+	//
+	// E.g. if you have a '<custom name="somename" data-attr1="a1" data-attr2="a2"/>' item,
+	// you will get a map with 2 key-value pairs: "attr1"->"a1", "attr2"->"a2".
+	//
+	// Note that in custom items you can only use allowed attributes, like "name" and
+	// your custom attributes, starting with "data-". Any other will lead to a
+	// parsing error.
+	XmlCustomAttrs map[string]string
+}
+
+// CustomReceiver is the interface that external custom seelog message receivers
+// must implement in order to be able to process seelog messages. Those receivers
+// are set in the xml config file using the '<custom>' tag. Check the receivers
+// reference wiki section on that.
+//
+// Use seelog.RegisterReceiver on the receiver type before using it.
+type CustomReceiver interface {
+	// ReceiveMessage is called when the custom receiver gets a seelog message from
+	// a parent dispatcher.
+	//
+	// Message, level and context args represent all data that was included in the seelog
+	// message at the time it was logged.
+	//
+	// The formatting is already applied to the message and depends on the config,
+	// like with any other receiver.
+	//
+	// If you would like to inform seelog of an error that happened during the handling of
+	// the message, return a non-nil error. This way you'll end up seeing your error like
+	// any other internal seelog error.
+	ReceiveMessage(message string, level LogLevel, context LogContextInterface) error
+
+	// AfterParse is called immediately after your custom receiver is instantiated by
+	// the xml config parser. So, if you need to do any startup logic after config parsing,
+	// like opening a file or allocating resources after the receiver is instantiated, do it here.
+	//
+	// If this func returns a non-nil error, then the loading procedure will fail. E.g.
+	// if you are loading a seelog xml config, the parser will not finish the loading
+	// procedure and will report the error like any other config error.
+	//
+	// If your custom logger needs some configuration, you can use custom attributes in
+	// your config. Check the CustomReceiverInitArgs.XmlCustomAttrs comments.
+	//
+	// IMPORTANT: This func is NOT called when the LoggerFromCustomReceiver func is used
+	// to create a seelog proxy logger using the custom receiver. This func is only called when
+	// the receiver is instantiated from a config.
+	AfterParse(initArgs CustomReceiverInitArgs) error
+
+	// Flush is called when the custom receiver gets a 'flush' directive from a
+	// parent receiver.
+	// If the custom receiver implements some kind of buffering or queuing, then
+	// the appropriate reaction to a flush message is to synchronously flush all
+	// those queues/buffers. If the custom receiver doesn't have such mechanisms,
+	// the Flush implementation may be left empty.
+	Flush()
+
+	// Close is called when the custom receiver gets a 'close' directive from a
+	// parent receiver. This happens when a top-level seelog dispatcher is sending
+	// 'close' to all child nodes, and it means that the current seelog logger is
+	// being closed. If you need to do any cleanup after your custom receiver is
+	// done, you should do it here.
+	Close() error
+}
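A minimal sketch of a conforming receiver (the type and the "stdout" name here are hypothetical, not part of seelog):

	// stdoutReceiver prints already-formatted seelog messages to stdout.
	type stdoutReceiver struct{}

	func (r *stdoutReceiver) ReceiveMessage(message string, level seelog.LogLevel, context seelog.LogContextInterface) error {
		fmt.Print(message)
		return nil
	}

	// AfterParse could read settings from initArgs.XmlCustomAttrs ("data-" attributes); nothing to do here.
	func (r *stdoutReceiver) AfterParse(initArgs seelog.CustomReceiverInitArgs) error { return nil }

	func (r *stdoutReceiver) Flush() {}

	func (r *stdoutReceiver) Close() error { return nil }

	func init() {
		// Makes <custom name="stdout"/> usable in xml configs.
		seelog.RegisterReceiver("stdout", &stdoutReceiver{})
	}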
+
+type customReceiverDispatcher struct {
+	formatter          *formatter
+	innerReceiver      CustomReceiver
+	customReceiverName string
+	usedArgs           CustomReceiverInitArgs
+}
+
+// NewCustomReceiverDispatcher creates a customReceiverDispatcher which dispatches data to a specific receiver created
+// using a '<custom>' tag in the config file.
+func NewCustomReceiverDispatcher(formatter *formatter, customReceiverName string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) {
+	if formatter == nil {
+		return nil, errors.New("formatter cannot be nil")
+	}
+	if len(customReceiverName) == 0 {
+		return nil, errors.New("custom receiver name cannot be empty")
+	}
+
+	creceiver, err := customReceiverByName(customReceiverName)
+	if err != nil {
+		return nil, err
+	}
+	err = creceiver.AfterParse(cArgs)
+	if err != nil {
+		return nil, err
+	}
+	disp := &customReceiverDispatcher{formatter, creceiver, customReceiverName, cArgs}
+
+	return disp, nil
+}
+
+// NewCustomReceiverDispatcherByValue is basically the same as NewCustomReceiverDispatcher, but using
+// a specific CustomReceiver value instead of instantiating a new one by type.
+func NewCustomReceiverDispatcherByValue(formatter *formatter, customReceiver CustomReceiver, name string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) {
+	if formatter == nil {
+		return nil, errors.New("formatter cannot be nil")
+	}
+	if customReceiver == nil {
+		return nil, errors.New("customReceiver cannot be nil")
+	}
+	disp := &customReceiverDispatcher{formatter, customReceiver, name, cArgs}
+
+	return disp, nil
+}
+
+// Dispatch implements dispatcherInterface by forwarding the formatted message to the inner CustomReceiver.
+func (disp *customReceiverDispatcher) Dispatch(
+	message string,
+	level LogLevel,
+	context LogContextInterface,
+	errorFunc func(err error)) {
+
+	defer func() {
+		if err := recover(); err != nil {
+			errorFunc(fmt.Errorf("panic in custom receiver '%s'.Dispatch: %s", reflect.TypeOf(disp.innerReceiver), err))
+		}
+	}()
+
+	err := disp.innerReceiver.ReceiveMessage(disp.formatter.Format(message, level, context), level, context)
+	if err != nil {
+		errorFunc(err)
+	}
+}
+
+// Flush implements flusherInterface by forwarding the flush to the inner CustomReceiver.
+func (disp *customReceiverDispatcher) Flush() {
+	disp.innerReceiver.Flush()
+}
+
+// Close implements io.Closer: it flushes the inner CustomReceiver and then closes it.
+func (disp *customReceiverDispatcher) Close() error {
+	disp.innerReceiver.Flush()
+
+	err := disp.innerReceiver.Close()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (disp *customReceiverDispatcher) String() string {
+	datas := ""
+	skeys := make([]string, 0, len(disp.usedArgs.XmlCustomAttrs))
+	for i := range disp.usedArgs.XmlCustomAttrs {
+		skeys = append(skeys, i)
+	}
+	sort.Strings(skeys)
+	for _, key := range skeys {
+		datas += fmt.Sprintf("<%s, %s> ", key, disp.usedArgs.XmlCustomAttrs[key])
+	}
+
+	str := fmt.Sprintf("Custom receiver %s [fmt='%s'],[data='%s'],[inner='%s']\n",
+		disp.customReceiverName, disp.formatter.String(), datas, disp.innerReceiver)
+
+	return str
+}
diff --git a/vendor/github.com/cihub/seelog/dispatch_dispatcher.go b/vendor/github.com/cihub/seelog/dispatch_dispatcher.go
new file mode 100644
index 00000000..2bd3b4a4
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/dispatch_dispatcher.go
@@ -0,0 +1,189 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// A dispatcherInterface is used to dispatch messages to all underlying receivers.
+// Dispatch logic depends on the given context and log level. Any errors are reported using errorFunc.
+// Also, as underlying receivers may have state, the dispatcher has a Close method which performs
+// an immediate cleanup of all data that is stored in the receivers.
+type dispatcherInterface interface {
+	flusherInterface
+	io.Closer
+	Dispatch(message string, level LogLevel, context LogContextInterface, errorFunc func(err error))
+}
+
+type dispatcher struct {
+	formatter   *formatter
+	writers     []*formattedWriter
+	dispatchers []dispatcherInterface
+}
+
+// createDispatcher creates a dispatcher which dispatches data to a list of receivers.
+// Each receiver must be either an io.Writer or a dispatcherInterface; otherwise an error is returned.
+func createDispatcher(formatter *formatter, receivers []interface{}) (*dispatcher, error) {
+	if formatter == nil {
+		return nil, errors.New("formatter cannot be nil")
+	}
+	if len(receivers) == 0 {
+		return nil, errors.New("receivers cannot be nil or empty")
+	}
+
+	disp := &dispatcher{formatter, make([]*formattedWriter, 0), make([]dispatcherInterface, 0)}
+	for _, receiver := range receivers {
+		writer, ok := receiver.(*formattedWriter)
+		if ok {
+			disp.writers = append(disp.writers, writer)
+			continue
+		}
+
+		ioWriter, ok := receiver.(io.Writer)
+		if ok {
+			writer, err := NewFormattedWriter(ioWriter, disp.formatter)
+			if err != nil {
+				return nil, err
+			}
+			disp.writers = append(disp.writers, writer)
+			continue
+		}
+
+		dispInterface, ok := receiver.(dispatcherInterface)
+		if ok {
+			disp.dispatchers = append(disp.dispatchers, dispInterface)
+			continue
+		}
+
+		return nil, errors.New("method can receive either io.Writer or dispatcherInterface")
+	}
+
+	return disp, nil
+}
+
+func (disp *dispatcher) Dispatch(
+	message string,
+	level LogLevel,
+	context LogContextInterface,
+	errorFunc func(err error)) {
+
+	for _, writer := range disp.writers {
+		err := writer.Write(message, level, context)
+		if err != nil {
+			errorFunc(err)
+		}
+	}
+
+	for _, dispInterface := range disp.dispatchers {
+		dispInterface.Dispatch(message, level, context, errorFunc)
+	}
+}
+
+// Flush goes through all underlying writers which implement the flusherInterface interface
+// and flushes them. It recursively performs the same action for underlying dispatchers.
+func (disp *dispatcher) Flush() {
+	for _, disp := range disp.Dispatchers() {
+		disp.Flush()
+	}
+
+	for _, formatWriter := range disp.Writers() {
+		flusher, ok := formatWriter.Writer().(flusherInterface)
+		if ok {
+			flusher.Flush()
+		}
+	}
+}
+
+// Close goes through all underlying writers which implement the io.Closer interface
+// and closes them.
+// It recursively performs the same action for underlying dispatchers.
+// Before closing, writers are flushed to prevent loss of any buffered data, so
+// a call to the Flush() func before Close() is not necessary.
+func (disp *dispatcher) Close() error {
+	for _, disp := range disp.Dispatchers() {
+		disp.Flush()
+		err := disp.Close()
+		if err != nil {
+			return err
+		}
+	}
+
+	for _, formatWriter := range disp.Writers() {
+		flusher, ok := formatWriter.Writer().(flusherInterface)
+		if ok {
+			flusher.Flush()
+		}
+
+		closer, ok := formatWriter.Writer().(io.Closer)
+		if ok {
+			err := closer.Close()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (disp *dispatcher) Writers() []*formattedWriter {
+	return disp.writers
+}
+
+func (disp *dispatcher) Dispatchers() []dispatcherInterface {
+	return disp.dispatchers
+}
+
+func (disp *dispatcher) String() string {
+	str := "formatter: " + disp.formatter.String() + "\n"
+
+	str += "    ->Dispatchers:"
+
+	if len(disp.dispatchers) == 0 {
+		str += "none\n"
+	} else {
+		str += "\n"
+
+		for _, disp := range disp.dispatchers {
+			str += fmt.Sprintf("        ->%s", disp)
+		}
+	}
+
+	str += "    ->Writers:"
+
+	if len(disp.writers) == 0 {
+		str += "none\n"
+	} else {
+		str += "\n"
+
+		for _, writer := range disp.writers {
+			str += fmt.Sprintf("        ->%s\n", writer)
+		}
+	}
+
+	return str
+}
diff --git a/vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go b/vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go
new file mode 100644
index 00000000..9de8a722
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"fmt"
+)
+
+// A filterDispatcher writes the given message to underlying receivers only if the message log level
+// is in the allowed list.
+type filterDispatcher struct {
+	*dispatcher
+	allowList map[LogLevel]bool
+}
+
+// NewFilterDispatcher creates a new filterDispatcher using a list of allowed levels.
+func NewFilterDispatcher(formatter *formatter, receivers []interface{}, allowList ...LogLevel) (*filterDispatcher, error) {
+	disp, err := createDispatcher(formatter, receivers)
+	if err != nil {
+		return nil, err
+	}
+
+	allows := make(map[LogLevel]bool)
+	for _, allowLevel := range allowList {
+		allows[allowLevel] = true
+	}
+
+	return &filterDispatcher{disp, allows}, nil
+}
+
+func (filter *filterDispatcher) Dispatch(
+	message string,
+	level LogLevel,
+	context LogContextInterface,
+	errorFunc func(err error)) {
+	isAllowed, ok := filter.allowList[level]
+	if ok && isAllowed {
+		filter.dispatcher.Dispatch(message, level, context, errorFunc)
+	}
+}
+
+func (filter *filterDispatcher) String() string {
+	return fmt.Sprintf("filterDispatcher ->\n%s", filter.dispatcher)
+}
diff --git a/vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go b/vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go
new file mode 100644
index 00000000..1d0fe7ea
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"fmt"
+)
+
+// A splitDispatcher just writes the given message to underlying receivers. (Splits the message stream.)
+type splitDispatcher struct {
+	*dispatcher
+}
+
+// NewSplitDispatcher creates a splitDispatcher which forwards every message to all of its receivers.
+func NewSplitDispatcher(formatter *formatter, receivers []interface{}) (*splitDispatcher, error) {
+	disp, err := createDispatcher(formatter, receivers)
+	if err != nil {
+		return nil, err
+	}
+
+	return &splitDispatcher{disp}, nil
+}
+
+func (splitter *splitDispatcher) String() string {
+	return fmt.Sprintf("splitDispatcher ->\n%s", splitter.dispatcher.String())
+}
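A sketch of composing these dispatchers in code (NewConsoleWriter and NewFormatter are the constructors used in doc.go just below; error handling elided):

	consoleWriter, _ := seelog.NewConsoleWriter()
	formatter, _ := seelog.NewFormatter("[%LEV] %Msg%n")
	// Only errors and criticals pass the filter; the splitter fans messages out to every receiver.
	filtered, _ := seelog.NewFilterDispatcher(formatter, []interface{}{consoleWriter}, seelog.ErrorLvl, seelog.CriticalLvl)
	root, _ := seelog.NewSplitDispatcher(formatter, []interface{}{filtered, os.Stderr})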
diff --git a/vendor/github.com/cihub/seelog/doc.go b/vendor/github.com/cihub/seelog/doc.go
new file mode 100644
index 00000000..2734c9cb
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/doc.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2014 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package seelog implements logging functionality with flexible dispatching, filtering, and formatting.
+
+Creation
+
+To create a logger, use one of the following constructors:
+	func LoggerFromConfigAsBytes
+	func LoggerFromConfigAsFile
+	func LoggerFromConfigAsString
+	func LoggerFromWriterWithMinLevel
+	func LoggerFromWriterWithMinLevelAndFormat
+	func LoggerFromCustomReceiver (check https://github.com/cihub/seelog/wiki/Custom-receivers)
+Example:
+	import log "github.com/cihub/seelog"
+
+	func main() {
+		logger, err := log.LoggerFromConfigAsFile("seelog.xml")
+		if err != nil {
+			panic(err)
+		}
+		defer logger.Flush()
+		... use logger ...
+	}
+The "defer" line is important: with asynchronous logger behavior, messages are processed in a separate
+non-blocking goroutine, so without this line you may lose some of them when the application exits.
+Explicitly deferring a flush of all messages before closing avoids that.
+
+Usage
+
+A logger created using one of the LoggerFrom* funcs can be used directly by calling one of the main log funcs.
+Example:
+	import log "github.com/cihub/seelog"
+
+	func main() {
+		logger, err := log.LoggerFromConfigAsFile("seelog.xml")
+		if err != nil {
+			panic(err)
+		}
+		defer logger.Flush()
+		logger.Trace("test")
+		logger.Debugf("var = %s", "abc")
+	}
+
+Having loggers as variables is convenient if you are writing your own package with internal logging or if you have
+several loggers with different options. But for most standalone apps it is more convenient to use package-level
+funcs and vars. The package-level var 'Current' exists for exactly this. You can replace it with another logger
+using 'ReplaceLogger' and then use the package-level funcs:
+	import log "github.com/cihub/seelog"
+
+	func main() {
+		logger, err := log.LoggerFromConfigAsFile("seelog.xml")
+		if err != nil {
+			panic(err)
+		}
+		log.ReplaceLogger(logger)
+		defer log.Flush()
+		log.Trace("test")
+		log.Debugf("var = %s", "abc")
+	}
+The last lines
+	log.Trace("test")
+	log.Debugf("var = %s", "abc")
+do the same as
+	log.Current.Trace("test")
+	log.Current.Debugf("var = %s", "abc")
+In this example the 'Current' logger was replaced using a 'ReplaceLogger' call and became equal to the 'logger'
+variable created from the config. This way you are able to use package-level funcs instead of passing the
+logger variable around.
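If no config file is involved at all, the writer-based constructors listed under Creation give a one-line start (a sketch):

	logger, err := seelog.LoggerFromWriterWithMinLevel(os.Stdout, seelog.InfoLvl)
	if err != nil {
		panic(err)
	}
	defer logger.Flush()
	logger.Info("logging without a config file")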
+
+Configuration
+
+The main point of seelog is to configure the logger via config files rather than code.
+The configuration is read by the LoggerFrom* funcs. These funcs read xml configuration from different sources and try
+to create a logger using it.
+
+All the configuration features are covered in detail in the official wiki: https://github.com/cihub/seelog/wiki.
+There are many sections covering different aspects of seelog, but the most important for understanding configs are:
+	https://github.com/cihub/seelog/wiki/Constraints-and-exceptions
+	https://github.com/cihub/seelog/wiki/Dispatchers-and-receivers
+	https://github.com/cihub/seelog/wiki/Formatting
+	https://github.com/cihub/seelog/wiki/Logger-types
+After you understand these concepts, check the 'Reference' section on the main wiki page to get the up-to-date
+list of dispatchers, receivers, formats, and logger types.
+
+Here is an example config with all these features:
+
+	<seelog type="adaptive" mininterval="2000000" maxinterval="100000000" critmsgcount="500" minlevel="debug">
+		<exceptions>
+			<exception filepattern="test*" minlevel="error"/>
+		</exceptions>
+		<outputs formatid="all">
+			<file path="all.log"/>
+			<filter levels="info">
+				<console formatid="fmtinfo"/>
+			</filter>
+			<filter levels="error,critical" formatid="fmterror">
+				<console/>
+				<file path="errors.log"/>
+			</filter>
+		</outputs>
+		<formats>
+			<format id="fmtinfo" format="[%Level] [%Time] %Msg%n"/>
+			<format id="fmterror" format="[%LEVEL] [%Time] [%FuncShort @ %File.%Line] %Msg%n"/>
+			<format id="all" format="[%Level] [%Time] [@ %File.%Line] %Msg%n"/>
+		</formats>
+	</seelog>
+
+This config represents a logger with adaptive timeout between log messages (check the logger types reference) which
+logs to console, all.log, and errors.log depending on the log level. Its output formats also depend on the log level.
+This logger will only use log level 'debug' and higher (minlevel is set) for all files with names that don't start
+with 'test'. For files starting with 'test' this logger prohibits all levels below 'error'.
+
+Configuration using code
+
+Although configuration using code is not recommended, it is sometimes needed and it is possible to do with seelog.
+Basically, what you need to do to get started is to create constraints, exceptions and a dispatcher tree (same as
+with a config). Most of the New* functions in this package are used to provide such capabilities.
+
+Here is an example of configuration in code that demonstrates an async loop logger that logs to a simple split
+dispatcher with a console receiver using a specified format and is filtered using top-level min-max constraints
+and one exception for the 'main.go' file. So, this is basically a demonstration of configuration of most of the
+features:
+
+	package main
+
+	import log "github.com/cihub/seelog"
+
+	func main() {
+		defer log.Flush()
+		log.Info("Hello from Seelog!")
+
+		consoleWriter, _ := log.NewConsoleWriter()
+		formatter, _ := log.NewFormatter("%Level %Msg %File%n")
+		root, _ := log.NewSplitDispatcher(formatter, []interface{}{consoleWriter})
+		constraints, _ := log.NewMinMaxConstraints(log.TraceLvl, log.CriticalLvl)
+		specificConstraints, _ := log.NewListConstraints([]log.LogLevel{log.InfoLvl, log.ErrorLvl})
+		ex, _ := log.NewLogLevelException("*", "*main.go", specificConstraints)
+		exceptions := []*log.LogLevelException{ex}
+
+		logger := log.NewAsyncLoopLogger(log.NewLoggerConfig(constraints, exceptions, root))
+		log.ReplaceLogger(logger)
+
+		log.Trace("This should not be seen")
+		log.Debug("This should not be seen")
+		log.Info("Test")
+		log.Error("Test2")
+	}
+
+Examples
+
+To learn seelog features faster you should check the examples package: https://github.com/cihub/seelog-examples
+It contains many example configs and use cases.
+*/
+package seelog
diff --git a/vendor/github.com/cihub/seelog/format.go b/vendor/github.com/cihub/seelog/format.go
new file mode 100644
index 00000000..ec47b457
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/format.go
@@ -0,0 +1,466 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// FormatterSymbol is a special symbol used in config files to mark special format aliases.
+const (
+	FormatterSymbol = '%'
+)
+
+const (
+	formatterParameterStart = '('
+	formatterParameterEnd   = ')'
+)
+
+// Time and date formats used for %Date and %Time aliases.
+const (
+	DateDefaultFormat = "2006-01-02"
+	TimeFormat        = "15:04:05"
+)
+
+var DefaultMsgFormat = "%Ns [%Level] %Msg%n"
+
+var (
+	DefaultFormatter *formatter
+	msgonlyformatter *formatter
+)
+
+func init() {
+	var err error
+	if DefaultFormatter, err = NewFormatter(DefaultMsgFormat); err != nil {
+		reportInternalError(fmt.Errorf("error during creating DefaultFormatter: %s", err))
+	}
+	if msgonlyformatter, err = NewFormatter("%Msg"); err != nil {
+		reportInternalError(fmt.Errorf("error during creating msgonlyformatter: %s", err))
+	}
+}
+
+// FormatterFunc represents one formatter object that starts with the '%' sign in the 'format' attribute
+// of the 'format' config item. These special symbols are replaced with context values or special
+// strings when a message is written to the byte receiver.
+//
+// Check https://github.com/cihub/seelog/wiki/Formatting for details.
+// Full list (with descriptions) of formatters: https://github.com/cihub/seelog/wiki/Format-reference
+//
+// FormatterFunc takes the raw log message, level, and log context and returns a string, a number (of any type),
+// or any object that can be evaluated as a string.
+type FormatterFunc func(message string, level LogLevel, context LogContextInterface) interface{}
+
+// FormatterFuncCreator is a factory of FormatterFunc objects. It is used to generate parameterized
+// formatters (such as %Date or %EscM) and custom user formatters.
+type FormatterFuncCreator func(param string) FormatterFunc + +var formatterFuncs = map[string]FormatterFunc{ + "Level": formatterLevel, + "Lev": formatterLev, + "LEVEL": formatterLEVEL, + "LEV": formatterLEV, + "l": formatterl, + "Msg": formatterMsg, + "FullPath": formatterFullPath, + "File": formatterFile, + "RelFile": formatterRelFile, + "Func": FormatterFunction, + "FuncShort": FormatterFunctionShort, + "Line": formatterLine, + "Time": formatterTime, + "UTCTime": formatterUTCTime, + "Ns": formatterNs, + "UTCNs": formatterUTCNs, + "r": formatterr, + "n": formattern, + "t": formattert, +} + +var formatterFuncsParameterized = map[string]FormatterFuncCreator{ + "Date": createDateTimeFormatterFunc, + "UTCDate": createUTCDateTimeFormatterFunc, + "EscM": createANSIEscapeFunc, +} + +func errorAliasReserved(name string) error { + return fmt.Errorf("cannot use '%s' as custom formatter name. Name is reserved", name) +} + +// RegisterCustomFormatter registers a new custom formatter factory with a given name. If returned error is nil, +// then this name (prepended by '%' symbol) can be used in 'format' attributes in configuration and +// it will be treated like the standard parameterized formatter identifiers. +// +// RegisterCustomFormatter needs to be called before creating a logger for it to take effect. The general recommendation +// is to call it once in 'init' func of your application or any initializer func. +// +// For usage examples, check https://github.com/cihub/seelog/wiki/Custom-formatters. +// +// Name must only consist of letters (unicode.IsLetter). +// +// Name must not be one of the already registered standard formatter names +// (https://github.com/cihub/seelog/wiki/Format-reference) and previously registered +// custom format names. To avoid any potential name conflicts (in future releases), it is recommended +// to start your custom formatter name with a namespace (e.g. 'MyCompanySomething') or a 'Custom' keyword. +func RegisterCustomFormatter(name string, creator FormatterFuncCreator) error { + if _, ok := formatterFuncs[name]; ok { + return errorAliasReserved(name) + } + if _, ok := formatterFuncsParameterized[name]; ok { + return errorAliasReserved(name) + } + formatterFuncsParameterized[name] = creator + return nil +} + +// formatter is used to write messages in a specific format, inserting such additional data +// as log level, date/time, etc. +type formatter struct { + fmtStringOriginal string + fmtString string + formatterFuncs []FormatterFunc +} + +// NewFormatter creates a new formatter using a format string +func NewFormatter(formatString string) (*formatter, error) { + fmtr := new(formatter) + fmtr.fmtStringOriginal = formatString + if err := buildFormatterFuncs(fmtr); err != nil { + return nil, err + } + return fmtr, nil +} + +func buildFormatterFuncs(formatter *formatter) error { + var ( + fsbuf = new(bytes.Buffer) + fsolm1 = len(formatter.fmtStringOriginal) - 1 + ) + for i := 0; i <= fsolm1; i++ { + if char := formatter.fmtStringOriginal[i]; char != FormatterSymbol { + fsbuf.WriteByte(char) + continue + } + // Check if the index is at the end of the string. + if i == fsolm1 { + return fmt.Errorf("format error: %c cannot be last symbol", FormatterSymbol) + } + // Check if the formatter symbol is doubled and skip it as nonmatching. 
+ if formatter.fmtStringOriginal[i+1] == FormatterSymbol { + fsbuf.WriteRune(FormatterSymbol) + i++ + continue + } + function, ni, err := formatter.extractFormatterFunc(i + 1) + if err != nil { + return err + } + // Append formatting string "%v". + fsbuf.Write([]byte{37, 118}) + i = ni + formatter.formatterFuncs = append(formatter.formatterFuncs, function) + } + formatter.fmtString = fsbuf.String() + return nil +} + +func (formatter *formatter) extractFormatterFunc(index int) (FormatterFunc, int, error) { + letterSequence := formatter.extractLetterSequence(index) + if len(letterSequence) == 0 { + return nil, 0, fmt.Errorf("format error: lack of formatter after %c at %d", FormatterSymbol, index) + } + + function, formatterLength, ok := formatter.findFormatterFunc(letterSequence) + if ok { + return function, index + formatterLength - 1, nil + } + + function, formatterLength, ok, err := formatter.findFormatterFuncParametrized(letterSequence, index) + if err != nil { + return nil, 0, err + } + if ok { + return function, index + formatterLength - 1, nil + } + + return nil, 0, errors.New("format error: unrecognized formatter at " + strconv.Itoa(index) + ": " + letterSequence) +} + +func (formatter *formatter) extractLetterSequence(index int) string { + letters := "" + + bytesToParse := []byte(formatter.fmtStringOriginal[index:]) + runeCount := utf8.RuneCount(bytesToParse) + for i := 0; i < runeCount; i++ { + rune, runeSize := utf8.DecodeRune(bytesToParse) + bytesToParse = bytesToParse[runeSize:] + + if unicode.IsLetter(rune) { + letters += string(rune) + } else { + break + } + } + return letters +} + +func (formatter *formatter) findFormatterFunc(letters string) (FormatterFunc, int, bool) { + currentVerb := letters + for i := 0; i < len(letters); i++ { + function, ok := formatterFuncs[currentVerb] + if ok { + return function, len(currentVerb), ok + } + currentVerb = currentVerb[:len(currentVerb)-1] + } + + return nil, 0, false +} + +func (formatter *formatter) findFormatterFuncParametrized(letters string, lettersStartIndex int) (FormatterFunc, int, bool, error) { + currentVerb := letters + for i := 0; i < len(letters); i++ { + functionCreator, ok := formatterFuncsParameterized[currentVerb] + if ok { + parameter := "" + parameterLen := 0 + isVerbEqualsLetters := i == 0 // if not, then letter goes after formatter, and formatter is parameterless + if isVerbEqualsLetters { + userParameter := "" + var err error + userParameter, parameterLen, ok, err = formatter.findparameter(lettersStartIndex + len(currentVerb)) + if ok { + parameter = userParameter + } else if err != nil { + return nil, 0, false, err + } + } + + return functionCreator(parameter), len(currentVerb) + parameterLen, true, nil + } + + currentVerb = currentVerb[:len(currentVerb)-1] + } + + return nil, 0, false, nil +} + +func (formatter *formatter) findparameter(startIndex int) (string, int, bool, error) { + if len(formatter.fmtStringOriginal) == startIndex || formatter.fmtStringOriginal[startIndex] != formatterParameterStart { + return "", 0, false, nil + } + + endIndex := strings.Index(formatter.fmtStringOriginal[startIndex:], string(formatterParameterEnd)) + if endIndex == -1 { + return "", 0, false, fmt.Errorf("Unmatched parenthesis or invalid parameter at %d: %s", + startIndex, formatter.fmtStringOriginal[startIndex:]) + } + endIndex += startIndex + + length := endIndex - startIndex + 1 + + return formatter.fmtStringOriginal[startIndex+1 : endIndex], length, true, nil +} + +// Format processes a message with special formatters, log 
level, and context. Returns formatted string +// with all formatter identifiers changed to appropriate values. +func (formatter *formatter) Format(message string, level LogLevel, context LogContextInterface) string { + if len(formatter.formatterFuncs) == 0 { + return formatter.fmtString + } + + params := make([]interface{}, len(formatter.formatterFuncs)) + for i, function := range formatter.formatterFuncs { + params[i] = function(message, level, context) + } + + return fmt.Sprintf(formatter.fmtString, params...) +} + +func (formatter *formatter) String() string { + return formatter.fmtStringOriginal +} + +//===================================================== + +const ( + wrongLogLevel = "WRONG_LOGLEVEL" + wrongEscapeCode = "WRONG_ESCAPE" +) + +var levelToString = map[LogLevel]string{ + TraceLvl: "Trace", + DebugLvl: "Debug", + InfoLvl: "Info", + WarnLvl: "Warn", + ErrorLvl: "Error", + CriticalLvl: "Critical", + Off: "Off", +} + +var levelToShortString = map[LogLevel]string{ + TraceLvl: "Trc", + DebugLvl: "Dbg", + InfoLvl: "Inf", + WarnLvl: "Wrn", + ErrorLvl: "Err", + CriticalLvl: "Crt", + Off: "Off", +} + +var levelToShortestString = map[LogLevel]string{ + TraceLvl: "t", + DebugLvl: "d", + InfoLvl: "i", + WarnLvl: "w", + ErrorLvl: "e", + CriticalLvl: "c", + Off: "o", +} + +func formatterLevel(message string, level LogLevel, context LogContextInterface) interface{} { + levelStr, ok := levelToString[level] + if !ok { + return wrongLogLevel + } + return levelStr +} + +func formatterLev(message string, level LogLevel, context LogContextInterface) interface{} { + levelStr, ok := levelToShortString[level] + if !ok { + return wrongLogLevel + } + return levelStr +} + +func formatterLEVEL(message string, level LogLevel, context LogContextInterface) interface{} { + return strings.ToTitle(formatterLevel(message, level, context).(string)) +} + +func formatterLEV(message string, level LogLevel, context LogContextInterface) interface{} { + return strings.ToTitle(formatterLev(message, level, context).(string)) +} + +func formatterl(message string, level LogLevel, context LogContextInterface) interface{} { + levelStr, ok := levelToShortestString[level] + if !ok { + return wrongLogLevel + } + return levelStr +} + +func formatterMsg(message string, level LogLevel, context LogContextInterface) interface{} { + return message +} + +func formatterFullPath(message string, level LogLevel, context LogContextInterface) interface{} { + return context.FullPath() +} + +func formatterFile(message string, level LogLevel, context LogContextInterface) interface{} { + return context.FileName() +} + +func formatterRelFile(message string, level LogLevel, context LogContextInterface) interface{} { + return context.ShortPath() +} + +func FormatterFunction(message string, level LogLevel, context LogContextInterface) interface{} { + return context.Func() +} + +func FormatterFunctionShort(message string, level LogLevel, context LogContextInterface) interface{} { + f := context.Func() + spl := strings.Split(f, ".") + return spl[len(spl)-1] +} + +func formatterLine(message string, level LogLevel, context LogContextInterface) interface{} { + return context.Line() +} + +func formatterTime(message string, level LogLevel, context LogContextInterface) interface{} { + return context.CallTime().Format(TimeFormat) +} + +func formatterUTCTime(message string, level LogLevel, context LogContextInterface) interface{} { + return context.CallTime().UTC().Format(TimeFormat) +} + +func formatterNs(message string, level LogLevel, context 
LogContextInterface) interface{} {
+	return context.CallTime().UnixNano()
+}
+
+func formatterUTCNs(message string, level LogLevel, context LogContextInterface) interface{} {
+	return context.CallTime().UTC().UnixNano()
+}
+
+func formatterr(message string, level LogLevel, context LogContextInterface) interface{} {
+	return "\r"
+}
+
+func formattern(message string, level LogLevel, context LogContextInterface) interface{} {
+	return "\n"
+}
+
+func formattert(message string, level LogLevel, context LogContextInterface) interface{} {
+	return "\t"
+}
+
+func createDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {
+	format := dateTimeFormat
+	if format == "" {
+		format = DateDefaultFormat
+	}
+	return func(message string, level LogLevel, context LogContextInterface) interface{} {
+		return context.CallTime().Format(format)
+	}
+}
+
+func createUTCDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {
+	format := dateTimeFormat
+	if format == "" {
+		format = DateDefaultFormat
+	}
+	return func(message string, level LogLevel, context LogContextInterface) interface{} {
+		return context.CallTime().UTC().Format(format)
+	}
+}
+
+func createANSIEscapeFunc(escapeCodeString string) FormatterFunc {
+	return func(message string, level LogLevel, context LogContextInterface) interface{} {
+		if len(escapeCodeString) == 0 {
+			return wrongEscapeCode
+		}
+
+		return fmt.Sprintf("%c[%sm", 0x1B, escapeCodeString)
+	}
+}
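As a sketch of how the aliases above compose: every verb in the following format string is defined in the tables earlier in this file, and %EscM and %UTCDate take the optional '(...)' parameter handled by findparameter:

	f, err := seelog.NewFormatter("%EscM(36)%LEVEL%EscM(0) %UTCDate(2006-01-02) %UTCTime %FuncShort @ %RelFile.%Line: %Msg%n")
	if err != nil {
		panic(err)
	}
	_ = f // usable with NewSplitDispatcher / NewFilterDispatcher shown earlier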
diff --git a/vendor/github.com/cihub/seelog/internals_baseerror.go b/vendor/github.com/cihub/seelog/internals_baseerror.go
new file mode 100644
index 00000000..c0b271d7
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/internals_baseerror.go
@@ -0,0 +1,10 @@
+package seelog
+
+// Base struct for custom errors.
+type baseError struct {
+	message string
+}
+
+func (be baseError) Error() string {
+	return be.message
+}
diff --git a/vendor/github.com/cihub/seelog/internals_fsutils.go b/vendor/github.com/cihub/seelog/internals_fsutils.go
new file mode 100644
index 00000000..c0a0e0e4
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/internals_fsutils.go
@@ -0,0 +1,320 @@
+package seelog
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"sync"
+)
+
+// File and directory permissions.
+const (
+	defaultFilePermissions      = 0666
+	defaultDirectoryPermissions = 0767
+)
+
+const (
+	// Max number of directories that can be read asynchronously.
+	maxDirNumberReadAsync = 1000
+)
+
+type cannotOpenFileError struct {
+	baseError
+}
+
+func newCannotOpenFileError(fname string) *cannotOpenFileError {
+	return &cannotOpenFileError{baseError{message: "Cannot open file: " + fname}}
+}
+
+type notDirectoryError struct {
+	baseError
+}
+
+func newNotDirectoryError(dname string) *notDirectoryError {
+	return &notDirectoryError{baseError{message: dname + " is not a directory"}}
+}
+
+// fileFilter is a filtering criteria function for '*os.File'.
+// Must return 'false' to set aside the given file.
+type fileFilter func(os.FileInfo, *os.File) bool
+
+// filePathFilter is a filtering criteria function for a file path.
+// Must return 'false' to set aside the given file.
+type filePathFilter func(filePath string) bool
+
+// getSubdirNames returns a list of directories found in
+// the given directory dirPath.
+func getSubdirNames(dirPath string) ([]string, error) {
+	fi, err := os.Stat(dirPath)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.IsDir() {
+		return nil, newNotDirectoryError(dirPath)
+	}
+	dd, err := os.Open(dirPath)
+	// Cannot open file.
+	if err != nil {
+		if dd != nil {
+			dd.Close()
+		}
+		return nil, err
+	}
+	defer dd.Close()
+	// TODO: Improve performance by buffering reading.
+	allEntities, err := dd.Readdir(-1)
+	if err != nil {
+		return nil, err
+	}
+	subDirs := []string{}
+	for _, entity := range allEntities {
+		if entity.IsDir() {
+			subDirs = append(subDirs, entity.Name())
+		}
+	}
+	return subDirs, nil
+}
+
+// getAllSubdirAbsPaths recursively visits all the subdirectories
+// starting from the given directory and returns absolute paths for them.
+func getAllSubdirAbsPaths(dirPath string) (res []string, err error) {
+	dps, err := getSubdirAbsPaths(dirPath)
+	if err != nil {
+		res = []string{}
+		return
+	}
+	res = append(res, dps...)
+	for _, dp := range dps {
+		sdps, err := getAllSubdirAbsPaths(dp)
+		if err != nil {
+			return []string{}, err
+		}
+		res = append(res, sdps...)
+	}
+	return
+}
+
+// getSubdirAbsPaths supplies absolute paths for all subdirectories in a given directory.
+// Input: (I1) dirPath - absolute path of the directory in question.
+// Out: (O1) - slice of subdir absolute paths; (O2) - error of the operation.
+// Remark: If error (O2) is non-nil then (O1) is nil and vice versa.
+func getSubdirAbsPaths(dirPath string) ([]string, error) {
+	sdns, err := getSubdirNames(dirPath)
+	if err != nil {
+		return nil, err
+	}
+	rsdns := []string{}
+	for _, sdn := range sdns {
+		rsdns = append(rsdns, filepath.Join(dirPath, sdn))
+	}
+	return rsdns, nil
+}
+
+// getOpenFilesInDir supplies a slice of os.File pointers to files located in the directory.
+// Remark: Ignores files for which fFilter returns false.
+func getOpenFilesInDir(dirPath string, fFilter fileFilter) ([]*os.File, error) {
+	dfi, err := os.Open(dirPath)
+	if err != nil {
+		return nil, newCannotOpenFileError("Cannot open directory " + dirPath)
+	}
+	defer dfi.Close()
+	// Size of read buffer (i.e. chunk of items read at a time).
+	rbs := 64
+	resFiles := []*os.File{}
+L:
+	for {
+		// Read directory entities by reasonable chunks
+		// to prevent overflows on a big number of files.
+		fis, e := dfi.Readdir(rbs)
+		switch e {
+		// It's OK.
+		case nil:
+		// Do nothing, just continue cycle.
+		case io.EOF:
+			break L
+		// Something went wrong.
+		default:
+			return nil, e
+		}
+		// THINK: Maybe, use async running.
+		for _, fi := range fis {
+			// NB: On Linux this could be a problem as
+			// there are lots of file types available.
+			if !fi.IsDir() {
+				f, e := os.Open(filepath.Join(dirPath, fi.Name()))
+				if e != nil {
+					if f != nil {
+						f.Close()
+					}
+					// THINK: Add nil as indicator that a problem occurred.
+					resFiles = append(resFiles, nil)
+					continue
+				}
+				// Check filter condition.
+				if fFilter != nil && !fFilter(fi, f) {
+					continue
+				}
+				resFiles = append(resFiles, f)
+			}
+		}
+	}
+	return resFiles, nil
+}
+
+func isRegular(m os.FileMode) bool {
+	return m&os.ModeType == 0
+}
+
+// getDirFilePaths returns full paths of the files located in the directory.
+// Remark: Ignores files for which fpFilter returns false.
+func getDirFilePaths(dirPath string, fpFilter filePathFilter, pathIsName bool) ([]string, error) {
+	dfi, err := os.Open(dirPath)
+	if err != nil {
+		return nil, newCannotOpenFileError("Cannot open directory " + dirPath)
+	}
+	defer dfi.Close()
+
+	var absDirPath string
+	if !filepath.IsAbs(dirPath) {
+		absDirPath, err = filepath.Abs(dirPath)
+		if err != nil {
+			return nil, fmt.Errorf("cannot get absolute path of directory: %s", err.Error())
+		}
+	} else {
+		absDirPath = dirPath
+	}
+
+	// TODO: check if dirPath is really a directory.
+	// Size of read buffer (i.e. chunk of items read at a time).
+	rbs := 2 << 5
+	filePaths := []string{}
+
+	var fp string
+L:
+	for {
+		// Read directory entities by reasonable chunks
+		// to prevent overflows on a big number of files.
+		fis, e := dfi.Readdir(rbs)
+		switch e {
+		// It's OK.
+		case nil:
+		// Do nothing, just continue cycle.
+		case io.EOF:
+			break L
+		// Indicate that something went wrong.
+		default:
+			return nil, e
+		}
+		// THINK: Maybe, use async running.
+		for _, fi := range fis {
+			// NB: Should work on every Windows and non-Windows OS.
+			if isRegular(fi.Mode()) {
+				if pathIsName {
+					fp = fi.Name()
+				} else {
+					// Build the full path of a file.
+					fp = filepath.Join(absDirPath, fi.Name())
+				}
+				// Check filter condition.
+				if fpFilter != nil && !fpFilter(fp) {
+					continue
+				}
+				filePaths = append(filePaths, fp)
+			}
+		}
+	}
+	return filePaths, nil
+}
+
+// getOpenFilesByDirectoryAsync reads the directories 'dirPaths' asynchronously and inserts pairs
+// into map 'filesInDirMap': key - directory name, value - *os.File slice.
+func getOpenFilesByDirectoryAsync(
+	dirPaths []string,
+	fFilter fileFilter,
+	filesInDirMap map[string][]*os.File,
+) error {
+	n := len(dirPaths)
+	if n > maxDirNumberReadAsync {
+		return fmt.Errorf("number of input directories to be read exceeded max value %d", maxDirNumberReadAsync)
+	}
+	type filesInDirResult struct {
+		DirName string
+		Files   []*os.File
+		Error   error
+	}
+	dirFilesChan := make(chan *filesInDirResult, n)
+	var wg sync.WaitGroup
+	// Register n goroutines which are going to do work.
+	wg.Add(n)
+	for i := 0; i < n; i++ {
+		// Launch the piece of work asynchronously.
+		go func(dirPath string) {
+			fs, e := getOpenFilesInDir(dirPath, fFilter)
+			dirFilesChan <- &filesInDirResult{filepath.Base(dirPath), fs, e}
+			// Mark the current goroutine as finished (work is done).
+			wg.Done()
+		}(dirPaths[i])
+	}
+	// Wait for all goroutines to finish their work.
+	wg.Wait()
+	// Close the channel to let the for-range clause
+	// get all the buffered values without blocking and quit in the end.
+	close(dirFilesChan)
+	for fidr := range dirFilesChan {
+		if fidr.Error == nil {
+			// THINK: What will happen if the key is already present?
+			filesInDirMap[fidr.DirName] = fidr.Files
+		} else {
+			return fidr.Error
+		}
+	}
+	return nil
+}
+
+// fileExists returns a flag reporting whether a given file exists,
+// and an error if an unclassified failure occurs.
+func fileExists(path string) (bool, error) {
+	_, err := os.Stat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+// createDirectory creates a directory with a given name,
+// creating all parent directories if necessary.
+func createDirectory(dirPath string) error {
+	var dPath string
+	var err error
+	if !filepath.IsAbs(dirPath) {
+		dPath, err = filepath.Abs(dirPath)
+		if err != nil {
+			return err
+		}
+	} else {
+		dPath = dirPath
+	}
+	exists, err := fileExists(dPath)
+	if err != nil {
+		return err
+	}
+	if exists {
+		return nil
+	}
+	return os.MkdirAll(dPath, os.ModeDir)
+}
+
+// tryRemoveFile attempts to remove the file,
+// ignoring the error only when the file does not exist.
+func tryRemoveFile(filePath string) (err error) { + err = os.Remove(filePath) + if os.IsNotExist(err) { + err = nil + return + } + return +} diff --git a/vendor/github.com/cihub/seelog/internals_xmlnode.go b/vendor/github.com/cihub/seelog/internals_xmlnode.go new file mode 100644 index 00000000..98588493 --- /dev/null +++ b/vendor/github.com/cihub/seelog/internals_xmlnode.go @@ -0,0 +1,175 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+package seelog
+
+import (
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+type xmlNode struct {
+	name       string
+	attributes map[string]string
+	children   []*xmlNode
+	value      string
+}
+
+func newNode() *xmlNode {
+	node := new(xmlNode)
+	node.children = make([]*xmlNode, 0)
+	node.attributes = make(map[string]string)
+	return node
+}
+
+func (node *xmlNode) String() string {
+	str := fmt.Sprintf("<%s", node.name)
+
+	for attrName, attrVal := range node.attributes {
+		str += fmt.Sprintf(" %s=\"%s\"", attrName, attrVal)
+	}
+
+	str += ">"
+	str += node.value
+
+	if len(node.children) != 0 {
+		for _, child := range node.children {
+			str += child.String()
+		}
+	}
+
+	str += fmt.Sprintf("</%s>", node.name)
+
+	return str
+}
+
+func (node *xmlNode) unmarshal(startEl xml.StartElement) error {
+	node.name = startEl.Name.Local
+
+	for _, v := range startEl.Attr {
+		_, alreadyExists := node.attributes[v.Name.Local]
+		if alreadyExists {
+			return errors.New("tag '" + node.name + "' has duplicated attribute: '" + v.Name.Local + "'")
+		}
+		node.attributes[v.Name.Local] = v.Value
+	}
+
+	return nil
+}
+
+func (node *xmlNode) add(child *xmlNode) {
+	if node.children == nil {
+		node.children = make([]*xmlNode, 0)
+	}
+
+	node.children = append(node.children, child)
+}
+
+func (node *xmlNode) hasChildren() bool {
+	return node.children != nil && len(node.children) > 0
+}
+
+//=============================================
+
+func unmarshalConfig(reader io.Reader) (*xmlNode, error) {
+	xmlParser := xml.NewDecoder(reader)
+
+	config, err := unmarshalNode(xmlParser, nil)
+	if err != nil {
+		return nil, err
+	}
+	if config == nil {
+		return nil, errors.New("xml has no content")
+	}
+
+	nextConfigEntry, err := unmarshalNode(xmlParser, nil)
+	if nextConfigEntry != nil {
+		return nil, errors.New("xml contains more than one root element")
+	}
+
+	return config, nil
+}
+
+func unmarshalNode(xmlParser *xml.Decoder, curToken xml.Token) (node *xmlNode, err error) {
+	firstLoop := true
+	for {
+		var tok xml.Token
+		if firstLoop && curToken != nil {
+			tok = curToken
+			firstLoop = false
+		} else {
+			tok, err = getNextToken(xmlParser)
+			if err != nil || tok == nil {
+				return
+			}
+		}
+
+		switch tt := tok.(type) {
+		case xml.SyntaxError:
+			err = errors.New(tt.Error())
+			return
+		case xml.CharData:
+			value := strings.TrimSpace(string([]byte(tt)))
+			if node != nil {
+				node.value += value
+			}
+		case xml.StartElement:
+			if node == nil {
+				node = newNode()
+				err := node.unmarshal(tt)
+				if err != nil {
+					return nil, err
+				}
+			} else {
+				childNode, childErr := unmarshalNode(xmlParser, tok)
+				if childErr != nil {
+					return nil, childErr
+				}
+
+				if childNode != nil {
+					node.add(childNode)
+				} else {
+					return
+				}
+			}
+		case xml.EndElement:
+			return
+		}
+	}
+}
+
+func getNextToken(xmlParser *xml.Decoder) (tok xml.Token, err error) {
+	if tok, err = xmlParser.Token(); err != nil {
+		if err == io.EOF {
+			err = nil
+			return
+		}
+		return
+	}
+
+	return
+}
diff --git a/vendor/github.com/cihub/seelog/log.go b/vendor/github.com/cihub/seelog/log.go
new file mode 100644
index 00000000..f775e1fd
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/log.go
@@ -0,0 +1,307 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+)
+
+const (
+	staticFuncCallDepth = 3 // See 'commonLogger.log' method comments
+	loggerFuncCallDepth = 3
+)
+
+// Current is the logger used in all package level convenience funcs like 'Trace', 'Debug', 'Flush', etc.
+var Current LoggerInterface
+
+// Default logger that is created from an empty config: "<seelog/>". It is not closed by a ReplaceLogger call.
+var Default LoggerInterface
+
+// Disabled logger that doesn't produce any output in any circumstances. It is neither closed nor flushed by a ReplaceLogger call.
+var Disabled LoggerInterface
+
+var pkgOperationsMutex *sync.Mutex
+
+func init() {
+	pkgOperationsMutex = new(sync.Mutex)
+	var err error
+
+	if Default == nil {
+		Default, err = LoggerFromConfigAsBytes([]byte("<seelog/>"))
+	}
+
+	if Disabled == nil {
+		Disabled, err = LoggerFromConfigAsBytes([]byte("<seelog levels=\"off\"/>"))
+	}
+
+	if err != nil {
+		panic(fmt.Sprintf("Seelog couldn't start. Error: %s", err.Error()))
+	}
+
+	Current = Default
+}
+
+func createLoggerFromFullConfig(config *configForParsing) (LoggerInterface, error) {
+	if config.LogType == syncloggerTypeFromString {
+		return NewSyncLogger(&config.logConfig), nil
+	} else if config.LogType == asyncLooploggerTypeFromString {
+		return NewAsyncLoopLogger(&config.logConfig), nil
+	} else if config.LogType == asyncTimerloggerTypeFromString {
+		logData := config.LoggerData
+		if logData == nil {
+			return nil, errors.New("async timer data not set")
+		}
+
+		asyncInt, ok := logData.(asyncTimerLoggerData)
+		if !ok {
+			return nil, errors.New("invalid async timer data")
+		}
+
+		logger, err := NewAsyncTimerLogger(&config.logConfig, time.Duration(asyncInt.AsyncInterval))
+		if err != nil {
+			return nil, err
+		}
+
+		return logger, nil
+	} else if config.LogType == adaptiveLoggerTypeFromString {
+		logData := config.LoggerData
+		if logData == nil {
+			return nil, errors.New("adaptive logger parameters not set")
+		}
+
+		adaptData, ok := logData.(adaptiveLoggerData)
+		if !ok {
+			return nil, errors.New("invalid adaptive logger parameters")
+		}
+
+		logger, err := NewAsyncAdaptiveLogger(
+			&config.logConfig,
+			time.Duration(adaptData.MinInterval),
+			time.Duration(adaptData.MaxInterval),
+			adaptData.CriticalMsgCount,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		return logger, nil
+	}
+	return nil, errors.New("invalid config log type/data")
+}
+
+// UseLogger sets the 'Current' package level logger variable to the specified value.
+// This variable is used in all Trace/Debug/... package level convenience funcs.
+//
+// Example:
+//
+// after calling
+//	seelog.UseLogger(somelogger)
+// the following:
+//	seelog.Debug("abc")
+// will be equal to
+//	somelogger.Debug("abc")
+//
+// IMPORTANT: UseLogger does NOT close the previous logger (it only flushes it). So if
+// you constantly use it to replace loggers and don't close them in other code, you'll
+// end up having memory leaks.
+//
+// To safely replace loggers, use ReplaceLogger.
+func UseLogger(logger LoggerInterface) error {
+	if logger == nil {
+		return errors.New("logger can not be nil")
+	}
+
+	pkgOperationsMutex.Lock()
+	defer pkgOperationsMutex.Unlock()
+
+	oldLogger := Current
+	Current = logger
+
+	if oldLogger != nil {
+		oldLogger.Flush()
+	}
+
+	return nil
+}
+
+// ReplaceLogger acts as UseLogger but the logger that was previously
+// used is disposed (except Default and Disabled loggers).
+//
+// Example:
+//	import log "github.com/cihub/seelog"
+//
+//	func main() {
+//		logger, err := log.LoggerFromConfigAsFile("seelog.xml")
+//
+//		if err != nil {
+//			panic(err)
+//		}
+//
+//		log.ReplaceLogger(logger)
+//		defer log.Flush()
+//
+//		log.Trace("test")
+//		log.Debugf("var = %s", "abc")
+//	}
+func ReplaceLogger(logger LoggerInterface) error {
+	if logger == nil {
+		return errors.New("logger can not be nil")
+	}
+
+	pkgOperationsMutex.Lock()
+	defer pkgOperationsMutex.Unlock()
+
+	defer func() {
+		if err := recover(); err != nil {
+			reportInternalError(fmt.Errorf("recovered from panic during ReplaceLogger: %s", err))
+		}
+	}()
+
+	if Current == Default {
+		Current.Flush()
+	} else if Current != nil && !Current.Closed() && Current != Disabled {
+		Current.Flush()
+		Current.Close()
+	}
+
+	Current = logger
+
+	return nil
+}
+
+// Tracef formats message according to format specifier
+// and writes to default logger with log level = Trace.
+func Tracef(format string, params ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.traceWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) +} + +// Debugf formats message according to format specifier +// and writes to default logger with log level = Debug. +func Debugf(format string, params ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.debugWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) +} + +// Infof formats message according to format specifier +// and writes to default logger with log level = Info. +func Infof(format string, params ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.infoWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) +} + +// Warnf formats message according to format specifier and writes to default logger with log level = Warn +func Warnf(format string, params ...interface{}) error { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + message := newLogFormattedMessage(format, params) + Current.warnWithCallDepth(staticFuncCallDepth, message) + return errors.New(message.String()) +} + +// Errorf formats message according to format specifier and writes to default logger with log level = Error +func Errorf(format string, params ...interface{}) error { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + message := newLogFormattedMessage(format, params) + Current.errorWithCallDepth(staticFuncCallDepth, message) + return errors.New(message.String()) +} + +// Criticalf formats message according to format specifier and writes to default logger with log level = Critical +func Criticalf(format string, params ...interface{}) error { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + message := newLogFormattedMessage(format, params) + Current.criticalWithCallDepth(staticFuncCallDepth, message) + return errors.New(message.String()) +} + +// Trace formats message using the default formats for its operands and writes to default logger with log level = Trace +func Trace(v ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.traceWithCallDepth(staticFuncCallDepth, newLogMessage(v)) +} + +// Debug formats message using the default formats for its operands and writes to default logger with log level = Debug +func Debug(v ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.debugWithCallDepth(staticFuncCallDepth, newLogMessage(v)) +} + +// Info formats message using the default formats for its operands and writes to default logger with log level = Info +func Info(v ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.infoWithCallDepth(staticFuncCallDepth, newLogMessage(v)) +} + +// Warn formats message using the default formats for its operands and writes to default logger with log level = Warn +func Warn(v ...interface{}) error { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + message := newLogMessage(v) + Current.warnWithCallDepth(staticFuncCallDepth, message) + return errors.New(message.String()) +} + +// Error formats message using the default formats for its operands and writes to default logger with log level = Error +func Error(v ...interface{}) error { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + message := newLogMessage(v) + Current.errorWithCallDepth(staticFuncCallDepth, message) 
+	return errors.New(message.String())
+}
+
+// Critical formats message using the default formats for its operands and writes to default logger with log level = Critical
+func Critical(v ...interface{}) error {
+	pkgOperationsMutex.Lock()
+	defer pkgOperationsMutex.Unlock()
+	message := newLogMessage(v)
+	Current.criticalWithCallDepth(staticFuncCallDepth, message)
+	return errors.New(message.String())
+}
+
+// Flush immediately processes all currently queued messages and all currently buffered messages.
+// It is a blocking call which returns only after the queue is empty and all the buffers are empty.
+//
+// If Flush is called for a synchronous logger (type='sync'), it only flushes buffers (e.g. '<buffered>' receivers),
+// because there is no queue.
+//
+// Call this method when your app is about to shut down so that no log messages are lost.
func Flush() {
+	pkgOperationsMutex.Lock()
+	defer pkgOperationsMutex.Unlock()
+	Current.Flush()
+}
diff --git a/vendor/github.com/cihub/seelog/logger.go b/vendor/github.com/cihub/seelog/logger.go
new file mode 100644
index 00000000..fc96aed4
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/logger.go
@@ -0,0 +1,370 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"sync"
+)
+
+func reportInternalError(err error) {
+	fmt.Fprintf(os.Stderr, "seelog internal error: %s\n", err)
+}
+
+// LoggerInterface represents structs capable of logging Seelog messages
+type LoggerInterface interface {
+
+	// Tracef formats message according to format specifier
+	// and writes to log with level = Trace.
+	Tracef(format string, params ...interface{})
+
+	// Debugf formats message according to format specifier
+	// and writes to log with level = Debug.
+	Debugf(format string, params ...interface{})
+
+	// Infof formats message according to format specifier
+	// and writes to log with level = Info.
+	Infof(format string, params ...interface{})
+
+	// Warnf formats message according to format specifier
+	// and writes to log with level = Warn.
+	Warnf(format string, params ...interface{}) error
+
+	// Errorf formats message according to format specifier
+	// and writes to log with level = Error.
+	Errorf(format string, params ...interface{}) error
+
+	// Criticalf formats message according to format specifier
+	// and writes to log with level = Critical.
+	Criticalf(format string, params ...interface{}) error
+
+	// Trace formats message using the default formats for its operands
+	// and writes to log with level = Trace
+	Trace(v ...interface{})
+
+	// Debug formats message using the default formats for its operands
+	// and writes to log with level = Debug
+	Debug(v ...interface{})
+
+	// Info formats message using the default formats for its operands
+	// and writes to log with level = Info
+	Info(v ...interface{})
+
+	// Warn formats message using the default formats for its operands
+	// and writes to log with level = Warn
+	Warn(v ...interface{}) error
+
+	// Error formats message using the default formats for its operands
+	// and writes to log with level = Error
+	Error(v ...interface{}) error
+
+	// Critical formats message using the default formats for its operands
+	// and writes to log with level = Critical
+	Critical(v ...interface{}) error
+
+	traceWithCallDepth(callDepth int, message fmt.Stringer)
+	debugWithCallDepth(callDepth int, message fmt.Stringer)
+	infoWithCallDepth(callDepth int, message fmt.Stringer)
+	warnWithCallDepth(callDepth int, message fmt.Stringer)
+	errorWithCallDepth(callDepth int, message fmt.Stringer)
+	criticalWithCallDepth(callDepth int, message fmt.Stringer)
+
+	// Close flushes all the messages in the logger and closes it. It cannot be used after this operation.
+	Close()
+
+	// Flush flushes all the messages in the logger.
+	Flush()
+
+	// Closed returns true if the logger was previously closed.
+	Closed() bool
+
+	// SetAdditionalStackDepth sets the additional number of frames to skip by runtime.Caller
+	// when getting function information needed to print seelog format identifiers such as %Func or %File.
+	//
+	// This func may be used when you wrap seelog funcs and want to print caller info of your own
+	// wrappers instead of seelog func callers. In this case you should set depth = 1. If you then
+	// wrap your wrapper, you should set depth = 2, etc.
+	//
+	// NOTE: Incorrect depth value may lead to errors in runtime.Caller evaluation or incorrect
+	// function/file names in log files. Do not use it if you are not going to wrap seelog funcs.
+	// You may reset the value to default using a SetAdditionalStackDepth(0) call.
+	SetAdditionalStackDepth(depth int) error
+
+	// SetContext sets the logger context that can be used in formatter funcs and custom receivers.
+	SetContext(context interface{})
+}
+
+// innerLoggerInterface is an internal logging interface
+type innerLoggerInterface interface {
+	innerLog(level LogLevel, context LogContextInterface, message fmt.Stringer)
+	Flush()
+}
+
+// [file path][func name][level] -> [allowed]
+type allowedContextCache map[string]map[string]map[LogLevel]bool
+
+// commonLogger contains all common data needed for logging and contains methods used to log messages.
+type commonLogger struct {
+	config       *logConfig          // Config used for logging
+	contextCache allowedContextCache // Caches whether log is enabled for specific "full path-func name-level" sets
+	closed       bool                // 'true' when all writers are closed, all data is flushed, logger is unusable.
Must be accessed while holding closedM + closedM sync.RWMutex + m sync.Mutex // Mutex for main operations + unusedLevels []bool + innerLogger innerLoggerInterface + addStackDepth int // Additional stack depth needed for correct seelog caller context detection + customContext interface{} +} + +func newCommonLogger(config *logConfig, internalLogger innerLoggerInterface) *commonLogger { + cLogger := new(commonLogger) + + cLogger.config = config + cLogger.contextCache = make(allowedContextCache) + cLogger.unusedLevels = make([]bool, Off) + cLogger.fillUnusedLevels() + cLogger.innerLogger = internalLogger + + return cLogger +} + +func (cLogger *commonLogger) SetAdditionalStackDepth(depth int) error { + if depth < 0 { + return fmt.Errorf("negative depth: %d", depth) + } + cLogger.m.Lock() + cLogger.addStackDepth = depth + cLogger.m.Unlock() + return nil +} + +func (cLogger *commonLogger) Tracef(format string, params ...interface{}) { + cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) +} + +func (cLogger *commonLogger) Debugf(format string, params ...interface{}) { + cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) +} + +func (cLogger *commonLogger) Infof(format string, params ...interface{}) { + cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) +} + +func (cLogger *commonLogger) Warnf(format string, params ...interface{}) error { + message := newLogFormattedMessage(format, params) + cLogger.warnWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) Errorf(format string, params ...interface{}) error { + message := newLogFormattedMessage(format, params) + cLogger.errorWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) Criticalf(format string, params ...interface{}) error { + message := newLogFormattedMessage(format, params) + cLogger.criticalWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) Trace(v ...interface{}) { + cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) +} + +func (cLogger *commonLogger) Debug(v ...interface{}) { + cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) +} + +func (cLogger *commonLogger) Info(v ...interface{}) { + cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) +} + +func (cLogger *commonLogger) Warn(v ...interface{}) error { + message := newLogMessage(v) + cLogger.warnWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) Error(v ...interface{}) error { + message := newLogMessage(v) + cLogger.errorWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) Critical(v ...interface{}) error { + message := newLogMessage(v) + cLogger.criticalWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) SetContext(c interface{}) { + cLogger.customContext = c +} + +func (cLogger *commonLogger) traceWithCallDepth(callDepth int, message fmt.Stringer) { + cLogger.log(TraceLvl, message, callDepth) +} + +func (cLogger *commonLogger) debugWithCallDepth(callDepth int, message fmt.Stringer) { + cLogger.log(DebugLvl, message, callDepth) +} + +func (cLogger *commonLogger) infoWithCallDepth(callDepth int, message fmt.Stringer) { + cLogger.log(InfoLvl, message, 
callDepth)
+}
+
+func (cLogger *commonLogger) warnWithCallDepth(callDepth int, message fmt.Stringer) {
+	cLogger.log(WarnLvl, message, callDepth)
+}
+
+func (cLogger *commonLogger) errorWithCallDepth(callDepth int, message fmt.Stringer) {
+	cLogger.log(ErrorLvl, message, callDepth)
+}
+
+func (cLogger *commonLogger) criticalWithCallDepth(callDepth int, message fmt.Stringer) {
+	cLogger.log(CriticalLvl, message, callDepth)
+	cLogger.innerLogger.Flush()
+}
+
+func (cLogger *commonLogger) Closed() bool {
+	cLogger.closedM.RLock()
+	defer cLogger.closedM.RUnlock()
+	return cLogger.closed
+}
+
+func (cLogger *commonLogger) fillUnusedLevels() {
+	for i := 0; i < len(cLogger.unusedLevels); i++ {
+		cLogger.unusedLevels[i] = true
+	}
+
+	cLogger.fillUnusedLevelsByConstraint(cLogger.config.Constraints)
+
+	for _, exception := range cLogger.config.Exceptions {
+		cLogger.fillUnusedLevelsByConstraint(exception)
+	}
+}
+
+func (cLogger *commonLogger) fillUnusedLevelsByConstraint(constraint logLevelConstraints) {
+	for i := 0; i < len(cLogger.unusedLevels); i++ {
+		if constraint.IsAllowed(LogLevel(i)) {
+			cLogger.unusedLevels[i] = false
+		}
+	}
+}
+
+// stackCallDepth is used to indicate the call depth of the 'log' func.
+// This depth level is used in the runtime.Caller(...) call. See
+// common_context.go -> specifyContext, extractCallerInfo for details.
+func (cLogger *commonLogger) log(level LogLevel, message fmt.Stringer, stackCallDepth int) {
+	if cLogger.unusedLevels[level] {
+		return
+	}
+	cLogger.m.Lock()
+	defer cLogger.m.Unlock()
+
+	if cLogger.Closed() {
+		return
+	}
+	context, _ := specifyContext(stackCallDepth+cLogger.addStackDepth, cLogger.customContext)
+	// Context errors are not reported because there are situations
+	// in which context errors are normal Seelog usage cases. For
+	// example in executables with stripped symbols.
+	// Error contexts are returned instead. See common_context.go.
+ /*if err != nil { + reportInternalError(err) + return + }*/ + cLogger.innerLogger.innerLog(level, context, message) +} + +func (cLogger *commonLogger) processLogMsg(level LogLevel, message fmt.Stringer, context LogContextInterface) { + defer func() { + if err := recover(); err != nil { + reportInternalError(fmt.Errorf("recovered from panic during message processing: %s", err)) + } + }() + if cLogger.config.IsAllowed(level, context) { + cLogger.config.RootDispatcher.Dispatch(message.String(), level, context, reportInternalError) + } +} + +func (cLogger *commonLogger) isAllowed(level LogLevel, context LogContextInterface) bool { + funcMap, ok := cLogger.contextCache[context.FullPath()] + if !ok { + funcMap = make(map[string]map[LogLevel]bool, 0) + cLogger.contextCache[context.FullPath()] = funcMap + } + + levelMap, ok := funcMap[context.Func()] + if !ok { + levelMap = make(map[LogLevel]bool, 0) + funcMap[context.Func()] = levelMap + } + + isAllowValue, ok := levelMap[level] + if !ok { + isAllowValue = cLogger.config.IsAllowed(level, context) + levelMap[level] = isAllowValue + } + + return isAllowValue +} + +type logMessage struct { + params []interface{} +} + +type logFormattedMessage struct { + format string + params []interface{} +} + +func newLogMessage(params []interface{}) fmt.Stringer { + message := new(logMessage) + + message.params = params + + return message +} + +func newLogFormattedMessage(format string, params []interface{}) *logFormattedMessage { + message := new(logFormattedMessage) + + message.params = params + message.format = format + + return message +} + +func (message *logMessage) String() string { + return fmt.Sprint(message.params...) +} + +func (message *logFormattedMessage) String() string { + return fmt.Sprintf(message.format, message.params...) +} diff --git a/vendor/github.com/cihub/seelog/writers_bufferedwriter.go b/vendor/github.com/cihub/seelog/writers_bufferedwriter.go new file mode 100644 index 00000000..37d75c82 --- /dev/null +++ b/vendor/github.com/cihub/seelog/writers_bufferedwriter.go @@ -0,0 +1,161 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+package seelog
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+	"time"
+)
+
+// bufferedWriter stores data in memory and flushes it every flushPeriod or when the buffer is full
+type bufferedWriter struct {
+	flushPeriod time.Duration // interval between automatic data flushes
+	bufferMutex *sync.Mutex   // mutex for buffer operations synchronization
+	innerWriter io.Writer     // inner writer
+	buffer      *bufio.Writer // buffered wrapper for inner writer
+	bufferSize  int           // max size of data chunk in bytes
+}
+
+// NewBufferedWriter creates a new buffered writer struct.
+// bufferSize -- size of memory buffer in bytes
+// flushPeriod -- period in which data flushes from memory buffer in milliseconds. 0 - turn off this functionality
+func NewBufferedWriter(innerWriter io.Writer, bufferSize int, flushPeriod time.Duration) (*bufferedWriter, error) {
+
+	if innerWriter == nil {
+		return nil, errors.New("argument is nil: innerWriter")
+	}
+	if flushPeriod < 0 {
+		return nil, fmt.Errorf("flushPeriod can not be less than 0. Got: %d", flushPeriod)
+	}
+
+	if bufferSize <= 0 {
+		return nil, fmt.Errorf("bufferSize can not be less or equal to 0. Got: %d", bufferSize)
+	}
+
+	buffer := bufio.NewWriterSize(innerWriter, bufferSize)
+
+	newWriter := new(bufferedWriter)
+
+	newWriter.innerWriter = innerWriter
+	newWriter.buffer = buffer
+	newWriter.bufferSize = bufferSize
+	newWriter.flushPeriod = flushPeriod * 1e6 // the caller passes milliseconds; convert to nanoseconds
+	newWriter.bufferMutex = new(sync.Mutex)
+
+	if flushPeriod != 0 {
+		go newWriter.flushPeriodically()
+	}
+
+	return newWriter, nil
+}
+
+func (bufWriter *bufferedWriter) writeBigChunk(bytes []byte) (n int, err error) {
+	bufferedLen := bufWriter.buffer.Buffered()
+
+	n, err = bufWriter.flushInner()
+	if err != nil {
+		return
+	}
+
+	written, writeErr := bufWriter.innerWriter.Write(bytes)
+	return bufferedLen + written, writeErr
+}
+
+// Write stores the data in the memory buffer, flushing the buffer to the inner
+// writer first when the new chunk would not fit.
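+//
+// A minimal usage sketch (the writer and the sizes are illustrative, not defaults):
+//
+//	w, err := NewBufferedWriter(os.Stdout, 4096, 500) // 4 KB buffer, flush every 500 ms
+//	if err != nil {
+//		panic(err)
+//	}
+//	w.Write([]byte("buffered log line\n"))
+//	w.Flush() // force out anything still buffered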
+func (bufWriter *bufferedWriter) Write(bytes []byte) (n int, err error) {
+
+	bufWriter.bufferMutex.Lock()
+	defer bufWriter.bufferMutex.Unlock()
+
+	bytesLen := len(bytes)
+
+	if bytesLen > bufWriter.bufferSize {
+		return bufWriter.writeBigChunk(bytes)
+	}
+
+	if bytesLen > bufWriter.buffer.Available() {
+		n, err = bufWriter.flushInner()
+		if err != nil {
+			return
+		}
+	}
+
+	bufWriter.buffer.Write(bytes)
+
+	return len(bytes), nil
+}
+
+func (bufWriter *bufferedWriter) Close() error {
+	closer, ok := bufWriter.innerWriter.(io.Closer)
+	if ok {
+		return closer.Close()
+	}
+
+	return nil
+}
+
+func (bufWriter *bufferedWriter) Flush() {
+
+	bufWriter.bufferMutex.Lock()
+	defer bufWriter.bufferMutex.Unlock()
+
+	bufWriter.flushInner()
+}
+
+// flushInner flushes the buffer to the inner writer and reports how many
+// buffered bytes were written out.
+func (bufWriter *bufferedWriter) flushInner() (n int, err error) {
+	bufferedLen := bufWriter.buffer.Buffered()
+	flushErr := bufWriter.buffer.Flush()
+
+	return bufferedLen - bufWriter.buffer.Buffered(), flushErr
+}
+
+func (bufWriter *bufferedWriter) flushBuffer() {
+	bufWriter.bufferMutex.Lock()
+	defer bufWriter.bufferMutex.Unlock()
+
+	bufWriter.buffer.Flush()
+}
+
+func (bufWriter *bufferedWriter) flushPeriodically() {
+	if bufWriter.flushPeriod > 0 {
+		ticker := time.NewTicker(bufWriter.flushPeriod)
+		for {
+			<-ticker.C
+			bufWriter.flushBuffer()
+		}
+	}
+}
+
+func (bufWriter *bufferedWriter) String() string {
+	return fmt.Sprintf("bufferedWriter size: %d, flushPeriod: %d", bufWriter.bufferSize, bufWriter.flushPeriod)
+}
diff --git a/vendor/github.com/cihub/seelog/writers_connwriter.go b/vendor/github.com/cihub/seelog/writers_connwriter.go
new file mode 100644
index 00000000..d199894e
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/writers_connwriter.go
@@ -0,0 +1,144 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net"
+)
+
+// connWriter is used to write to a stream-oriented network connection.
+type connWriter struct {
+	innerWriter    io.WriteCloser
+	reconnectOnMsg bool
+	reconnect      bool
+	net            string
+	addr           string
+	useTLS         bool
+	configTLS      *tls.Config
+}
+
+// Creates writer to the address addr on the network netName.
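+// The connection is established lazily on the first Write. A short usage
+// sketch (the endpoint below is an assumed example, not a default):
+//
+//	w := NewConnWriter("tcp", "127.0.0.1:5140", false)
+//	defer w.Close()
+//	w.Write([]byte("remote log line\n"))
+//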
+// Connection will be opened on each write if reconnectOnMsg = true +func NewConnWriter(netName string, addr string, reconnectOnMsg bool) *connWriter { + newWriter := new(connWriter) + + newWriter.net = netName + newWriter.addr = addr + newWriter.reconnectOnMsg = reconnectOnMsg + + return newWriter +} + +// Creates a writer that uses SSL/TLS +func newTLSWriter(netName string, addr string, reconnectOnMsg bool, config *tls.Config) *connWriter { + newWriter := new(connWriter) + + newWriter.net = netName + newWriter.addr = addr + newWriter.reconnectOnMsg = reconnectOnMsg + newWriter.useTLS = true + newWriter.configTLS = config + + return newWriter +} + +func (connWriter *connWriter) Close() error { + if connWriter.innerWriter == nil { + return nil + } + + return connWriter.innerWriter.Close() +} + +func (connWriter *connWriter) Write(bytes []byte) (n int, err error) { + if connWriter.neededConnectOnMsg() { + err = connWriter.connect() + if err != nil { + return 0, err + } + } + + if connWriter.reconnectOnMsg { + defer connWriter.innerWriter.Close() + } + + n, err = connWriter.innerWriter.Write(bytes) + if err != nil { + connWriter.reconnect = true + } + + return +} + +func (connWriter *connWriter) String() string { + return fmt.Sprintf("Conn writer: [%s, %s, %v]", connWriter.net, connWriter.addr, connWriter.reconnectOnMsg) +} + +func (connWriter *connWriter) connect() error { + if connWriter.innerWriter != nil { + connWriter.innerWriter.Close() + connWriter.innerWriter = nil + } + + if connWriter.useTLS { + conn, err := tls.Dial(connWriter.net, connWriter.addr, connWriter.configTLS) + if err != nil { + return err + } + connWriter.innerWriter = conn + + return nil + } + + conn, err := net.Dial(connWriter.net, connWriter.addr) + if err != nil { + return err + } + + tcpConn, ok := conn.(*net.TCPConn) + if ok { + tcpConn.SetKeepAlive(true) + } + + connWriter.innerWriter = conn + + return nil +} + +func (connWriter *connWriter) neededConnectOnMsg() bool { + if connWriter.reconnect { + connWriter.reconnect = false + return true + } + + if connWriter.innerWriter == nil { + return true + } + + return connWriter.reconnectOnMsg +} diff --git a/vendor/github.com/cihub/seelog/writers_consolewriter.go b/vendor/github.com/cihub/seelog/writers_consolewriter.go new file mode 100644 index 00000000..3eb79afa --- /dev/null +++ b/vendor/github.com/cihub/seelog/writers_consolewriter.go @@ -0,0 +1,47 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import "fmt"
+
+// consoleWriter is used to write to console
+type consoleWriter struct {
+}
+
+// NewConsoleWriter creates a new console writer. The returned error is
+// currently always nil; the signature matches the other writer constructors.
+func NewConsoleWriter() (writer *consoleWriter, err error) {
+	newWriter := new(consoleWriter)
+
+	return newWriter, nil
+}
+
+// Write prints the given bytes to standard output.
+func (console *consoleWriter) Write(bytes []byte) (int, error) {
+	return fmt.Print(string(bytes))
+}
+
+func (console *consoleWriter) String() string {
+	return "Console writer"
+}
diff --git a/vendor/github.com/cihub/seelog/writers_filewriter.go b/vendor/github.com/cihub/seelog/writers_filewriter.go
new file mode 100644
index 00000000..8d3ae270
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/writers_filewriter.go
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// fileWriter is used to write to a file.
+type fileWriter struct {
+	innerWriter io.WriteCloser
+	fileName    string
+}
+
+// NewFileWriter creates a writer for the given file name. The folder and the
+// file itself are created lazily, on the first Write call.
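+//
+// A short sketch of the lazy-create behavior (the path is a hypothetical example):
+//
+//	fw, _ := NewFileWriter("logs/app.log")
+//	fw.Write([]byte("first write creates logs/ and app.log\n"))
+//	fw.Close()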
+func NewFileWriter(fileName string) (writer *fileWriter, err error) {
+	newWriter := new(fileWriter)
+	newWriter.fileName = fileName
+
+	return newWriter, nil
+}
+
+func (fw *fileWriter) Close() error {
+	if fw.innerWriter != nil {
+		err := fw.innerWriter.Close()
+		if err != nil {
+			return err
+		}
+		fw.innerWriter = nil
+	}
+	return nil
+}
+
+// Write creates the folder and the file on the first call, then appends.
+func (fw *fileWriter) Write(bytes []byte) (n int, err error) {
+	if fw.innerWriter == nil {
+		if err := fw.createFile(); err != nil {
+			return 0, err
+		}
+	}
+	return fw.innerWriter.Write(bytes)
+}
+
+func (fw *fileWriter) createFile() error {
+	folder, _ := filepath.Split(fw.fileName)
+	var err error
+
+	if len(folder) != 0 {
+		err = os.MkdirAll(folder, defaultDirectoryPermissions)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Open the file in append mode, creating it if it does not exist yet.
+	fw.innerWriter, err = os.OpenFile(fw.fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions)
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (fw *fileWriter) String() string {
+	return fmt.Sprintf("File writer: %s", fw.fileName)
+}
diff --git a/vendor/github.com/cihub/seelog/writers_formattedwriter.go b/vendor/github.com/cihub/seelog/writers_formattedwriter.go
new file mode 100644
index 00000000..bf44a410
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/writers_formattedwriter.go
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+type formattedWriter struct {
+	writer    io.Writer
+	formatter *formatter
+}
+
+func NewFormattedWriter(writer io.Writer, formatter *formatter) (*formattedWriter, error) {
+	if formatter == nil {
+		return nil, errors.New("formatter can not be nil")
+	}
+
+	return &formattedWriter{writer, formatter}, nil
+}
+
+func (formattedWriter *formattedWriter) Write(message string, level LogLevel, context LogContextInterface) error {
+	str := formattedWriter.formatter.Format(message, level, context)
+	_, err := formattedWriter.writer.Write([]byte(str))
+	return err
+}
+
+func (formattedWriter *formattedWriter) String() string {
+	return fmt.Sprintf("writer: %s, format: %s", formattedWriter.writer, formattedWriter.formatter)
+}
+
+func (formattedWriter *formattedWriter) Writer() io.Writer {
+	return formattedWriter.writer
+}
+
+func (formattedWriter *formattedWriter) Format() *formatter {
+	return formattedWriter.formatter
+}
diff --git a/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go b/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go
new file mode 100644
index 00000000..9535a579
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go
@@ -0,0 +1,763 @@
+// Copyright (c) 2013 - Cloud Instruments Co., Ltd.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package seelog
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/cihub/seelog/archive"
+	"github.com/cihub/seelog/archive/gzip"
+	"github.com/cihub/seelog/archive/tar"
+	"github.com/cihub/seelog/archive/zip"
+)
+
+// Common constants
+const (
+	rollingLogHistoryDelimiter = "."
+)
+
+// Types of the rolling writer: roll by size, by date, etc.
+type rollingType uint8
+
+const (
+	rollingTypeSize = iota
+	rollingTypeTime
+)
+
+// Types of the rolled file naming mode: prefix, postfix, etc.
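+// For a log file "app.log" and the history delimiter "." this yields, e.g.:
+//
+//	postfix: app.log.1, app.log.2, ...
+//	prefix:  1.app.log, 2.app.log, ...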
+type rollingNameMode uint8
+
+const (
+	rollingNameModePostfix = iota
+	rollingNameModePrefix
+)
+
+var rollingNameModesStringRepresentation = map[rollingNameMode]string{
+	rollingNameModePostfix: "postfix",
+	rollingNameModePrefix:  "prefix",
+}
+
+func rollingNameModeFromString(rollingNameStr string) (rollingNameMode, bool) {
+	for tp, tpStr := range rollingNameModesStringRepresentation {
+		if tpStr == rollingNameStr {
+			return tp, true
+		}
+	}
+
+	return 0, false
+}
+
+var rollingTypesStringRepresentation = map[rollingType]string{
+	rollingTypeSize: "size",
+	rollingTypeTime: "date",
+}
+
+func rollingTypeFromString(rollingTypeStr string) (rollingType, bool) {
+	for tp, tpStr := range rollingTypesStringRepresentation {
+		if tpStr == rollingTypeStr {
+			return tp, true
+		}
+	}
+
+	return 0, false
+}
+
+// Old log archiving type.
+type rollingArchiveType uint8
+
+const (
+	rollingArchiveNone = iota
+	rollingArchiveZip
+	rollingArchiveGzip
+)
+
+var rollingArchiveTypesStringRepresentation = map[rollingArchiveType]string{
+	rollingArchiveNone: "none",
+	rollingArchiveZip:  "zip",
+	rollingArchiveGzip: "gzip",
+}
+
+type archiver func(f *os.File, exploded bool) archive.WriteCloser
+
+type unarchiver func(f *os.File) (archive.ReadCloser, error)
+
+type compressionType struct {
+	extension             string
+	handleMultipleEntries bool
+	archiver              archiver
+	unarchiver            unarchiver
+}
+
+var compressionTypes = map[rollingArchiveType]compressionType{
+	rollingArchiveZip: {
+		extension:             ".zip",
+		handleMultipleEntries: true,
+		archiver: func(f *os.File, _ bool) archive.WriteCloser {
+			return zip.NewWriter(f)
+		},
+		unarchiver: func(f *os.File) (archive.ReadCloser, error) {
+			fi, err := f.Stat()
+			if err != nil {
+				return nil, err
+			}
+			r, err := zip.NewReader(f, fi.Size())
+			if err != nil {
+				return nil, err
+			}
+			return archive.NopCloser(r), nil
+		},
+	},
+	rollingArchiveGzip: {
+		extension:             ".gz",
+		handleMultipleEntries: false,
+		archiver: func(f *os.File, exploded bool) archive.WriteCloser {
+			gw := gzip.NewWriter(f)
+			if exploded {
+				return gw
+			}
+			return tar.NewWriteMultiCloser(gw, gw)
+		},
+		unarchiver: func(f *os.File) (archive.ReadCloser, error) {
+			gr, err := gzip.NewReader(f, f.Name())
+			if err != nil {
+				return nil, err
+			}
+
+			// Determine if the gzip is a tar
+			tr := tar.NewReader(gr)
+			_, err = tr.Next()
+			isTar := err == nil
+
+			// Reset to beginning of file
+			if _, err := f.Seek(0, io.SeekStart); err != nil {
+				return nil, err
+			}
+			gr.Reset(f)
+
+			if isTar {
+				return archive.NopCloser(tar.NewReader(gr)), nil
+			}
+			return gr, nil
+		},
+	},
+}
+
+func (compressionType *compressionType) rollingArchiveTypeName(name string, exploded bool) string {
+	if !compressionType.handleMultipleEntries && !exploded {
+		return name + ".tar" + compressionType.extension
+	}
+	return name + compressionType.extension
+}
+
+func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveType, bool) {
+	for tp, tpStr := range rollingArchiveTypesStringRepresentation {
+		if tpStr == rollingArchiveTypeStr {
+			return tp, true
+		}
+	}
+
+	return 0, false
+}
+
+// Default name for exploded archives.
+var rollingArchiveDefaultExplodedName = "old"
+
+func rollingArchiveTypeDefaultName(archiveType rollingArchiveType, exploded bool) (string, error) {
+	compressionType, ok := compressionTypes[archiveType]
+	if !ok {
+		return "", fmt.Errorf("cannot get default filename for archive type = %v", archiveType)
+	}
+	return compressionType.rollingArchiveTypeName("log", exploded), nil
+}
+
+// rollerVirtual is an interface that represents all virtual funcs that are
+// called in different rolling writer subtypes.
+type rollerVirtual interface {
+	needsToRoll() bool                                  // Returns true if needs to switch to another file.
+	isFileRollNameValid(rname string) bool              // Returns true if logger roll file name (postfix/prefix/etc.) is ok.
+	sortFileRollNamesAsc(fs []string) ([]string, error) // Sorts logger roll file names in ascending order of their creation by logger.
+
+	// getNewHistoryRollFileName is called whenever we are about to roll the
+	// current log file. It returns the name the current log file should be
+	// rolled to.
+	getNewHistoryRollFileName(otherHistoryFiles []string) string
+
+	getCurrentFileName() string
+}
+
+// rollingFileWriter writes received messages to a file until a time interval passes
+// or the file exceeds a specified limit. After that the current log file is renamed
+// and the writer starts to log into a new file. You can set a limit for the number of
+// such renamed files, if you want, and then the rolling writer deletes older files when
+// the file count exceeds the specified limit.
+type rollingFileWriter struct {
+	fileName        string // log file name
+	currentDirPath  string
+	currentFile     *os.File
+	currentName     string
+	currentFileSize int64
+	rollingType     rollingType // Rolling mode (files roll by size/date/...)
+	archiveType     rollingArchiveType
+	archivePath     string
+	archiveExploded bool
+	fullName        bool
+	maxRolls        int
+	nameMode        rollingNameMode
+	self            rollerVirtual // Used for virtual calls
+	rollLock        sync.Mutex
+}
+
+func newRollingFileWriter(fpath string, rtype rollingType, atype rollingArchiveType, apath string, maxr int, namemode rollingNameMode,
+	archiveExploded bool, fullName bool) (*rollingFileWriter, error) {
+	rw := new(rollingFileWriter)
+	rw.currentDirPath, rw.fileName = filepath.Split(fpath)
+	if len(rw.currentDirPath) == 0 {
+		rw.currentDirPath = "."
+ } + + rw.rollingType = rtype + rw.archiveType = atype + rw.archivePath = apath + rw.nameMode = namemode + rw.maxRolls = maxr + rw.archiveExploded = archiveExploded + rw.fullName = fullName + return rw, nil +} + +func (rw *rollingFileWriter) hasRollName(file string) bool { + switch rw.nameMode { + case rollingNameModePostfix: + rname := rw.fileName + rollingLogHistoryDelimiter + return strings.HasPrefix(file, rname) + case rollingNameModePrefix: + rname := rollingLogHistoryDelimiter + rw.fileName + return strings.HasSuffix(file, rname) + } + return false +} + +func (rw *rollingFileWriter) createFullFileName(originalName, rollname string) string { + switch rw.nameMode { + case rollingNameModePostfix: + return originalName + rollingLogHistoryDelimiter + rollname + case rollingNameModePrefix: + return rollname + rollingLogHistoryDelimiter + originalName + } + return "" +} + +func (rw *rollingFileWriter) getSortedLogHistory() ([]string, error) { + files, err := getDirFilePaths(rw.currentDirPath, nil, true) + if err != nil { + return nil, err + } + var validRollNames []string + for _, file := range files { + if rw.hasRollName(file) { + rname := rw.getFileRollName(file) + if rw.self.isFileRollNameValid(rname) { + validRollNames = append(validRollNames, rname) + } + } + } + sortedTails, err := rw.self.sortFileRollNamesAsc(validRollNames) + if err != nil { + return nil, err + } + validSortedFiles := make([]string, len(sortedTails)) + for i, v := range sortedTails { + validSortedFiles[i] = rw.createFullFileName(rw.fileName, v) + } + return validSortedFiles, nil +} + +func (rw *rollingFileWriter) createFileAndFolderIfNeeded(first bool) error { + var err error + + if len(rw.currentDirPath) != 0 { + err = os.MkdirAll(rw.currentDirPath, defaultDirectoryPermissions) + + if err != nil { + return err + } + } + rw.currentName = rw.self.getCurrentFileName() + filePath := filepath.Join(rw.currentDirPath, rw.currentName) + + // This will either open the existing file (without truncating it) or + // create if necessary. Append mode avoids any race conditions. 
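+	// (O_APPEND makes the OS position each write at the current end of the
+	// file, so concurrent appenders do not clobber one another's offsets.)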
+ rw.currentFile, err = os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions) + if err != nil { + return err + } + + stat, err := rw.currentFile.Stat() + if err != nil { + rw.currentFile.Close() + rw.currentFile = nil + return err + } + + rw.currentFileSize = stat.Size() + return nil +} + +func (rw *rollingFileWriter) archiveExplodedLogs(logFilename string, compressionType compressionType) (err error) { + closeWithError := func(c io.Closer) { + if cerr := c.Close(); cerr != nil && err == nil { + err = cerr + } + } + + rollPath := filepath.Join(rw.currentDirPath, logFilename) + src, err := os.Open(rollPath) + if err != nil { + return err + } + defer src.Close() // Read-only + + // Buffer to a temporary file on the same partition + // Note: archivePath is a path to a directory when handling exploded logs + dst, err := rw.tempArchiveFile(rw.archivePath) + if err != nil { + return err + } + defer func() { + closeWithError(dst) + if err != nil { + os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file + return + } + + // Finalize archive by swapping the buffered archive into place + err = os.Rename(dst.Name(), filepath.Join(rw.archivePath, + compressionType.rollingArchiveTypeName(logFilename, true))) + }() + + // archive entry + w := compressionType.archiver(dst, true) + defer closeWithError(w) + fi, err := src.Stat() + if err != nil { + return err + } + if err := w.NextFile(logFilename, fi); err != nil { + return err + } + _, err = io.Copy(w, src) + return err +} + +func (rw *rollingFileWriter) archiveUnexplodedLogs(compressionType compressionType, rollsToDelete int, history []string) (err error) { + closeWithError := func(c io.Closer) { + if cerr := c.Close(); cerr != nil && err == nil { + err = cerr + } + } + + // Buffer to a temporary file on the same partition + // Note: archivePath is a path to a file when handling unexploded logs + dst, err := rw.tempArchiveFile(filepath.Dir(rw.archivePath)) + if err != nil { + return err + } + defer func() { + closeWithError(dst) + if err != nil { + os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file + return + } + + // Finalize archive by moving the buffered archive into place + err = os.Rename(dst.Name(), rw.archivePath) + }() + + w := compressionType.archiver(dst, false) + defer closeWithError(w) + + src, err := os.Open(rw.archivePath) + switch { + // Archive exists + case err == nil: + defer src.Close() // Read-only + + r, err := compressionType.unarchiver(src) + if err != nil { + return err + } + defer r.Close() // Read-only + + if err := archive.Copy(w, r); err != nil { + return err + } + + // Failed to stat + case !os.IsNotExist(err): + return err + } + + // Add new files to the archive + for i := 0; i < rollsToDelete; i++ { + rollPath := filepath.Join(rw.currentDirPath, history[i]) + src, err := os.Open(rollPath) + if err != nil { + return err + } + defer src.Close() // Read-only + fi, err := src.Stat() + if err != nil { + return err + } + if err := w.NextFile(src.Name(), fi); err != nil { + return err + } + if _, err := io.Copy(w, src); err != nil { + return err + } + } + return nil +} + +func (rw *rollingFileWriter) deleteOldRolls(history []string) error { + if rw.maxRolls <= 0 { + return nil + } + + rollsToDelete := len(history) - rw.maxRolls + if rollsToDelete <= 0 { + return nil + } + + if rw.archiveType != rollingArchiveNone { + if rw.archiveExploded { + os.MkdirAll(rw.archivePath, defaultDirectoryPermissions) + + // Archive logs + for i := 0; i < rollsToDelete; i++ 
{ + rw.archiveExplodedLogs(history[i], compressionTypes[rw.archiveType]) + } + } else { + os.MkdirAll(filepath.Dir(rw.archivePath), defaultDirectoryPermissions) + + rw.archiveUnexplodedLogs(compressionTypes[rw.archiveType], rollsToDelete, history) + } + } + + var err error + // In all cases (archive files or not) the files should be deleted. + for i := 0; i < rollsToDelete; i++ { + // Try best to delete files without breaking the loop. + if err = tryRemoveFile(filepath.Join(rw.currentDirPath, history[i])); err != nil { + reportInternalError(err) + } + } + + return nil +} + +func (rw *rollingFileWriter) getFileRollName(fileName string) string { + switch rw.nameMode { + case rollingNameModePostfix: + return fileName[len(rw.fileName+rollingLogHistoryDelimiter):] + case rollingNameModePrefix: + return fileName[:len(fileName)-len(rw.fileName+rollingLogHistoryDelimiter)] + } + return "" +} + +func (rw *rollingFileWriter) roll() error { + // First, close current file. + err := rw.currentFile.Close() + if err != nil { + return err + } + rw.currentFile = nil + + // Current history of all previous log files. + // For file roller it may be like this: + // * ... + // * file.log.4 + // * file.log.5 + // * file.log.6 + // + // For date roller it may look like this: + // * ... + // * file.log.11.Aug.13 + // * file.log.15.Aug.13 + // * file.log.16.Aug.13 + // Sorted log history does NOT include current file. + history, err := rw.getSortedLogHistory() + if err != nil { + return err + } + // Renames current file to create a new roll history entry + // For file roller it may be like this: + // * ... + // * file.log.4 + // * file.log.5 + // * file.log.6 + // n file.log.7 <---- RENAMED (from file.log) + newHistoryName := rw.createFullFileName(rw.fileName, + rw.self.getNewHistoryRollFileName(history)) + + err = os.Rename(filepath.Join(rw.currentDirPath, rw.currentName), filepath.Join(rw.currentDirPath, newHistoryName)) + if err != nil { + return err + } + + // Finally, add the newly created history file to the history archive + // and, if after that the archive exceeds the allowed max limit, older rolls + // must be removed/archived.
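+ // For example, with postfix naming and maxRolls = 5, renaming file.log to
+ // file.log.7 leaves 7 history entries, so deleteOldRolls archives the two
+ // oldest rolls (when an archive type is configured) and then deletes
+ // file.log.1 and file.log.2, keeping only the 5 most recent.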
+ history = append(history, newHistoryName) + if len(history) > rw.maxRolls { + err = rw.deleteOldRolls(history) + if err != nil { + return err + } + } + + return nil +} + +func (rw *rollingFileWriter) Write(bytes []byte) (n int, err error) { + rw.rollLock.Lock() + defer rw.rollLock.Unlock() + + if rw.self.needsToRoll() { + if err := rw.roll(); err != nil { + return 0, err + } + } + + if rw.currentFile == nil { + err := rw.createFileAndFolderIfNeeded(true) + if err != nil { + return 0, err + } + } + + n, err = rw.currentFile.Write(bytes) + rw.currentFileSize += int64(n) + return n, err +} + +func (rw *rollingFileWriter) Close() error { + if rw.currentFile != nil { + e := rw.currentFile.Close() + if e != nil { + return e + } + rw.currentFile = nil + } + return nil +} + +func (rw *rollingFileWriter) tempArchiveFile(archiveDir string) (*os.File, error) { + tmp := filepath.Join(archiveDir, ".seelog_tmp") + if err := os.MkdirAll(tmp, defaultDirectoryPermissions); err != nil { + return nil, err + } + return ioutil.TempFile(tmp, "archived_logs") +} + +// ============================================================================================= +// Different types of rolling writers +// ============================================================================================= + +// -------------------------------------------------- +// Rolling writer by SIZE +// -------------------------------------------------- + +// rollingFileWriterSize performs roll when file exceeds a specified limit. +type rollingFileWriterSize struct { + *rollingFileWriter + maxFileSize int64 +} + +func NewRollingFileWriterSize(fpath string, atype rollingArchiveType, apath string, maxSize int64, maxRolls int, namemode rollingNameMode, archiveExploded bool) (*rollingFileWriterSize, error) { + rw, err := newRollingFileWriter(fpath, rollingTypeSize, atype, apath, maxRolls, namemode, archiveExploded, false) + if err != nil { + return nil, err + } + rws := &rollingFileWriterSize{rw, maxSize} + rws.self = rws + return rws, nil +} + +func (rws *rollingFileWriterSize) needsToRoll() bool { + return rws.currentFileSize >= rws.maxFileSize +} + +func (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool { + if len(rname) == 0 { + return false + } + _, err := strconv.Atoi(rname) + return err == nil +} + +type rollSizeFileTailsSlice []string + +func (p rollSizeFileTailsSlice) Len() int { + return len(p) +} +func (p rollSizeFileTailsSlice) Less(i, j int) bool { + v1, _ := strconv.Atoi(p[i]) + v2, _ := strconv.Atoi(p[j]) + return v1 < v2 +} +func (p rollSizeFileTailsSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +func (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, error) { + ss := rollSizeFileTailsSlice(fs) + sort.Sort(ss) + return ss, nil +} + +func (rws *rollingFileWriterSize) getNewHistoryRollFileName(otherLogFiles []string) string { + v := 0 + if len(otherLogFiles) != 0 { + latest := otherLogFiles[len(otherLogFiles)-1] + v, _ = strconv.Atoi(rws.getFileRollName(latest)) + } + return fmt.Sprintf("%d", v+1) +} + +func (rws *rollingFileWriterSize) getCurrentFileName() string { + return rws.fileName +} + +func (rws *rollingFileWriterSize) String() string { + return fmt.Sprintf("Rolling file writer (By SIZE): filename: %s, archive: %s, archivefile: %s, maxFileSize: %v, maxRolls: %v", + rws.fileName, + rollingArchiveTypesStringRepresentation[rws.archiveType], + rws.archivePath, + rws.maxFileSize, + rws.maxRolls) +} + +// -------------------------------------------------- +// Rolling 
writer by TIME +// -------------------------------------------------- + +// rollingFileWriterTime performs roll when a specified time interval has passed. +type rollingFileWriterTime struct { + *rollingFileWriter + timePattern string + currentTimeFileName string +} + +func NewRollingFileWriterTime(fpath string, atype rollingArchiveType, apath string, maxr int, + timePattern string, namemode rollingNameMode, archiveExploded bool, fullName bool) (*rollingFileWriterTime, error) { + + rw, err := newRollingFileWriter(fpath, rollingTypeTime, atype, apath, maxr, namemode, archiveExploded, fullName) + if err != nil { + return nil, err + } + rws := &rollingFileWriterTime{rw, timePattern, ""} + rws.self = rws + return rws, nil +} + +func (rwt *rollingFileWriterTime) needsToRoll() bool { + newName := time.Now().Format(rwt.timePattern) + + if rwt.currentTimeFileName == "" { + // first run; capture the current name + rwt.currentTimeFileName = newName + return false + } + + return newName != rwt.currentTimeFileName +} + +func (rwt *rollingFileWriterTime) isFileRollNameValid(rname string) bool { + if len(rname) == 0 { + return false + } + _, err := time.ParseInLocation(rwt.timePattern, rname, time.Local) + return err == nil +} + +type rollTimeFileTailsSlice struct { + data []string + pattern string +} + +func (p rollTimeFileTailsSlice) Len() int { + return len(p.data) +} + +func (p rollTimeFileTailsSlice) Less(i, j int) bool { + t1, _ := time.ParseInLocation(p.pattern, p.data[i], time.Local) + t2, _ := time.ParseInLocation(p.pattern, p.data[j], time.Local) + return t1.Before(t2) +} + +func (p rollTimeFileTailsSlice) Swap(i, j int) { + p.data[i], p.data[j] = p.data[j], p.data[i] +} + +func (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, error) { + ss := rollTimeFileTailsSlice{data: fs, pattern: rwt.timePattern} + sort.Sort(ss) + return ss.data, nil +} + +func (rwt *rollingFileWriterTime) getNewHistoryRollFileName(_ []string) string { + newFileName := rwt.currentTimeFileName + rwt.currentTimeFileName = time.Now().Format(rwt.timePattern) + return newFileName +} + +func (rwt *rollingFileWriterTime) getCurrentFileName() string { + if rwt.fullName { + return rwt.createFullFileName(rwt.fileName, time.Now().Format(rwt.timePattern)) + } + return rwt.fileName +} + +func (rwt *rollingFileWriterTime) String() string { + return fmt.Sprintf("Rolling file writer (By TIME): filename: %s, archive: %s, archivefile: %s, pattern: %s, maxRolls: %v", + rwt.fileName, + rollingArchiveTypesStringRepresentation[rwt.archiveType], + rwt.archivePath, + rwt.timePattern, + rwt.maxRolls) +} diff --git a/vendor/github.com/cihub/seelog/writers_smtpwriter.go b/vendor/github.com/cihub/seelog/writers_smtpwriter.go new file mode 100644 index 00000000..31b79438 --- /dev/null +++ b/vendor/github.com/cihub/seelog/writers_smtpwriter.go @@ -0,0 +1,214 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "net/smtp" + "path/filepath" + "strings" +) + +const ( + // Default subject phrase for sending emails. + DefaultSubjectPhrase = "Diagnostic message from server: " + + // Message subject pattern composed according to RFC 5321. + rfc5321SubjectPattern = "From: %s <%s>\nSubject: %s\n\n" +) + +// smtpWriter is used to send emails via given SMTP-server. +type smtpWriter struct { + auth smtp.Auth + hostName string + hostPort string + hostNameWithPort string + senderAddress string + senderName string + recipientAddresses []string + caCertDirPaths []string + mailHeaders []string + subject string +} + +// NewSMTPWriter returns a new SMTP-writer. +func NewSMTPWriter(sa, sn string, ras []string, hn, hp, un, pwd string, cacdps []string, subj string, headers []string) *smtpWriter { + return &smtpWriter{ + auth: smtp.PlainAuth("", un, pwd, hn), + hostName: hn, + hostPort: hp, + hostNameWithPort: fmt.Sprintf("%s:%s", hn, hp), + senderAddress: sa, + senderName: sn, + recipientAddresses: ras, + caCertDirPaths: cacdps, + subject: subj, + mailHeaders: headers, + } +} + +func prepareMessage(senderAddr, senderName, subject string, body []byte, headers []string) []byte { + headerLines := fmt.Sprintf(rfc5321SubjectPattern, senderName, senderAddr, subject) + // Build header lines if configured. + if headers != nil && len(headers) > 0 { + headerLines += strings.Join(headers, "\n") + headerLines += "\n" + } + return append([]byte(headerLines), body...) +} + +// getTLSConfig gets paths of PEM files with certificates, +// host server name and tries to create an appropriate TLS.Config. +func getTLSConfig(pemFileDirPaths []string, hostName string) (config *tls.Config, err error) { + if pemFileDirPaths == nil || len(pemFileDirPaths) == 0 { + err = errors.New("invalid PEM file paths") + return + } + pemEncodedContent := []byte{} + var ( + e error + bytes []byte + ) + // Create a file-filter-by-extension, set aside non-pem files. + pemFilePathFilter := func(fp string) bool { + if filepath.Ext(fp) == ".pem" { + return true + } + return false + } + for _, pemFileDirPath := range pemFileDirPaths { + pemFilePaths, err := getDirFilePaths(pemFileDirPath, pemFilePathFilter, false) + if err != nil { + return nil, err + } + + // Put together all the PEM files to decode them as a whole byte slice. + for _, pfp := range pemFilePaths { + if bytes, e = ioutil.ReadFile(pfp); e == nil { + pemEncodedContent = append(pemEncodedContent, bytes...) 
+ } else { + return nil, fmt.Errorf("cannot read file: %s: %s", pfp, e.Error()) + } + } + } + config = &tls.Config{RootCAs: x509.NewCertPool(), ServerName: hostName} + isAppended := config.RootCAs.AppendCertsFromPEM(pemEncodedContent) + if !isAppended { + // Extract this into a separate error. + err = errors.New("invalid PEM content") + return + } + return +} + +// SendMail accepts TLS configuration, connects to the server at addr, +// switches to TLS if possible, authenticates with mechanism a if possible, +// and then sends an email from address from, to addresses to, with message msg. +func sendMailWithTLSConfig(config *tls.Config, addr string, a smtp.Auth, from string, to []string, msg []byte) error { + c, err := smtp.Dial(addr) + if err != nil { + return err + } + // Check if the server supports STARTTLS extension. + if ok, _ := c.Extension("STARTTLS"); ok { + if err = c.StartTLS(config); err != nil { + return err + } + } + // Check if the server supports AUTH extension and use given smtp.Auth. + if a != nil { + if isSupported, _ := c.Extension("AUTH"); isSupported { + if err = c.Auth(a); err != nil { + return err + } + } + } + // Portion of code from the official smtp.SendMail function, + // see http://golang.org/src/pkg/net/smtp/smtp.go. + if err = c.Mail(from); err != nil { + return err + } + for _, addr := range to { + if err = c.Rcpt(addr); err != nil { + return err + } + } + w, err := c.Data() + if err != nil { + return err + } + _, err = w.Write(msg) + if err != nil { + return err + } + err = w.Close() + if err != nil { + return err + } + return c.Quit() +} + +// Write pushes a text message properly composed according to RFC 5321 +// to a post server, which sends it to the recipients. +func (smtpw *smtpWriter) Write(data []byte) (int, error) { + var err error + + if smtpw.caCertDirPaths == nil { + err = smtp.SendMail( + smtpw.hostNameWithPort, + smtpw.auth, + smtpw.senderAddress, + smtpw.recipientAddresses, + prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders), + ) + } else { + config, e := getTLSConfig(smtpw.caCertDirPaths, smtpw.hostName) + if e != nil { + return 0, e + } + err = sendMailWithTLSConfig( + config, + smtpw.hostNameWithPort, + smtpw.auth, + smtpw.senderAddress, + smtpw.recipientAddresses, + prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders), + ) + } + if err != nil { + return 0, err + } + return len(data), nil +} + +// Close closes down SMTP-connection. +func (smtpw *smtpWriter) Close() error { + // Do nothing as Write method opens and closes connection automatically. + return nil +} diff --git a/vendor/github.com/eapache/queue/v2/LICENSE b/vendor/github.com/eapache/queue/v2/LICENSE deleted file mode 100644 index d5f36dbc..00000000 --- a/vendor/github.com/eapache/queue/v2/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/eapache/queue/v2/queue.go b/vendor/github.com/eapache/queue/v2/queue.go deleted file mode 100644 index 8cf74cc3..00000000 --- a/vendor/github.com/eapache/queue/v2/queue.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. -Using this instead of other, simpler, queue implementations (slice+append or linked list) provides -substantial memory and time benefits, and fewer GC pauses. - -The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. -*/ -package queue - -// minQueueLen is smallest capacity that queue may have. -// Must be power of 2 for bitwise modulus: x % n == x & (n - 1). -const minQueueLen = 16 - -// Queue represents a single instance of the queue data structure. -type Queue[V any] struct { - buf []*V - head, tail, count int -} - -// New constructs and returns a new Queue. -func New[V any]() *Queue[V] { - return &Queue[V]{ - buf: make([]*V, minQueueLen), - } -} - -// Length returns the number of elements currently stored in the queue. -func (q *Queue[V]) Length() int { - return q.count -} - -// resizes the queue to fit exactly twice its current contents -// this can result in shrinking if the queue is less than half-full -func (q *Queue[V]) resize() { - newBuf := make([]*V, q.count<<1) - - if q.tail > q.head { - copy(newBuf, q.buf[q.head:q.tail]) - } else { - n := copy(newBuf, q.buf[q.head:]) - copy(newBuf[n:], q.buf[:q.tail]) - } - - q.head = 0 - q.tail = q.count - q.buf = newBuf -} - -// Add puts an element on the end of the queue. -func (q *Queue[V]) Add(elem V) { - if q.count == len(q.buf) { - q.resize() - } - - q.buf[q.tail] = &elem - // bitwise modulus - q.tail = (q.tail + 1) & (len(q.buf) - 1) - q.count++ -} - -// Peek returns the element at the head of the queue. This call panics -// if the queue is empty. -func (q *Queue[V]) Peek() V { - if q.count <= 0 { - panic("queue: Peek() called on empty queue") - } - return *(q.buf[q.head]) -} - -// Get returns the element at index i in the queue. If the index is -// invalid, the call will panic. This method accepts both positive and -// negative index values. Index 0 refers to the first element, and -// index -1 refers to the last. -func (q *Queue[V]) Get(i int) V { - // If indexing backwards, convert to positive index. - if i < 0 { - i += q.count - } - if i < 0 || i >= q.count { - panic("queue: Get() called with index out of range") - } - // bitwise modulus - return *(q.buf[(q.head+i)&(len(q.buf)-1)]) -} - -// Remove removes and returns the element from the front of the queue. If the -// queue is empty, the call will panic. -func (q *Queue[V]) Remove() V { - if q.count <= 0 { - panic("queue: Remove() called on empty queue") - } - ret := q.buf[q.head] - q.buf[q.head] = nil - // bitwise modulus - q.head = (q.head + 1) & (len(q.buf) - 1) - q.count-- - // Resize down if buffer 1/4 full. 
- if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) { - q.resize() - } - return *ret -} diff --git a/vendor/github.com/ebitengine/purego/README.md b/vendor/github.com/ebitengine/purego/README.md index 8f40f8b7..f1ff9053 100644 --- a/vendor/github.com/ebitengine/purego/README.md +++ b/vendor/github.com/ebitengine/purego/README.md @@ -35,7 +35,8 @@ except for float arguments and return values. ## Example -This example only works on macOS and Linux. For a complete example look at [libc](https://github.com/ebitengine/purego/tree/main/examples/libc) which supports Windows and FreeBSD. +The example below only showcases purego use for macOS and Linux. The other platforms require special handling which can +be seen in the complete example at [examples/libc](https://github.com/ebitengine/purego/tree/main/examples/libc) which supports Windows and FreeBSD. ```go package main @@ -74,7 +75,7 @@ Then to run: `CGO_ENABLED=0 go run main.go` ## Questions If you have questions about how to incorporate purego in your project or want to discuss -how it works join the [Discord](https://discord.com/channels/842049801528016967/1123106378731487345)! +how it works join the [Discord](https://discord.gg/HzGZVD6BkY)! ### External Code diff --git a/vendor/github.com/ebitengine/purego/dlerror.go b/vendor/github.com/ebitengine/purego/dlerror.go index cf4c0505..95cdfe16 100644 --- a/vendor/github.com/ebitengine/purego/dlerror.go +++ b/vendor/github.com/ebitengine/purego/dlerror.go @@ -6,6 +6,8 @@ package purego // Dlerror represents an error value returned from Dlopen, Dlsym, or Dlclose. +// +// This type is not available on Windows as there is no counterpart to it on Windows. type Dlerror struct { s string } diff --git a/vendor/github.com/ebitengine/purego/dlfcn.go b/vendor/github.com/ebitengine/purego/dlfcn.go index 2ee6f34b..cd1bf293 100644 --- a/vendor/github.com/ebitengine/purego/dlfcn.go +++ b/vendor/github.com/ebitengine/purego/dlfcn.go @@ -1,7 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors -//go:build darwin || freebsd || linux +//go:build (darwin || freebsd || linux) && !android && !faketime package purego @@ -33,6 +33,10 @@ func init() { // A second call to Dlopen with the same path will return the same handle, but the internal // reference count for the handle will be incremented. Therefore, all // Dlopen calls should be balanced with a Dlclose call. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.LoadLibrary], [golang.org/x/sys/windows.LoadLibraryEx], +// [golang.org/x/sys/windows.NewLazyDLL], or [golang.org/x/sys/windows.NewLazySystemDLL] for Windows instead. func Dlopen(path string, mode int) (uintptr, error) { u := fnDlopen(path, mode) if u == 0 { @@ -45,6 +49,9 @@ func Dlopen(path string, mode int) (uintptr, error) { // It returns the address where that symbol is loaded into memory. If the symbol is not found, // in the specified library or any of the libraries that were automatically loaded by Dlopen // when that library was loaded, Dlsym returns zero. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.GetProcAddress] for Windows instead. func Dlsym(handle uintptr, name string) (uintptr, error) { u := fnDlsym(handle, name) if u == 0 { @@ -56,6 +63,9 @@ func Dlsym(handle uintptr, name string) (uintptr, error) { // Dlclose decrements the reference count on the dynamic library handle. 
// If the reference count drops to zero and no other loaded libraries // use symbols in it, then the dynamic library is unloaded. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.FreeLibrary] for Windows instead. func Dlclose(handle uintptr) error { if fnDlclose(handle) { return Dlerror{fnDlerror()} @@ -63,11 +73,6 @@ func Dlclose(handle uintptr) error { return nil } -//go:linkname openLibrary openLibrary -func openLibrary(name string) (uintptr, error) { - return Dlopen(name, RTLD_NOW|RTLD_GLOBAL) -} - func loadSymbol(handle uintptr, name string) (uintptr, error) { return Dlsym(handle, name) } @@ -78,17 +83,17 @@ func loadSymbol(handle uintptr, name string) (uintptr, error) { // appear to work if you link directly to the C function on darwin arm64. //go:linkname dlopen dlopen -var dlopen uintptr +var dlopen uint8 var dlopenABI0 = uintptr(unsafe.Pointer(&dlopen)) //go:linkname dlsym dlsym -var dlsym uintptr +var dlsym uint8 var dlsymABI0 = uintptr(unsafe.Pointer(&dlsym)) //go:linkname dlclose dlclose -var dlclose uintptr +var dlclose uint8 var dlcloseABI0 = uintptr(unsafe.Pointer(&dlclose)) //go:linkname dlerror dlerror -var dlerror uintptr +var dlerror uint8 var dlerrorABI0 = uintptr(unsafe.Pointer(&dlerror)) diff --git a/vendor/github.com/ebitengine/purego/dlfcn_android.go b/vendor/github.com/ebitengine/purego/dlfcn_android.go new file mode 100644 index 00000000..0d534176 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn_android.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import "github.com/ebitengine/purego/internal/cgo" + +// Source for constants: https://android.googlesource.com/platform/bionic/+/refs/heads/main/libc/include/dlfcn.h + +const ( + is64bit = 1 << (^uintptr(0) >> 63) / 2 + is32bit = 1 - is64bit + RTLD_DEFAULT = is32bit * 0xffffffff + RTLD_LAZY = 0x00000001 + RTLD_NOW = is64bit * 0x00000002 + RTLD_LOCAL = 0x00000000 + RTLD_GLOBAL = is64bit*0x00100 | is32bit*0x00000002 +) + +func Dlopen(path string, mode int) (uintptr, error) { + return cgo.Dlopen(path, mode) +} + +func Dlsym(handle uintptr, name string) (uintptr, error) { + return cgo.Dlsym(handle, name) +} + +func Dlclose(handle uintptr) error { + return cgo.Dlclose(handle) +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return Dlsym(handle, name) +} diff --git a/vendor/github.com/ebitengine/purego/dlfcn_darwin.go b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go index b343f846..27f56071 100644 --- a/vendor/github.com/ebitengine/purego/dlfcn_darwin.go +++ b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go @@ -6,19 +6,14 @@ package purego // Source for constants: https://opensource.apple.com/source/dyld/dyld-360.14/include/dlfcn.h.auto.html const ( - RTLD_DEFAULT = ^uintptr(0) - 1 // Pseudo-handle for dlsym so search for any loaded symbol - RTLD_LAZY = 0x1 // Relocations are performed at an implementation-dependent time. - RTLD_NOW = 0x2 // Relocations are performed when the object is loaded. - RTLD_LOCAL = 0x4 // All symbols are not made available for relocation processing by other modules. - RTLD_GLOBAL = 0x8 // All symbols are available for relocation processing of other modules. + RTLD_DEFAULT = 1<<64 - 2 // Pseudo-handle for dlsym so search for any loaded symbol + RTLD_LAZY = 0x1 // Relocations are performed at an implementation-dependent time. + RTLD_NOW = 0x2 // Relocations are performed when the object is loaded. 
+ RTLD_LOCAL = 0x4 // All symbols are not made available for relocation processing by other modules. + RTLD_GLOBAL = 0x8 // All symbols are available for relocation processing of other modules. ) //go:cgo_import_dynamic purego_dlopen dlopen "/usr/lib/libSystem.B.dylib" //go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib" //go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib" //go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib" - -//go:cgo_import_dynamic purego_dlopen dlopen "/usr/lib/libSystem.B.dylib" -//go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib" -//go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib" -//go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib" diff --git a/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go index 38f06783..6b371620 100644 --- a/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go +++ b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go @@ -5,9 +5,10 @@ package purego // Constants as defined in https://github.com/freebsd/freebsd-src/blob/main/include/dlfcn.h const ( - RTLD_DEFAULT = ^uintptr(0) - 2 // Pseudo-handle for dlsym so search for any loaded symbol - RTLD_LAZY = 0x00001 // Relocations are performed at an implementation-dependent time. - RTLD_NOW = 0x00002 // Relocations are performed when the object is loaded. - RTLD_LOCAL = 0x00000 // All symbols are not made available for relocation processing by other modules. - RTLD_GLOBAL = 0x00100 // All symbols are available for relocation processing of other modules. + intSize = 32 << (^uint(0) >> 63) // 32 or 64 + RTLD_DEFAULT = 1<<intSize - 2 // Pseudo-handle for dlsym so search for any loaded symbol + RTLD_LAZY = 0x00001 // Relocations are performed at an implementation-dependent time. + RTLD_NOW = 0x00002 // Relocations are performed when the object is loaded. + RTLD_LOCAL = 0x00000 // All symbols are not made available for relocation processing by other modules. + RTLD_GLOBAL = 0x00100 // All symbols are available for relocation processing of other modules. ) +#include <dlfcn.h> +#include <stdlib.h> */ import "C" +import ( + "errors" + "unsafe" +) + +func Dlopen(filename string, flag int) (uintptr, error) { + cfilename := C.CString(filename) + defer C.free(unsafe.Pointer(cfilename)) + handle := C.dlopen(cfilename, C.int(flag)) + if handle == nil { + return 0, errors.New(C.GoString(C.dlerror())) + } + return uintptr(handle), nil +} + +func Dlsym(handle uintptr, symbol string) (uintptr, error) { + csymbol := C.CString(symbol) + defer C.free(unsafe.Pointer(csymbol)) + symbolAddr := C.dlsym(*(*unsafe.Pointer)(unsafe.Pointer(&handle)), csymbol) + if symbolAddr == nil { + return 0, errors.New(C.GoString(C.dlerror())) + } + return uintptr(symbolAddr), nil +} + +func Dlclose(handle uintptr) error { + result := C.dlclose(*(*unsafe.Pointer)(unsafe.Pointer(&handle))) + if result != 0 { + return errors.New(C.GoString(C.dlerror())) + } + return nil +} + // all that is needed is to assign each dl function because then its // symbol will then be made available to the linker and linked to inside dlfcn.go var ( diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go index 029a2598..37ff24d5 100644 --- a/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go +++ b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go @@ -20,7 +20,7 @@ typedef struct syscall15Args { uintptr_t fn; uintptr_t a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15; uintptr_t f1, f2, f3, f4, f5, f6, f7, f8; - uintptr_t r1, r2, err; + uintptr_t err; } syscall15Args; void syscall15(struct syscall15Args *args) { @@ -31,7 +31,7 @@ void syscall15(struct syscall15Args *args) { *(void**)(&func_name) = (void*)(args->fn); uintptr_t r1 = 
func_name(args->a1,args->a2,args->a3,args->a4,args->a5,args->a6,args->a7,args->a8,args->a9, args->a10,args->a11,args->a12,args->a13,args->a14,args->a15); - args->r1 = r1; + args->a1 = r1; args->err = errno; } @@ -48,8 +48,8 @@ func Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, C.uintptr_t(fn), C.uintptr_t(a1), C.uintptr_t(a2), C.uintptr_t(a3), C.uintptr_t(a4), C.uintptr_t(a5), C.uintptr_t(a6), C.uintptr_t(a7), C.uintptr_t(a8), C.uintptr_t(a9), C.uintptr_t(a10), C.uintptr_t(a11), C.uintptr_t(a12), - C.uintptr_t(a13), C.uintptr_t(a14), C.uintptr_t(a15), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + C.uintptr_t(a13), C.uintptr_t(a14), C.uintptr_t(a15), 0, 0, 0, 0, 0, 0, 0, 0, 0, } C.syscall15(&args) - return uintptr(args.r1), uintptr(args.r2), uintptr(args.err) + return uintptr(args.a1), 0, uintptr(args.err) } diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go index f6a079a7..f29e690c 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin || freebsd || linux +//go:build !cgo && (darwin || freebsd || linux) package fakecgo diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go index efbe4212..be82f7df 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go @@ -1,7 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors -//go:build darwin || freebsd +//go:build !cgo && (darwin || freebsd || linux) // Package fakecgo implements the Cgo runtime (runtime/cgo) entirely in Go. // This allows code that calls into C to function properly when CGO_ENABLED=0. @@ -30,4 +30,3 @@ package fakecgo //go:generate go run gen.go -//go:generate gofmt -s -w symbols.go diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go index fb3a3f7f..39f5ff1f 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cgo + package fakecgo import "unsafe" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go index b000b3fb..d0868f0f 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !cgo + package fakecgo import "unsafe" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go index 9aa57ef6..c9ff7156 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cgo + package fakecgo import "unsafe" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go index 1db518e3..e3a060b9 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cgo + package fakecgo import "unsafe" @@ -14,7 +16,7 @@ func _cgo_sys_thread_start(ts *ThreadStart) { var size size_t var err int - //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + // fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug sigfillset(&ign) pthread_sigmask(SIG_SETMASK, &ign, &oset) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go index 71da1128..d229d842 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go @@ -1,7 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors -//go:build darwin || freebsd || linux +//go:build !cgo && (darwin || freebsd || linux) package fakecgo @@ -50,9 +50,11 @@ func _cgo_try_pthread_create(thread *pthread_t, attr *pthread_attr_t, pfn unsafe var err int for tries = 0; tries < 20; tries++ { - err = int(pthread_create(thread, attr, pfn, unsafe.Pointer(arg))) + // inlined this call because it ran out of stack when inlining was disabled + err = int(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(pfn), uintptr(unsafe.Pointer(arg)), 0)) if err == 0 { - pthread_detach(*thread) + // inlined this call because it ran out of stack when inlining was disabled + call5(pthread_detachABI0, uintptr(*thread), 0, 0, 0, 0) return 0 } if err != int(syscall.EAGAIN) { @@ -60,7 +62,8 @@ func _cgo_try_pthread_create(thread *pthread_t, attr *pthread_attr_t, pfn unsafe } ts.Sec = 0 ts.Nsec = (tries + 1) * 1000 * 1000 // Milliseconds. - nanosleep(&ts, nil) + // inlined this call because it ran out of stack when inlining was disabled + call5(nanosleepABI0, uintptr(unsafe.Pointer(&ts)), 0, 0, 0, 0) } return int(syscall.EAGAIN) } diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go index 9aa57ef6..c9ff7156 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !cgo + package fakecgo import "unsafe" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go index 1db518e3..a3b1cca5 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cgo + package fakecgo import "unsafe" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go index 818372ea..e42d84f0 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go @@ -1,7 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors -//go:build darwin || freebsd || linux +//go:build !cgo && (darwin || freebsd || linux) package fakecgo diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go index 7a43b42b..0ac10d1f 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go @@ -1,7 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors -//go:build darwin || freebsd || linux +//go:build !cgo && (darwin || freebsd || linux) package fakecgo @@ -27,7 +27,11 @@ func x_cgo_thread_start(arg *ThreadStart) { println("fakecgo: out of memory in thread_start") abort() } - // *ts = *arg would cause a writebarrier so use memmove instead - memmove(unsafe.Pointer(ts), unsafe.Pointer(arg), unsafe.Sizeof(*ts)) + // *ts = *arg would cause a writebarrier so copy using slices + s1 := unsafe.Slice((*uintptr)(unsafe.Pointer(ts)), unsafe.Sizeof(*ts)/8) + s2 := unsafe.Slice((*uintptr)(unsafe.Pointer(arg)), unsafe.Sizeof(*arg)/8) + for i := range s2 { + s1[i] = s2[i] + } _cgo_sys_thread_start(ts) // OS-dependent half } diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go index ce17d182..28af41cc 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin || freebsd || linux +//go:build !cgo && (darwin || freebsd || linux) // The runtime package contains an uninitialized definition // for runtime·iscgo. Override it to tell the runtime we're here. 
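The go_util.go hunk above replaces the `runtime.memmove` linkname with a hand-rolled word-by-word copy through `unsafe.Slice`, so the struct is copied with plain `uintptr` stores and the compiler emits no GC write barrier. A minimal standalone sketch of the same trick (the `threadStart` type and `wordCopy` helper are hypothetical stand-ins; note the vendored code divides by a hard-coded 8 and so assumes 64-bit words, while this sketch divides by `unsafe.Sizeof(uintptr(0))`):

```go
package main

import (
	"fmt"
	"unsafe"
)

// threadStart is a hypothetical stand-in for fakecgo's ThreadStart:
// a struct consisting entirely of pointer-sized words.
type threadStart struct {
	g, tls, fn uintptr
}

// wordCopy copies src into dst one pointer-sized word at a time.
// Because the stores are plain uintptr writes rather than typed
// pointer assignments, the compiler emits no write barrier for them.
func wordCopy(dst, src *threadStart) {
	words := unsafe.Sizeof(*dst) / unsafe.Sizeof(uintptr(0))
	d := unsafe.Slice((*uintptr)(unsafe.Pointer(dst)), words)
	s := unsafe.Slice((*uintptr)(unsafe.Pointer(src)), words)
	for i := range s {
		d[i] = s[i]
	}
}

func main() {
	src := threadStart{g: 1, tls: 2, fn: 3}
	var dst threadStart
	wordCopy(&dst, &src)
	fmt.Printf("%+v\n", dst) // prints {g:1 tls:2 fn:3}
}
```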
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go index c12d403c..38f94419 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go @@ -1,12 +1,16 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors -//go:build darwin || freebsd || linux +//go:build !cgo && (darwin || freebsd || linux) package fakecgo type ( - size_t uintptr + size_t uintptr + // Sources: + // Darwin (32 bytes) - https://github.com/apple/darwin-xnu/blob/2ff845c2e033bd0ff64b5b6aa6063a1f8f65aa32/bsd/sys/_types.h#L74 + // FreeBSD (32 bytes) - https://github.com/DoctorWkt/xv6-freebsd/blob/d2a294c2a984baed27676068b15ed9a29b06ab6f/include/signal.h#L98C9-L98C21 + // Linux (128 bytes) - https://github.com/torvalds/linux/blob/ab75170520d4964f3acf8bb1f91d34cbc650688e/arch/x86/include/asm/signal.h#L25 sigset_t [128]byte pthread_attr_t [64]byte pthread_t int diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go index 03c91718..af148333 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go @@ -1,6 +1,8 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors +//go:build !cgo + package fakecgo type ( diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go index baf03fa8..ca1f722c 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go @@ -1,6 +1,8 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors +//go:build !cgo + package fakecgo type ( diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go index 93aa5b26..c4b6e9ea 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go @@ -1,6 +1,8 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors +//go:build !cgo + package fakecgo type ( diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go index b69d4b39..f30af0e1 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build darwin || freebsd || linux +//go:build !cgo && (darwin || freebsd || linux) package fakecgo diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go index d7401f70..d17942e0 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors -//go:build darwin || freebsd || linux +//go:build !cgo && (darwin || freebsd || linux) package fakecgo @@ -15,170 +15,187 @@ import ( // setg_trampoline calls setg with the G provided func setg_trampoline(setg uintptr, G uintptr) -//go:linkname memmove runtime.memmove -func memmove(to, from unsafe.Pointer, n uintptr) - // call5 takes fn the C function and 5 arguments and calls the function with those arguments func call5(fn, a1, a2, a3, a4, a5 uintptr) uintptr +//go:nosplit func malloc(size uintptr) unsafe.Pointer { ret := call5(mallocABI0, uintptr(size), 0, 0, 0, 0) // this indirection is to avoid go vet complaining about possible misuse of unsafe.Pointer return *(*unsafe.Pointer)(unsafe.Pointer(&ret)) } +//go:nosplit func free(ptr unsafe.Pointer) { call5(freeABI0, uintptr(ptr), 0, 0, 0, 0) } +//go:nosplit func setenv(name *byte, value *byte, overwrite int32) int32 { return int32(call5(setenvABI0, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), uintptr(overwrite), 0, 0)) } +//go:nosplit func unsetenv(name *byte) int32 { return int32(call5(unsetenvABI0, uintptr(unsafe.Pointer(name)), 0, 0, 0, 0)) } +//go:nosplit func sigfillset(set *sigset_t) int32 { return int32(call5(sigfillsetABI0, uintptr(unsafe.Pointer(set)), 0, 0, 0, 0)) } +//go:nosplit func nanosleep(ts *syscall.Timespec, rem *syscall.Timespec) int32 { return int32(call5(nanosleepABI0, uintptr(unsafe.Pointer(ts)), uintptr(unsafe.Pointer(rem)), 0, 0, 0)) } +//go:nosplit func abort() { call5(abortABI0, 0, 0, 0, 0, 0) } +//go:nosplit func pthread_attr_init(attr *pthread_attr_t) int32 { return int32(call5(pthread_attr_initABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0)) } +//go:nosplit func pthread_create(thread *pthread_t, attr *pthread_attr_t, start unsafe.Pointer, arg unsafe.Pointer) int32 { return int32(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(start), uintptr(arg), 0)) } +//go:nosplit func pthread_detach(thread pthread_t) int32 { return int32(call5(pthread_detachABI0, uintptr(thread), 0, 0, 0, 0)) } +//go:nosplit func pthread_sigmask(how sighow, ign *sigset_t, oset *sigset_t) int32 { return int32(call5(pthread_sigmaskABI0, uintptr(how), uintptr(unsafe.Pointer(ign)), uintptr(unsafe.Pointer(oset)), 0, 0)) } +//go:nosplit func pthread_self() pthread_t { return pthread_t(call5(pthread_selfABI0, 0, 0, 0, 0, 0)) } +//go:nosplit func pthread_get_stacksize_np(thread pthread_t) size_t { return size_t(call5(pthread_get_stacksize_npABI0, uintptr(thread), 0, 0, 0, 0)) } +//go:nosplit func pthread_attr_getstacksize(attr *pthread_attr_t, stacksize *size_t) int32 { return int32(call5(pthread_attr_getstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(stacksize)), 0, 0, 0)) } +//go:nosplit func pthread_attr_setstacksize(attr *pthread_attr_t, size size_t) int32 { return int32(call5(pthread_attr_setstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(size), 0, 0, 0)) } +//go:nosplit func pthread_attr_destroy(attr *pthread_attr_t) int32 { 
return int32(call5(pthread_attr_destroyABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0)) } +//go:nosplit func pthread_mutex_lock(mutex *pthread_mutex_t) int32 { return int32(call5(pthread_mutex_lockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0)) } +//go:nosplit func pthread_mutex_unlock(mutex *pthread_mutex_t) int32 { return int32(call5(pthread_mutex_unlockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0)) } +//go:nosplit func pthread_cond_broadcast(cond *pthread_cond_t) int32 { return int32(call5(pthread_cond_broadcastABI0, uintptr(unsafe.Pointer(cond)), 0, 0, 0, 0)) } +//go:nosplit func pthread_setspecific(key pthread_key_t, value unsafe.Pointer) int32 { return int32(call5(pthread_setspecificABI0, uintptr(key), uintptr(value), 0, 0, 0)) } //go:linkname _malloc _malloc -var _malloc uintptr +var _malloc uint8 var mallocABI0 = uintptr(unsafe.Pointer(&_malloc)) //go:linkname _free _free -var _free uintptr +var _free uint8 var freeABI0 = uintptr(unsafe.Pointer(&_free)) //go:linkname _setenv _setenv -var _setenv uintptr +var _setenv uint8 var setenvABI0 = uintptr(unsafe.Pointer(&_setenv)) //go:linkname _unsetenv _unsetenv -var _unsetenv uintptr +var _unsetenv uint8 var unsetenvABI0 = uintptr(unsafe.Pointer(&_unsetenv)) //go:linkname _sigfillset _sigfillset -var _sigfillset uintptr +var _sigfillset uint8 var sigfillsetABI0 = uintptr(unsafe.Pointer(&_sigfillset)) //go:linkname _nanosleep _nanosleep -var _nanosleep uintptr +var _nanosleep uint8 var nanosleepABI0 = uintptr(unsafe.Pointer(&_nanosleep)) //go:linkname _abort _abort -var _abort uintptr +var _abort uint8 var abortABI0 = uintptr(unsafe.Pointer(&_abort)) //go:linkname _pthread_attr_init _pthread_attr_init -var _pthread_attr_init uintptr +var _pthread_attr_init uint8 var pthread_attr_initABI0 = uintptr(unsafe.Pointer(&_pthread_attr_init)) //go:linkname _pthread_create _pthread_create -var _pthread_create uintptr +var _pthread_create uint8 var pthread_createABI0 = uintptr(unsafe.Pointer(&_pthread_create)) //go:linkname _pthread_detach _pthread_detach -var _pthread_detach uintptr +var _pthread_detach uint8 var pthread_detachABI0 = uintptr(unsafe.Pointer(&_pthread_detach)) //go:linkname _pthread_sigmask _pthread_sigmask -var _pthread_sigmask uintptr +var _pthread_sigmask uint8 var pthread_sigmaskABI0 = uintptr(unsafe.Pointer(&_pthread_sigmask)) //go:linkname _pthread_self _pthread_self -var _pthread_self uintptr +var _pthread_self uint8 var pthread_selfABI0 = uintptr(unsafe.Pointer(&_pthread_self)) //go:linkname _pthread_get_stacksize_np _pthread_get_stacksize_np -var _pthread_get_stacksize_np uintptr +var _pthread_get_stacksize_np uint8 var pthread_get_stacksize_npABI0 = uintptr(unsafe.Pointer(&_pthread_get_stacksize_np)) //go:linkname _pthread_attr_getstacksize _pthread_attr_getstacksize -var _pthread_attr_getstacksize uintptr +var _pthread_attr_getstacksize uint8 var pthread_attr_getstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_getstacksize)) //go:linkname _pthread_attr_setstacksize _pthread_attr_setstacksize -var _pthread_attr_setstacksize uintptr +var _pthread_attr_setstacksize uint8 var pthread_attr_setstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_setstacksize)) //go:linkname _pthread_attr_destroy _pthread_attr_destroy -var _pthread_attr_destroy uintptr +var _pthread_attr_destroy uint8 var pthread_attr_destroyABI0 = uintptr(unsafe.Pointer(&_pthread_attr_destroy)) //go:linkname _pthread_mutex_lock _pthread_mutex_lock -var _pthread_mutex_lock uintptr +var _pthread_mutex_lock uint8 var pthread_mutex_lockABI0 = 
uintptr(unsafe.Pointer(&_pthread_mutex_lock)) //go:linkname _pthread_mutex_unlock _pthread_mutex_unlock -var _pthread_mutex_unlock uintptr +var _pthread_mutex_unlock uint8 var pthread_mutex_unlockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_unlock)) //go:linkname _pthread_cond_broadcast _pthread_cond_broadcast -var _pthread_cond_broadcast uintptr +var _pthread_cond_broadcast uint8 var pthread_cond_broadcastABI0 = uintptr(unsafe.Pointer(&_pthread_cond_broadcast)) //go:linkname _pthread_setspecific _pthread_setspecific -var _pthread_setspecific uintptr +var _pthread_setspecific uint8 var pthread_setspecificABI0 = uintptr(unsafe.Pointer(&_pthread_setspecific)) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go index 7341fecd..54aaa462 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go @@ -3,6 +3,8 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors +//go:build !cgo + package fakecgo //go:cgo_import_dynamic purego_malloc malloc "/usr/lib/libSystem.B.dylib" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go index bff096d5..81538119 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go @@ -3,6 +3,8 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors +//go:build !cgo + package fakecgo //go:cgo_import_dynamic purego_malloc malloc "libc.so.7" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go index ee3ab7aa..180057d0 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go @@ -3,6 +3,8 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors +//go:build !cgo + package fakecgo //go:cgo_import_dynamic purego_malloc malloc "libc.so.6" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s index 24b62060..c9a3cc09 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s @@ -1,7 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors -//go:build darwin || linux || freebsd +//go:build !cgo && (darwin || linux || freebsd) /* trampoline for emulating required C functions for cgo in go (see cgo.go) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s index 9c80fe2f..9dbdbc01 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s @@ -1,7 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors -//go:build darwin || freebsd || linux +//go:build !cgo && (darwin || freebsd || linux) #include "textflag.h" #include "go_asm.h" diff --git 
a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s index e8726376..a65b2012 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2022 The Ebitengine Authors -//go:build darwin || freebsd || linux +//go:build !cgo && (darwin || freebsd || linux) #include "textflag.h" diff --git a/vendor/github.com/ebitengine/purego/struct_amd64.go b/vendor/github.com/ebitengine/purego/struct_amd64.go index 06a82dd8..f3514c98 100644 --- a/vendor/github.com/ebitengine/purego/struct_amd64.go +++ b/vendor/github.com/ebitengine/purego/struct_amd64.go @@ -111,7 +111,7 @@ func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFl return keepAlive } -func postMerger(t reflect.Type) bool { +func postMerger(t reflect.Type) (passInMemory bool) { // (c) If the size of the aggregate exceeds two eightbytes and the first eightbyte isn’t SSE or any other // eightbyte isn’t SSEUP, the whole argument is passed in memory. if t.Kind() != reflect.Struct { @@ -120,19 +120,7 @@ func postMerger(t reflect.Type) bool { if t.Size() <= 2*8 { return false } - first := getFirst(t).Kind() - if first != reflect.Float32 && first != reflect.Float64 { - return false - } - return true -} - -func getFirst(t reflect.Type) reflect.Type { - first := t.Field(0).Type - if first.Kind() == reflect.Struct { - return getFirst(first) - } - return first + return true // Go does not have an SSE/SSEUP type so this is always true } func tryPlaceRegister(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) (ok bool) { @@ -196,7 +184,7 @@ func tryPlaceRegister(v reflect.Value, addFloat func(uintptr), addInt func(uintp val |= uint64(f.Int()&0xFFFF_FFFF) << shift shift += 32 class |= _INTEGER - case reflect.Int64: + case reflect.Int64, reflect.Int: val = uint64(f.Int()) shift = 64 class = _INTEGER @@ -212,7 +200,7 @@ func tryPlaceRegister(v reflect.Value, addFloat func(uintptr), addInt func(uintp val |= f.Uint() << shift shift += 32 class |= _INTEGER - case reflect.Uint64: + case reflect.Uint64, reflect.Uint: val = f.Uint() shift = 64 class = _INTEGER diff --git a/vendor/github.com/ebitengine/purego/sys_amd64.s b/vendor/github.com/ebitengine/purego/sys_amd64.s index 4eb26d65..cabde1a5 100644 --- a/vendor/github.com/ebitengine/purego/sys_amd64.s +++ b/vendor/github.com/ebitengine/purego/sys_amd64.s @@ -122,6 +122,9 @@ TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0 PUSHQ R10 // push the stack pointer below registers + // Switch from the host ABI to the Go ABI. + PUSH_REGS_HOST_TO_ABI0() + // determine index into runtime·cbs table MOVQ $callbackasm(SB), DX SUBQ DX, AX @@ -130,9 +133,6 @@ TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0 DIVL CX SUBQ $1, AX // subtract 1 because return PC is to the next slot - // Switch from the host ABI to the Go ABI. - PUSH_REGS_HOST_TO_ABI0() - // Create a struct callbackArgs on our stack to be passed as // the "frame" to cgocallback and on to callbackWrap.
// $24 to make enough room for the arguments to runtime.cgocallback diff --git a/vendor/github.com/ebitengine/purego/syscall.go b/vendor/github.com/ebitengine/purego/syscall.go index f38e4a2f..c30688dd 100644 --- a/vendor/github.com/ebitengine/purego/syscall.go +++ b/vendor/github.com/ebitengine/purego/syscall.go @@ -5,11 +5,24 @@ package purego +// CDecl marks a function as being called using the __cdecl calling convention as defined in +// the [MSDocs] when passed to NewCallback. It must be the first argument to the function. +// This is only useful on 386 Windows, but it is safe to use on other platforms. +// +// [MSDocs]: https://learn.microsoft.com/en-us/cpp/cpp/cdecl?view=msvc-170 +type CDecl struct{} + const ( maxArgs = 15 numOfFloats = 8 // arm64 and amd64 both have 8 float registers ) +type syscall15Args struct { + fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr + f1, f2, f3, f4, f5, f6, f7, f8 uintptr + arm64_r8 uintptr +} + // SyscallN takes fn, a C function pointer and a list of arguments as uintptr. // There is an internal maximum number of arguments that SyscallN can take. It panics // when the maximum is exceeded. It returns the result and the libc error code if there is one. diff --git a/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go index ee2bb218..36ee14e3 100644 --- a/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go +++ b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go @@ -6,21 +6,11 @@ package purego import ( - _ "unsafe" // for go:linkname - "github.com/ebitengine/purego/internal/cgo" ) var syscall15XABI0 = uintptr(cgo.Syscall15XABI0) -// this is only here to make the assembly files happy :) -type syscall15Args struct { - fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr - f1, f2, f3, f4, f5, f6, f7, f8 uintptr - r1, r2, err uintptr - arm64_r8 uintptr -} - //go:nosplit func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { return cgo.Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) diff --git a/vendor/github.com/ebitengine/purego/syscall_sysv.go b/vendor/github.com/ebitengine/purego/syscall_sysv.go index f41234bc..cce171c8 100644 --- a/vendor/github.com/ebitengine/purego/syscall_sysv.go +++ b/vendor/github.com/ebitengine/purego/syscall_sysv.go @@ -14,12 +14,6 @@ import ( var syscall15XABI0 uintptr -type syscall15Args struct { - fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr - f1, f2, f3, f4, f5, f6, f7, f8 uintptr - arm64_r8 uintptr -} - //go:nosplit func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { args := syscall15Args{ @@ -38,6 +32,16 @@ func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a // for these callbacks is never released. At least 2000 callbacks can always be created. Although this function // provides similar functionality to windows.NewCallback it is distinct. 
 func NewCallback(fn interface{}) uintptr {
+	ty := reflect.TypeOf(fn)
+	for i := 0; i < ty.NumIn(); i++ {
+		in := ty.In(i)
+		if !in.AssignableTo(reflect.TypeOf(CDecl{})) {
+			continue
+		}
+		if i != 0 {
+			panic("purego: CDecl must be the first argument")
+		}
+	}
 	return compileCallback(fn)
 }
 
@@ -79,7 +83,12 @@ func compileCallback(fn interface{}) uintptr {
 	for i := 0; i < ty.NumIn(); i++ {
 		in := ty.In(i)
 		switch in.Kind() {
-		case reflect.Struct, reflect.Interface, reflect.Func, reflect.Slice,
+		case reflect.Struct:
+			if i == 0 && in.AssignableTo(reflect.TypeOf(CDecl{})) {
+				continue
+			}
+			fallthrough
+		case reflect.Interface, reflect.Func, reflect.Slice,
 			reflect.Chan, reflect.Complex64, reflect.Complex128,
 			reflect.String, reflect.Map, reflect.Invalid:
 			panic("purego: unsupported argument type: " + in.Kind().String())
@@ -149,7 +158,12 @@ func callbackWrap(a *callbackArgs) {
 				pos = floatsN
 			}
 			floatsN++
+		case reflect.Struct:
+			// This is the CDecl field
+			args[i] = reflect.Zero(fnType.In(i))
+			continue
 		default:
+
 			if intsN >= numOfIntegerRegisters() {
 				pos = stack
 				stack++
diff --git a/vendor/github.com/ebitengine/purego/syscall_windows.go b/vendor/github.com/ebitengine/purego/syscall_windows.go
index a45693f9..5fbfcabf 100644
--- a/vendor/github.com/ebitengine/purego/syscall_windows.go
+++ b/vendor/github.com/ebitengine/purego/syscall_windows.go
@@ -4,20 +4,12 @@
 package purego
 
 import (
+	"reflect"
 	"syscall"
-	_ "unsafe" // only for go:linkname
-
-	"golang.org/x/sys/windows"
 )
 
 var syscall15XABI0 uintptr
 
-type syscall15Args struct {
-	fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr
-	f1, f2, f3, f4, f5, f6, f7, f8 uintptr
-	arm64_r8 uintptr
-}
-
 func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
 	r1, r2, errno := syscall.Syscall15(fn, 15, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15)
 	return r1, r2, uintptr(errno)
@@ -31,15 +23,24 @@ func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a
 // callbacks can always be created. Although this function is similar to the darwin version it may act
 // differently.
 func NewCallback(fn interface{}) uintptr {
+	isCDecl := false
+	ty := reflect.TypeOf(fn)
+	for i := 0; i < ty.NumIn(); i++ {
+		in := ty.In(i)
+		if !in.AssignableTo(reflect.TypeOf(CDecl{})) {
+			continue
+		}
+		if i != 0 {
+			panic("purego: CDecl must be the first argument")
+		}
+		isCDecl = true
+	}
+	if isCDecl {
+		return syscall.NewCallbackCDecl(fn)
+	}
 	return syscall.NewCallback(fn)
 }
 
-//go:linkname openLibrary openLibrary
-func openLibrary(name string) (uintptr, error) {
-	handle, err := windows.LoadLibrary(name)
-	return uintptr(handle), err
-}
-
 func loadSymbol(handle uintptr, name string) (uintptr, error) {
-	return windows.GetProcAddress(windows.Handle(handle), name)
+	return syscall.GetProcAddress(syscall.Handle(handle), name)
 }
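A minimal sketch (not part of the vendored sources) of how this purego surface is driven: SyscallN calls a foreign function through its raw address, and a leading CDecl parameter opts a callback into __cdecl (meaningful only on 386 Windows, ignored elsewhere). The symbol address here is assumed to come from the platform loader (Dlopen/Dlsym or LoadLibrary), which is elided:

```go
package example

import (
	"unsafe"

	"github.com/ebitengine/purego"
)

// callPuts invokes a C function with puts's signature through its raw
// address (obtaining that address via the platform loader is elided).
func callPuts(puts uintptr, s string) uintptr {
	buf := append([]byte(s), 0) // NUL-terminate for C
	r1, _, _ := purego.SyscallN(puts, uintptr(unsafe.Pointer(&buf[0])))
	return r1
}

// newAdder compiles a Go function into a C-callable pointer. The CDecl
// marker must be the first parameter or NewCallback panics.
func newAdder() uintptr {
	return purego.NewCallback(func(_ purego.CDecl, a, b uintptr) uintptr {
		return a + b
	})
}
```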
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
index 0cffafa7..0ed62c1a 100644
--- a/vendor/github.com/go-logr/logr/.golangci.yaml
+++ b/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -1,26 +1,28 @@
+version: "2"
+
 run:
   timeout: 1m
   tests: true
 
 linters:
-  disable-all: true
-  enable:
+  default: none
+  enable: # please keep this alphabetized
+    - asasalint
     - asciicheck
+    - copyloopvar
+    - dupl
     - errcheck
     - forcetypeassert
+    - goconst
     - gocritic
-    - gofmt
-    - goimports
-    - gosimple
     - govet
     - ineffassign
     - misspell
+    - musttag
     - revive
     - staticcheck
-    - typecheck
     - unused
 
 issues:
-  exclude-use-default: false
   max-issues-per-linter: 0
   max-same-issues: 10
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
index 30568e76..b22c57d7 100644
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -77,7 +77,7 @@ func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
 		write: fn,
 	}
 	// For skipping fnlogger.Info and fnlogger.Error.
-	l.Formatter.AddCallDepth(1)
+	l.AddCallDepth(1) // via Formatter
 	return l
 }
 
@@ -164,17 +164,17 @@ type fnlogger struct {
 }
 
 func (l fnlogger) WithName(name string) logr.LogSink {
-	l.Formatter.AddName(name)
+	l.AddName(name) // via Formatter
 	return &l
 }
 
 func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
-	l.Formatter.AddValues(kvList)
+	l.AddValues(kvList) // via Formatter
 	return &l
 }
 
 func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
-	l.Formatter.AddCallDepth(depth)
+	l.AddCallDepth(depth) // via Formatter
 	return &l
 }
diff --git a/vendor/github.com/go-ole/go-ole/.travis.yml b/vendor/github.com/go-ole/go-ole/.travis.yml
new file mode 100644
index 00000000..28f740cd
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+sudo: false
+
+go:
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+  - tip
diff --git a/vendor/github.com/go-ole/go-ole/ChangeLog.md b/vendor/github.com/go-ole/go-ole/ChangeLog.md
new file mode 100644
index 00000000..4ba6a8c6
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ChangeLog.md
@@ -0,0 +1,49 @@
+# Version 1.x.x
+
+* **Add more test cases and reference new test COM server project.**
+
+# Version 1.2.0-alphaX
+
+**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.**
+
+ * Added CI configuration for Travis-CI and AppVeyor.
+ * Added test InterfaceID and ClassID for the COM Test Server project.
+ * Added more inline documentation (#83).
+ * Added IEnumVARIANT implementation (#88).
+ * Added IEnumVARIANT test cases (#99, #100, #101).
+ * Added support for retrieving `time.Time` from VARIANT (#92).
+ * Added test case for IUnknown (#64). + * Added test case for IDispatch (#64). + * Added test cases for scalar variants (#64, #76). + +# Version 1.1.1 + + * Fixes for Linux build. + * Fixes for Windows build. + +# Version 1.1.0 + +The change to provide building on all platforms is a new feature. The increase in minor version reflects that and allows those who wish to stay on 1.0.x to continue to do so. Support for 1.0.x will be limited to bug fixes. + + * Move GUID out of variables.go into its own file to make new documentation available. + * Move OleError out of ole.go into its own file to make new documentation available. + * Add documentation to utility functions. + * Add documentation to variant receiver functions. + * Add documentation to ole structures. + * Make variant available to other systems outside of Windows. + * Make OLE structures available to other systems outside of Windows. + +## New Features + + * Library should now be built on all platforms supported by Go. Library will NOOP on any platform that is not Windows. + * More functions are now documented and available on godoc.org. + +# Version 1.0.1 + + 1. Fix package references from repository location change. + +# Version 1.0.0 + +This version is stable enough for use. The COM API is still incomplete, but provides enough functionality for accessing COM servers using IDispatch interface. + +There is no changelog for this version. Check commits for history. diff --git a/vendor/github.com/go-ole/go-ole/LICENSE b/vendor/github.com/go-ole/go-ole/LICENSE new file mode 100644 index 00000000..623ec06f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright © 2013-2017 Yasuhiro Matsumoto, + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-ole/go-ole/README.md b/vendor/github.com/go-ole/go-ole/README.md new file mode 100644 index 00000000..7b577558 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/README.md @@ -0,0 +1,46 @@ +# Go OLE + +[![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28) +[![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole) +[![GoDoc](https://godoc.org/github.com/go-ole/go-ole?status.svg)](https://godoc.org/github.com/go-ole/go-ole) + +Go bindings for Windows COM using shared libraries instead of cgo. + +By Yasuhiro Matsumoto. 
+ +## Install + +To experiment with go-ole, you can just compile and run the example program: + +``` +go get github.com/go-ole/go-ole +cd /path/to/go-ole/ +go test + +cd /path/to/go-ole/example/excel +go run excel.go +``` + +## Continuous Integration + +Continuous integration configuration has been added for both Travis-CI and AppVeyor. You will have to add these to your own account for your fork in order for it to run. + +**Travis-CI** + +Travis-CI was added to check builds on Linux to ensure that `go get` works when cross building. Currently, Travis-CI is not used to test cross-building, but this may be changed in the future. It is also not currently possible to test the library on Linux, since COM API is specific to Windows and it is not currently possible to run a COM server on Linux or even connect to a remote COM server. + +**AppVeyor** + +AppVeyor is used to build on Windows using the (in-development) test COM server. It is currently only used to test the build and ensure that the code works on Windows. It will be used to register a COM server and then run the test cases based on the test COM server. + +The tests currently do run and do pass and this should be maintained with commits. + +## Versioning + +Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language. Which means that the major version will always maintain backwards compatibility with minor versions. Minor versions will only add new additions and changes. Fixes will always be in patch. + +This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. Leave a ticket, if there is breakage, so that it could be fixed. + +## LICENSE + +Under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/go-ole/go-ole/SECURITY.md b/vendor/github.com/go-ole/go-ole/SECURITY.md new file mode 100644 index 00000000..dac28152 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/go-ole/go-ole/security/advisories/new). + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml new file mode 100644 index 00000000..8df7fa26 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/appveyor.yml @@ -0,0 +1,68 @@ +# Notes: +# - Minimal appveyor.yml file is an empty file. All sections are optional. +# - Indent each level of configuration with 2 spaces. Do not use tabs! +# - All section names are case-sensitive. +# - Section names should be unique on each level. 
+ +version: "1.3.0.{build}-alpha-{branch}" + +os: Visual Studio 2019 + +build: off + +skip_tags: true + +clone_folder: c:\gopath\src\github.com\go-ole\go-ole + +environment: + GOPATH: c:\gopath + GOROOT: c:\go + DOWNLOADPLATFORM: "x64" + +before_test: + # - Download COM Server + - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip" + - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL + - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat + +test_script: + - go test -v -cover ./... + # go vet has false positives on unsafe.Pointer with windows/sys. Disabling since it is recommended to use go test instead. + # - go vet ./... + +branches: + only: + - master + - v1.2 + - v1.1 + - v1.0 + +matrix: + allow_failures: + - environment: + GOROOT: C:\go-x86 + DOWNLOADPLATFORM: "x86" + - environment: + GOROOT: C:\go118 + DOWNLOADPLATFORM: "x64" + - environment: + GOROOT: C:\go118-x86 + DOWNLOADPLATFORM: "x86" + +install: + - go version + - go env + - go get -u golang.org/x/tools/cmd/cover + - go get -u golang.org/x/tools/cmd/godoc + - go get -u golang.org/x/tools/cmd/stringer + +build_script: + - cd c:\gopath\src\github.com\go-ole\go-ole + - go get -v -t ./... + - go build + +# disable automatic tests +test: on + +# disable deployment +deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go new file mode 100644 index 00000000..cabbac01 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/com.go @@ -0,0 +1,386 @@ +// +build windows + +package ole + +import ( + "syscall" + "unicode/utf16" + "unsafe" +) + +var ( + procCoInitialize = modole32.NewProc("CoInitialize") + procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoInitializeSecurity = modole32.NewProc("CoInitializeSecurity") + procCoUninitialize = modole32.NewProc("CoUninitialize") + procCoCreateInstance = modole32.NewProc("CoCreateInstance") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procCLSIDFromProgID = modole32.NewProc("CLSIDFromProgID") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procStringFromCLSID = modole32.NewProc("StringFromCLSID") + procStringFromIID = modole32.NewProc("StringFromIID") + procIIDFromString = modole32.NewProc("IIDFromString") + procCoGetObject = modole32.NewProc("CoGetObject") + procGetUserDefaultLCID = modkernel32.NewProc("GetUserDefaultLCID") + procCopyMemory = modkernel32.NewProc("RtlMoveMemory") + procVariantInit = modoleaut32.NewProc("VariantInit") + procVariantClear = modoleaut32.NewProc("VariantClear") + procVariantTimeToSystemTime = modoleaut32.NewProc("VariantTimeToSystemTime") + procSysAllocString = modoleaut32.NewProc("SysAllocString") + procSysAllocStringLen = modoleaut32.NewProc("SysAllocStringLen") + procSysFreeString = modoleaut32.NewProc("SysFreeString") + procSysStringLen = modoleaut32.NewProc("SysStringLen") + procCreateDispTypeInfo = modoleaut32.NewProc("CreateDispTypeInfo") + procCreateStdDispatch = modoleaut32.NewProc("CreateStdDispatch") + procGetActiveObject = modoleaut32.NewProc("GetActiveObject") + + procGetMessageW = moduser32.NewProc("GetMessageW") + procDispatchMessageW = moduser32.NewProc("DispatchMessageW") +) + +// This is to enable calling COM Security initialization multiple times +var bSecurityInit bool = false + +// coInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. 
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func coInitialize() (err error) {
+	// http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx
+	// Suggests that no value should be passed to CoInitialize().
+	// Could just be Call() since the parameter is optional. <-- Needs testing to be sure.
+	hr, _, _ := procCoInitialize.Call(uintptr(0))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	return
+}
+
+// coInitializeEx initializes COM library with concurrency model.
+func coInitializeEx(coinit uint32) (err error) {
+	// http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx
+	// Suggests that the first parameter is not only optional but should always be NULL.
+	hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	return
+}
+
+// coInitializeSecurity: Registers security and sets the default security values
+// for the process.
+func coInitializeSecurity(cAuthSvc int32,
+	dwAuthnLevel uint32,
+	dwImpLevel uint32,
+	dwCapabilities uint32) (err error) {
+	// Check whether COM Security initialization has been done previously
+	if !bSecurityInit {
+		// https://learn.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-coinitializesecurity
+		hr, _, _ := procCoInitializeSecurity.Call(
+			uintptr(0),              // Allow *all* VSS writers to communicate back!
+			uintptr(cAuthSvc),       // Default COM authentication service
+			uintptr(0),              // Default COM authorization service
+			uintptr(0),              // Reserved parameter
+			uintptr(dwAuthnLevel),   // Strongest COM authentication level
+			uintptr(dwImpLevel),     // Minimal impersonation abilities
+			uintptr(0),              // Default COM authentication settings
+			uintptr(dwCapabilities), // Cloaking
+			uintptr(0))              // Reserved parameter
+		if hr != 0 {
+			err = NewError(hr)
+		} else {
+			// COM Security initialization done; make global flag true.
+			bSecurityInit = true
+		}
+	}
+	return
+}
+
+// CoInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func CoInitialize(p uintptr) (err error) {
+	// p is ignored and won't be used.
+	// Avoid any variable not used errors.
+	p = uintptr(0)
+	return coInitialize()
+}
+
+// CoInitializeEx initializes COM library with concurrency model.
+func CoInitializeEx(p uintptr, coinit uint32) (err error) {
+	// Avoid any variable not used errors.
+	p = uintptr(0)
+	return coInitializeEx(coinit)
+}
+
+// CoUninitialize uninitializes COM Library.
+func CoUninitialize() {
+	procCoUninitialize.Call()
+}
+
+// CoInitializeSecurity: Registers security and sets the default security values
+// for the process.
+func CoInitializeSecurity(cAuthSvc int32,
+	dwAuthnLevel uint32,
+	dwImpLevel uint32,
+	dwCapabilities uint32) (err error) {
+	return coInitializeSecurity(cAuthSvc, dwAuthnLevel, dwImpLevel, dwCapabilities)
+}
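As a usage sketch (not from the vendored tree), the exported pairs above compose into a simple init/teardown wrapper; the apartment model is the caller's choice:

```go
package example

import ole "github.com/go-ole/go-ole"

// withCOM runs fn inside a COM-initialized thread context. Swap in
// COINIT_MULTITHREADED if the caller wants an MTA instead of an STA.
func withCOM(fn func() error) error {
	if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED); err != nil {
		return err
	}
	defer ole.CoUninitialize()
	return fn()
}
```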
+// CoTaskMemFree frees memory pointer.
+func CoTaskMemFree(memptr uintptr) {
+	procCoTaskMemFree.Call(memptr)
+}
+
+// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier.
+//
+// The Programmatic Identifier must be registered, because it will be looked up
+// in the Windows Registry. The registry entry has the following keys: CLSID,
+// Insertable, Protocol and Shell
+// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx).
+//
+// programID identifies the class id with less precision and is not guaranteed
+// to be unique. These are usually found in the registry under
+// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
+// "Program.Component.Version" with version being optional.
+//
+// CLSIDFromProgID in Windows API.
+func CLSIDFromProgID(progId string) (clsid *GUID, err error) {
+	var guid GUID
+	lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
+	hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	clsid = &guid
+	return
+}
+
+// CLSIDFromString retrieves Class ID from string representation.
+//
+// This is technically the string version of the GUID and will convert the
+// string to object.
+//
+// CLSIDFromString in Windows API.
+func CLSIDFromString(str string) (clsid *GUID, err error) {
+	var guid GUID
+	lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str)))
+	hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	clsid = &guid
+	return
+}
+
+// StringFromCLSID returns GUID formatted string from GUID object.
+func StringFromCLSID(clsid *GUID) (str string, err error) {
+	var p *uint16
+	hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	str = LpOleStrToString(p)
+	return
+}
+
+// IIDFromString returns GUID from program ID.
+func IIDFromString(progId string) (clsid *GUID, err error) {
+	var guid GUID
+	lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
+	hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	clsid = &guid
+	return
+}
+
+// StringFromIID returns GUID formatted string from GUID object.
+func StringFromIID(iid *GUID) (str string, err error) {
+	var p *uint16
+	hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	str = LpOleStrToString(p)
+	return
+}
+
+// CreateInstance of single uninitialized object with GUID.
+func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
+	if iid == nil {
+		iid = IID_IUnknown
+	}
+	hr, _, _ := procCoCreateInstance.Call(
+		uintptr(unsafe.Pointer(clsid)),
+		0,
+		CLSCTX_SERVER,
+		uintptr(unsafe.Pointer(iid)),
+		uintptr(unsafe.Pointer(&unk)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	return
+}
+
+// GetActiveObject retrieves pointer to active object.
+func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
+	if iid == nil {
+		iid = IID_IUnknown
+	}
+	hr, _, _ := procGetActiveObject.Call(
+		uintptr(unsafe.Pointer(clsid)),
+		uintptr(unsafe.Pointer(iid)),
+		uintptr(unsafe.Pointer(&unk)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	return
+}
+
+type BindOpts struct {
+	CbStruct          uint32
+	GrfFlags          uint32
+	GrfMode           uint32
+	TickCountDeadline uint32
+}
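The activation helpers above chain together as below (sketch only; "Excel.Application" stands in for any registered ProgID, and QueryInterface/Release come from the IUnknown wrapper defined elsewhere in this package):

```go
package example

import ole "github.com/go-ole/go-ole"

// dispatchFor resolves a ProgID to an IDispatch pointer.
func dispatchFor(progID string) (*ole.IDispatch, error) {
	clsid, err := ole.CLSIDFromProgID(progID) // e.g. "Excel.Application"
	if err != nil {
		return nil, err
	}
	unk, err := ole.CreateInstance(clsid, ole.IID_IUnknown)
	if err != nil {
		return nil, err
	}
	defer unk.Release()
	// Ask the object for its IDispatch interface.
	return unk.QueryInterface(ole.IID_IDispatch)
}
```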
+// GetObject retrieves pointer to active object.
+func GetObject(programID string, bindOpts *BindOpts, iid *GUID) (unk *IUnknown, err error) {
+	if bindOpts != nil {
+		bindOpts.CbStruct = uint32(unsafe.Sizeof(BindOpts{}))
+	}
+	if iid == nil {
+		iid = IID_IUnknown
+	}
+	hr, _, _ := procCoGetObject.Call(
+		uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(programID))),
+		uintptr(unsafe.Pointer(bindOpts)),
+		uintptr(unsafe.Pointer(iid)),
+		uintptr(unsafe.Pointer(&unk)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	return
+}
+
+// VariantInit initializes variant.
+func VariantInit(v *VARIANT) (err error) {
+	hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	return
+}
+
+// VariantClear clears value in Variant settings to VT_EMPTY.
+func VariantClear(v *VARIANT) (err error) {
+	hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	return
+}
+
+// SysAllocString allocates memory for string and copies string into memory.
+func SysAllocString(v string) (ss *int16) {
+	pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v))))
+	ss = (*int16)(unsafe.Pointer(pss))
+	return
+}
+
+// SysAllocStringLen copies up to length of given string returning pointer.
+func SysAllocStringLen(v string) (ss *int16) {
+	utf16 := utf16.Encode([]rune(v + "\x00"))
+	ptr := &utf16[0]
+
+	pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1))
+	ss = (*int16)(unsafe.Pointer(pss))
+	return
+}
+
+// SysFreeString frees string system memory. This must be called with SysAllocString.
+func SysFreeString(v *int16) (err error) {
+	hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	return
+}
+
+// SysStringLen is the length of the system allocated string.
+func SysStringLen(v *int16) uint32 {
+	l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v)))
+	return uint32(l)
+}
+
+// CreateStdDispatch provides default IDispatch implementation for IUnknown.
+//
+// This handles default IDispatch implementation for objects. It has a few
+// limitations with only supporting one language. It will also only return
+// default exception codes.
+func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) {
+	hr, _, _ := procCreateStdDispatch.Call(
+		uintptr(unsafe.Pointer(unk)),
+		v,
+		uintptr(unsafe.Pointer(ptinfo)),
+		uintptr(unsafe.Pointer(&disp)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	return
+}
+
+// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
+//
+// This will not handle the full implementation of the interface.
+func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) {
+	hr, _, _ := procCreateDispTypeInfo.Call(
+		uintptr(unsafe.Pointer(idata)),
+		uintptr(GetUserDefaultLCID()),
+		uintptr(unsafe.Pointer(&pptinfo)))
+	if hr != 0 {
+		err = NewError(hr)
+	}
+	return
+}
+
+// copyMemory moves location of a block of memory.
+func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {
+	procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length))
+}
+
+// GetUserDefaultLCID retrieves current user default locale.
+func GetUserDefaultLCID() (lcid uint32) {
+	ret, _, _ := procGetUserDefaultLCID.Call()
+	lcid = uint32(ret)
+	return
+}
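GetMessage and DispatchMessage (defined next) combine into the classic message pump; a sketch, with Msg being the message struct declared elsewhere in this package:

```go
package example

import ole "github.com/go-ole/go-ole"

// pump dispatches queued messages until WM_QUIT (ret == 0) or an error
// (ret == -1) ends the loop. GetMessage blocks between messages.
func pump() {
	var msg ole.Msg
	for {
		ret, _ := ole.GetMessage(&msg, 0, 0, 0)
		if ret <= 0 {
			return
		}
		ole.DispatchMessage(&msg)
	}
}
```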
+// GetMessage in message queue from runtime.
+//
+// This function appears to block. PeekMessage does not block.
+func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) {
+	r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax))
+	ret = int32(r0)
+	return
+}
+
+// DispatchMessage to window procedure.
+func DispatchMessage(msg *Msg) (ret int32) {
+	r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg)))
+	ret = int32(r0)
+	return
+}
diff --git a/vendor/github.com/go-ole/go-ole/com_func.go b/vendor/github.com/go-ole/go-ole/com_func.go
new file mode 100644
index 00000000..cef539d9
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/com_func.go
@@ -0,0 +1,174 @@
+// +build !windows
+
+package ole
+
+import (
+	"time"
+	"unsafe"
+)
+
+// coInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func coInitialize() error {
+	return NewError(E_NOTIMPL)
+}
+
+// coInitializeEx initializes COM library with concurrency model.
+func coInitializeEx(coinit uint32) error {
+	return NewError(E_NOTIMPL)
+}
+
+// CoInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func CoInitialize(p uintptr) error {
+	return NewError(E_NOTIMPL)
+}
+
+// CoInitializeEx initializes COM library with concurrency model.
+func CoInitializeEx(p uintptr, coinit uint32) error {
+	return NewError(E_NOTIMPL)
+}
+
+// CoUninitialize uninitializes COM Library.
+func CoUninitialize() {}
+
+// CoTaskMemFree frees memory pointer.
+func CoTaskMemFree(memptr uintptr) {}
+
+// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier.
+//
+// The Programmatic Identifier must be registered, because it will be looked up
+// in the Windows Registry. The registry entry has the following keys: CLSID,
+// Insertable, Protocol and Shell
+// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx).
+//
+// programID identifies the class id with less precision and is not guaranteed
+// to be unique. These are usually found in the registry under
+// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
+// "Program.Component.Version" with version being optional.
+//
+// CLSIDFromProgID in Windows API.
+func CLSIDFromProgID(progId string) (*GUID, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// CLSIDFromString retrieves Class ID from string representation.
+//
+// This is technically the string version of the GUID and will convert the
+// string to object.
+//
+// CLSIDFromString in Windows API.
+func CLSIDFromString(str string) (*GUID, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// StringFromCLSID returns GUID formatted string from GUID object.
+func StringFromCLSID(clsid *GUID) (string, error) {
+	return "", NewError(E_NOTIMPL)
+}
+// IIDFromString returns GUID from program ID.
+func IIDFromString(progId string) (*GUID, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// StringFromIID returns GUID formatted string from GUID object.
+func StringFromIID(iid *GUID) (string, error) {
+	return "", NewError(E_NOTIMPL)
+}
+
+// CreateInstance of single uninitialized object with GUID.
+func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// GetActiveObject retrieves pointer to active object.
+func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// VariantInit initializes variant.
+func VariantInit(v *VARIANT) error {
+	return NewError(E_NOTIMPL)
+}
+
+// VariantClear clears value in Variant settings to VT_EMPTY.
+func VariantClear(v *VARIANT) error {
+	return NewError(E_NOTIMPL)
+}
+
+// SysAllocString allocates memory for string and copies string into memory.
+func SysAllocString(v string) *int16 {
+	u := int16(0)
+	return &u
+}
+
+// SysAllocStringLen copies up to length of given string returning pointer.
+func SysAllocStringLen(v string) *int16 {
+	u := int16(0)
+	return &u
+}
+
+// SysFreeString frees string system memory. This must be called with SysAllocString.
+func SysFreeString(v *int16) error {
+	return NewError(E_NOTIMPL)
+}
+
+// SysStringLen is the length of the system allocated string.
+func SysStringLen(v *int16) uint32 {
+	return uint32(0)
+}
+
+// CreateStdDispatch provides default IDispatch implementation for IUnknown.
+//
+// This handles default IDispatch implementation for objects. It has a few
+// limitations with only supporting one language. It will also only return
+// default exception codes.
+func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
+//
+// This will not handle the full implementation of the interface.
+func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// copyMemory moves location of a block of memory.
+func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {}
+
+// GetUserDefaultLCID retrieves current user default locale.
+func GetUserDefaultLCID() uint32 {
+	return uint32(0)
+}
+
+// GetMessage in message queue from runtime.
+//
+// This function appears to block. PeekMessage does not block.
+func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) {
+	return int32(0), NewError(E_NOTIMPL)
+}
+
+// DispatchMessage to window procedure.
+func DispatchMessage(msg *Msg) int32 {
+	return int32(0)
+}
+
+func GetVariantDate(value uint64) (time.Time, error) {
+	return time.Now(), NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/connect.go b/vendor/github.com/go-ole/go-ole/connect.go
new file mode 100644
index 00000000..b2ac2ec6
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/connect.go
@@ -0,0 +1,192 @@
+package ole
+
+// Connection contains IUnknown for fluent interface interaction.
+//
+// Deprecated. Use oleutil package instead.
+type Connection struct {
+	Object *IUnknown // Access COM
+}
+
+// Initialize COM.
+func (*Connection) Initialize() (err error) {
+	return coInitialize()
+}
+
+// Uninitialize COM.
+func (*Connection) Uninitialize() {
+	CoUninitialize()
+}
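A sketch of this deprecated fluent interface end to end (the ProgID and method name are placeholders):

```go
package example

import ole "github.com/go-ole/go-ole"

func fluent() error {
	conn := &ole.Connection{}
	if err := conn.Initialize(); err != nil {
		return err
	}
	defer conn.Uninitialize()

	if err := conn.Create("Some.Application"); err != nil { // hypothetical ProgID
		return err
	}
	defer conn.Release()

	disp, err := conn.Dispatch()
	if err != nil {
		return err
	}
	defer disp.Release()

	_, err = disp.Call("DoWork", 42) // hypothetical method
	return err
}
```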
+// Create IUnknown object based first on ProgId and then from String.
+func (c *Connection) Create(progId string) (err error) {
+	var clsid *GUID
+	clsid, err = CLSIDFromProgID(progId)
+	if err != nil {
+		clsid, err = CLSIDFromString(progId)
+		if err != nil {
+			return
+		}
+	}
+
+	unknown, err := CreateInstance(clsid, IID_IUnknown)
+	if err != nil {
+		return
+	}
+	c.Object = unknown
+
+	return
+}
+
+// Release IUnknown object.
+func (c *Connection) Release() {
+	c.Object.Release()
+}
+
+// Load COM object from list of programIDs or strings.
+func (c *Connection) Load(names ...string) (errors []error) {
+	var tempErrors []error = make([]error, len(names))
+	var numErrors int = 0
+	for _, name := range names {
+		err := c.Create(name)
+		if err != nil {
+			tempErrors = append(tempErrors, err)
+			numErrors += 1
+			continue
+		}
+		break
+	}
+
+	copy(errors, tempErrors[0:numErrors])
+	return
+}
+
+// Dispatch returns Dispatch object.
+func (c *Connection) Dispatch() (object *Dispatch, err error) {
+	dispatch, err := c.Object.QueryInterface(IID_IDispatch)
+	if err != nil {
+		return
+	}
+	object = &Dispatch{dispatch}
+	return
+}
+
+// Dispatch stores IDispatch object.
+type Dispatch struct {
+	Object *IDispatch // Dispatch object.
+}
+
+// Call method on IDispatch with parameters.
+func (d *Dispatch) Call(method string, params ...interface{}) (result *VARIANT, err error) {
+	id, err := d.GetId(method)
+	if err != nil {
+		return
+	}
+
+	result, err = d.Invoke(id, DISPATCH_METHOD, params)
+	return
+}
+
+// MustCall method on IDispatch with parameters.
+func (d *Dispatch) MustCall(method string, params ...interface{}) (result *VARIANT) {
+	id, err := d.GetId(method)
+	if err != nil {
+		panic(err)
+	}
+
+	result, err = d.Invoke(id, DISPATCH_METHOD, params)
+	if err != nil {
+		panic(err)
+	}
+
+	return
+}
+
+// Get property on IDispatch with parameters.
+func (d *Dispatch) Get(name string, params ...interface{}) (result *VARIANT, err error) {
+	id, err := d.GetId(name)
+	if err != nil {
+		return
+	}
+	result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params)
+	return
+}
+
+// MustGet property on IDispatch with parameters.
+func (d *Dispatch) MustGet(name string, params ...interface{}) (result *VARIANT) {
+	id, err := d.GetId(name)
+	if err != nil {
+		panic(err)
+	}
+
+	result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params)
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+
+// Set property on IDispatch with parameters.
+func (d *Dispatch) Set(name string, params ...interface{}) (result *VARIANT, err error) {
+	id, err := d.GetId(name)
+	if err != nil {
+		return
+	}
+	result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params)
+	return
+}
+
+// MustSet property on IDispatch with parameters.
+func (d *Dispatch) MustSet(name string, params ...interface{}) (result *VARIANT) {
+	id, err := d.GetId(name)
+	if err != nil {
+		panic(err)
+	}
+
+	result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params)
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+
+// GetId retrieves ID of name on IDispatch.
+func (d *Dispatch) GetId(name string) (id int32, err error) {
+	var dispid []int32
+	dispid, err = d.Object.GetIDsOfName([]string{name})
+	if err != nil {
+		return
+	}
+	id = dispid[0]
+	return
+}
+
+// GetIds retrieves all IDs of names on IDispatch.
+func (d *Dispatch) GetIds(names ...string) (dispid []int32, err error) {
+	dispid, err = d.Object.GetIDsOfName(names)
+	return
+}
+
+// Invoke IDispatch on DisplayID of dispatch type with parameters.
+//
+// There have been problems where sending cascading params... through would
+// error out because the parameters would be empty.
+func (d *Dispatch) Invoke(id int32, dispatch int16, params []interface{}) (result *VARIANT, err error) { + if len(params) < 1 { + result, err = d.Object.Invoke(id, dispatch) + } else { + result, err = d.Object.Invoke(id, dispatch, params...) + } + return +} + +// Release IDispatch object. +func (d *Dispatch) Release() { + d.Object.Release() +} + +// Connect initializes COM and attempts to load IUnknown based on given names. +func Connect(names ...string) (connection *Connection) { + connection.Initialize() + connection.Load(names...) + return +} diff --git a/vendor/github.com/go-ole/go-ole/constants.go b/vendor/github.com/go-ole/go-ole/constants.go new file mode 100644 index 00000000..fd0c6d74 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/constants.go @@ -0,0 +1,153 @@ +package ole + +const ( + CLSCTX_INPROC_SERVER = 1 + CLSCTX_INPROC_HANDLER = 2 + CLSCTX_LOCAL_SERVER = 4 + CLSCTX_INPROC_SERVER16 = 8 + CLSCTX_REMOTE_SERVER = 16 + CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER + CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER + CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER +) + +const ( + COINIT_APARTMENTTHREADED = 0x2 + COINIT_MULTITHREADED = 0x0 + COINIT_DISABLE_OLE1DDE = 0x4 + COINIT_SPEED_OVER_MEMORY = 0x8 +) + +const ( + DISPATCH_METHOD = 1 + DISPATCH_PROPERTYGET = 2 + DISPATCH_PROPERTYPUT = 4 + DISPATCH_PROPERTYPUTREF = 8 +) + +const ( + S_OK = 0x00000000 + E_UNEXPECTED = 0x8000FFFF + E_NOTIMPL = 0x80004001 + E_OUTOFMEMORY = 0x8007000E + E_INVALIDARG = 0x80070057 + E_NOINTERFACE = 0x80004002 + E_POINTER = 0x80004003 + E_HANDLE = 0x80070006 + E_ABORT = 0x80004004 + E_FAIL = 0x80004005 + E_ACCESSDENIED = 0x80070005 + E_PENDING = 0x8000000A + + CO_E_CLASSSTRING = 0x800401F3 +) + +const ( + CC_FASTCALL = iota + CC_CDECL + CC_MSCPASCAL + CC_PASCAL = CC_MSCPASCAL + CC_MACPASCAL + CC_STDCALL + CC_FPFASTCALL + CC_SYSCALL + CC_MPWCDECL + CC_MPWPASCAL + CC_MAX = CC_MPWPASCAL +) + +type VT uint16 + +const ( + VT_EMPTY VT = 0x0 + VT_NULL VT = 0x1 + VT_I2 VT = 0x2 + VT_I4 VT = 0x3 + VT_R4 VT = 0x4 + VT_R8 VT = 0x5 + VT_CY VT = 0x6 + VT_DATE VT = 0x7 + VT_BSTR VT = 0x8 + VT_DISPATCH VT = 0x9 + VT_ERROR VT = 0xa + VT_BOOL VT = 0xb + VT_VARIANT VT = 0xc + VT_UNKNOWN VT = 0xd + VT_DECIMAL VT = 0xe + VT_I1 VT = 0x10 + VT_UI1 VT = 0x11 + VT_UI2 VT = 0x12 + VT_UI4 VT = 0x13 + VT_I8 VT = 0x14 + VT_UI8 VT = 0x15 + VT_INT VT = 0x16 + VT_UINT VT = 0x17 + VT_VOID VT = 0x18 + VT_HRESULT VT = 0x19 + VT_PTR VT = 0x1a + VT_SAFEARRAY VT = 0x1b + VT_CARRAY VT = 0x1c + VT_USERDEFINED VT = 0x1d + VT_LPSTR VT = 0x1e + VT_LPWSTR VT = 0x1f + VT_RECORD VT = 0x24 + VT_INT_PTR VT = 0x25 + VT_UINT_PTR VT = 0x26 + VT_FILETIME VT = 0x40 + VT_BLOB VT = 0x41 + VT_STREAM VT = 0x42 + VT_STORAGE VT = 0x43 + VT_STREAMED_OBJECT VT = 0x44 + VT_STORED_OBJECT VT = 0x45 + VT_BLOB_OBJECT VT = 0x46 + VT_CF VT = 0x47 + VT_CLSID VT = 0x48 + VT_BSTR_BLOB VT = 0xfff + VT_VECTOR VT = 0x1000 + VT_ARRAY VT = 0x2000 + VT_BYREF VT = 0x4000 + VT_RESERVED VT = 0x8000 + VT_ILLEGAL VT = 0xffff + VT_ILLEGALMASKED VT = 0xfff + VT_TYPEMASK VT = 0xfff +) + +const ( + DISPID_UNKNOWN = -1 + DISPID_VALUE = 0 + DISPID_PROPERTYPUT = -3 + DISPID_NEWENUM = -4 + DISPID_EVALUATE = -5 + DISPID_CONSTRUCTOR = -6 + DISPID_DESTRUCTOR = -7 + DISPID_COLLECT = -8 +) + +const ( + TKIND_ENUM = 1 + TKIND_RECORD = 2 + TKIND_MODULE = 3 + TKIND_INTERFACE = 4 + TKIND_DISPATCH = 5 + TKIND_COCLASS = 6 + TKIND_ALIAS = 7 + TKIND_UNION = 8 + TKIND_MAX = 9 +) + +// Safe Array Feature Flags + 
+const ( + FADF_AUTO = 0x0001 + FADF_STATIC = 0x0002 + FADF_EMBEDDED = 0x0004 + FADF_FIXEDSIZE = 0x0010 + FADF_RECORD = 0x0020 + FADF_HAVEIID = 0x0040 + FADF_HAVEVARTYPE = 0x0080 + FADF_BSTR = 0x0100 + FADF_UNKNOWN = 0x0200 + FADF_DISPATCH = 0x0400 + FADF_VARIANT = 0x0800 + FADF_RESERVED = 0xF008 +) diff --git a/vendor/github.com/go-ole/go-ole/error.go b/vendor/github.com/go-ole/go-ole/error.go new file mode 100644 index 00000000..096b456d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error.go @@ -0,0 +1,51 @@ +package ole + +// OleError stores COM errors. +type OleError struct { + hr uintptr + description string + subError error +} + +// NewError creates new error with HResult. +func NewError(hr uintptr) *OleError { + return &OleError{hr: hr} +} + +// NewErrorWithDescription creates new COM error with HResult and description. +func NewErrorWithDescription(hr uintptr, description string) *OleError { + return &OleError{hr: hr, description: description} +} + +// NewErrorWithSubError creates new COM error with parent error. +func NewErrorWithSubError(hr uintptr, description string, err error) *OleError { + return &OleError{hr: hr, description: description, subError: err} +} + +// Code is the HResult. +func (v *OleError) Code() uintptr { + return uintptr(v.hr) +} + +// String description, either manually set or format message with error code. +func (v *OleError) String() string { + if v.description != "" { + return errstr(int(v.hr)) + " (" + v.description + ")" + } + return errstr(int(v.hr)) +} + +// Error implements error interface. +func (v *OleError) Error() string { + return v.String() +} + +// Description retrieves error summary, if there is one. +func (v *OleError) Description() string { + return v.description +} + +// SubError returns parent error, if there is one. +func (v *OleError) SubError() error { + return v.subError +} diff --git a/vendor/github.com/go-ole/go-ole/error_func.go b/vendor/github.com/go-ole/go-ole/error_func.go new file mode 100644 index 00000000..8a2ffaa2 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error_func.go @@ -0,0 +1,8 @@ +// +build !windows + +package ole + +// errstr converts error code to string. +func errstr(errno int) string { + return "" +} diff --git a/vendor/github.com/go-ole/go-ole/error_windows.go b/vendor/github.com/go-ole/go-ole/error_windows.go new file mode 100644 index 00000000..d0e8e685 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error_windows.go @@ -0,0 +1,24 @@ +// +build windows + +package ole + +import ( + "fmt" + "syscall" + "unicode/utf16" +) + +// errstr converts error code to string. +func errstr(errno int) string { + // ask windows for the remaining errors + var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS + b := make([]uint16, 300) + n, err := syscall.FormatMessage(flags, 0, uint32(errno), 0, b, nil) + if err != nil { + return fmt.Sprintf("error %d (FormatMessage failed with: %v)", errno, err) + } + // trim terminating \r and \n + for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { + } + return string(utf16.Decode(b[:n])) +} diff --git a/vendor/github.com/go-ole/go-ole/guid.go b/vendor/github.com/go-ole/go-ole/guid.go new file mode 100644 index 00000000..8d20f68f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/guid.go @@ -0,0 +1,284 @@ +package ole + +var ( + // IID_NULL is null Interface ID, used when no other Interface ID is known. 
+ IID_NULL = NewGUID("{00000000-0000-0000-0000-000000000000}") + + // IID_IUnknown is for IUnknown interfaces. + IID_IUnknown = NewGUID("{00000000-0000-0000-C000-000000000046}") + + // IID_IDispatch is for IDispatch interfaces. + IID_IDispatch = NewGUID("{00020400-0000-0000-C000-000000000046}") + + // IID_IEnumVariant is for IEnumVariant interfaces + IID_IEnumVariant = NewGUID("{00020404-0000-0000-C000-000000000046}") + + // IID_IConnectionPointContainer is for IConnectionPointContainer interfaces. + IID_IConnectionPointContainer = NewGUID("{B196B284-BAB4-101A-B69C-00AA00341D07}") + + // IID_IConnectionPoint is for IConnectionPoint interfaces. + IID_IConnectionPoint = NewGUID("{B196B286-BAB4-101A-B69C-00AA00341D07}") + + // IID_IInspectable is for IInspectable interfaces. + IID_IInspectable = NewGUID("{AF86E2E0-B12D-4C6A-9C5A-D7AA65101E90}") + + // IID_IProvideClassInfo is for IProvideClassInfo interfaces. + IID_IProvideClassInfo = NewGUID("{B196B283-BAB4-101A-B69C-00AA00341D07}") +) + +// These are for testing and not part of any library. +var ( + // IID_ICOMTestString is for ICOMTestString interfaces. + // + // {E0133EB4-C36F-469A-9D3D-C66B84BE19ED} + IID_ICOMTestString = NewGUID("{E0133EB4-C36F-469A-9D3D-C66B84BE19ED}") + + // IID_ICOMTestInt8 is for ICOMTestInt8 interfaces. + // + // {BEB06610-EB84-4155-AF58-E2BFF53680B4} + IID_ICOMTestInt8 = NewGUID("{BEB06610-EB84-4155-AF58-E2BFF53680B4}") + + // IID_ICOMTestInt16 is for ICOMTestInt16 interfaces. + // + // {DAA3F9FA-761E-4976-A860-8364CE55F6FC} + IID_ICOMTestInt16 = NewGUID("{DAA3F9FA-761E-4976-A860-8364CE55F6FC}") + + // IID_ICOMTestInt32 is for ICOMTestInt32 interfaces. + // + // {E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0} + IID_ICOMTestInt32 = NewGUID("{E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}") + + // IID_ICOMTestInt64 is for ICOMTestInt64 interfaces. + // + // {8D437CBC-B3ED-485C-BC32-C336432A1623} + IID_ICOMTestInt64 = NewGUID("{8D437CBC-B3ED-485C-BC32-C336432A1623}") + + // IID_ICOMTestFloat is for ICOMTestFloat interfaces. + // + // {BF1ED004-EA02-456A-AA55-2AC8AC6B054C} + IID_ICOMTestFloat = NewGUID("{BF1ED004-EA02-456A-AA55-2AC8AC6B054C}") + + // IID_ICOMTestDouble is for ICOMTestDouble interfaces. + // + // {BF908A81-8687-4E93-999F-D86FAB284BA0} + IID_ICOMTestDouble = NewGUID("{BF908A81-8687-4E93-999F-D86FAB284BA0}") + + // IID_ICOMTestBoolean is for ICOMTestBoolean interfaces. + // + // {D530E7A6-4EE8-40D1-8931-3D63B8605010} + IID_ICOMTestBoolean = NewGUID("{D530E7A6-4EE8-40D1-8931-3D63B8605010}") + + // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces. + // + // {6485B1EF-D780-4834-A4FE-1EBB51746CA3} + IID_ICOMEchoTestObject = NewGUID("{6485B1EF-D780-4834-A4FE-1EBB51746CA3}") + + // IID_ICOMTestTypes is for ICOMTestTypes interfaces. + // + // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0} + IID_ICOMTestTypes = NewGUID("{CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}") + + // CLSID_COMEchoTestObject is for COMEchoTestObject class. + // + // {3C24506A-AE9E-4D50-9157-EF317281F1B0} + CLSID_COMEchoTestObject = NewGUID("{3C24506A-AE9E-4D50-9157-EF317281F1B0}") + + // CLSID_COMTestScalarClass is for COMTestScalarClass class. + // + // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86} + CLSID_COMTestScalarClass = NewGUID("{865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}") +) + +const hextable = "0123456789ABCDEF" +const emptyGUID = "{00000000-0000-0000-0000-000000000000}" + +// GUID is Windows API specific GUID type. +// +// This exists to match Windows GUID type for direct passing for COM. +// Format is in xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxxxxxx. 
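NewGUID, defined below, accepts three equivalent spellings and returns nil on anything malformed; a quick sketch:

```go
package main

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
)

func main() {
	// All three parse to the same GUID; case is ignored.
	a := ole.NewGUID("{00020400-0000-0000-C000-000000000046}")
	b := ole.NewGUID("00020400-0000-0000-C000-000000000046")
	c := ole.NewGUID("0002040000000000C000000000000046")
	fmt.Println(ole.IsEqualGUID(a, b), ole.IsEqualGUID(b, c)) // true true
	fmt.Println(ole.NewGUID("not-a-guid") == nil)             // true
}
```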
+type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +// NewGUID converts the given string into a globally unique identifier that is +// compliant with the Windows API. +// +// The supplied string may be in any of these formats: +// +// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +// XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX +// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} +// +// The conversion of the supplied string is not case-sensitive. +func NewGUID(guid string) *GUID { + d := []byte(guid) + var d1, d2, d3, d4a, d4b []byte + + switch len(d) { + case 38: + if d[0] != '{' || d[37] != '}' { + return nil + } + d = d[1:37] + fallthrough + case 36: + if d[8] != '-' || d[13] != '-' || d[18] != '-' || d[23] != '-' { + return nil + } + d1 = d[0:8] + d2 = d[9:13] + d3 = d[14:18] + d4a = d[19:23] + d4b = d[24:36] + case 32: + d1 = d[0:8] + d2 = d[8:12] + d3 = d[12:16] + d4a = d[16:20] + d4b = d[20:32] + default: + return nil + } + + var g GUID + var ok1, ok2, ok3, ok4 bool + g.Data1, ok1 = decodeHexUint32(d1) + g.Data2, ok2 = decodeHexUint16(d2) + g.Data3, ok3 = decodeHexUint16(d3) + g.Data4, ok4 = decodeHexByte64(d4a, d4b) + if ok1 && ok2 && ok3 && ok4 { + return &g + } + return nil +} + +func decodeHexUint32(src []byte) (value uint32, ok bool) { + var b1, b2, b3, b4 byte + var ok1, ok2, ok3, ok4 bool + b1, ok1 = decodeHexByte(src[0], src[1]) + b2, ok2 = decodeHexByte(src[2], src[3]) + b3, ok3 = decodeHexByte(src[4], src[5]) + b4, ok4 = decodeHexByte(src[6], src[7]) + value = (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4) + ok = ok1 && ok2 && ok3 && ok4 + return +} + +func decodeHexUint16(src []byte) (value uint16, ok bool) { + var b1, b2 byte + var ok1, ok2 bool + b1, ok1 = decodeHexByte(src[0], src[1]) + b2, ok2 = decodeHexByte(src[2], src[3]) + value = (uint16(b1) << 8) | uint16(b2) + ok = ok1 && ok2 + return +} + +func decodeHexByte64(s1 []byte, s2 []byte) (value [8]byte, ok bool) { + var ok1, ok2, ok3, ok4, ok5, ok6, ok7, ok8 bool + value[0], ok1 = decodeHexByte(s1[0], s1[1]) + value[1], ok2 = decodeHexByte(s1[2], s1[3]) + value[2], ok3 = decodeHexByte(s2[0], s2[1]) + value[3], ok4 = decodeHexByte(s2[2], s2[3]) + value[4], ok5 = decodeHexByte(s2[4], s2[5]) + value[5], ok6 = decodeHexByte(s2[6], s2[7]) + value[6], ok7 = decodeHexByte(s2[8], s2[9]) + value[7], ok8 = decodeHexByte(s2[10], s2[11]) + ok = ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8 + return +} + +func decodeHexByte(c1, c2 byte) (value byte, ok bool) { + var n1, n2 byte + var ok1, ok2 bool + n1, ok1 = decodeHexChar(c1) + n2, ok2 = decodeHexChar(c2) + value = (n1 << 4) | n2 + ok = ok1 && ok2 + return +} + +func decodeHexChar(c byte) (byte, bool) { + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + + return 0, false +} + +// String converts the GUID to string form. 
It will adhere to this pattern: +// +// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} +// +// If the GUID is nil, the string representation of an empty GUID is returned: +// +// {00000000-0000-0000-0000-000000000000} +func (guid *GUID) String() string { + if guid == nil { + return emptyGUID + } + + var c [38]byte + c[0] = '{' + putUint32Hex(c[1:9], guid.Data1) + c[9] = '-' + putUint16Hex(c[10:14], guid.Data2) + c[14] = '-' + putUint16Hex(c[15:19], guid.Data3) + c[19] = '-' + putByteHex(c[20:24], guid.Data4[0:2]) + c[24] = '-' + putByteHex(c[25:37], guid.Data4[2:8]) + c[37] = '}' + return string(c[:]) +} + +func putUint32Hex(b []byte, v uint32) { + b[0] = hextable[byte(v>>24)>>4] + b[1] = hextable[byte(v>>24)&0x0f] + b[2] = hextable[byte(v>>16)>>4] + b[3] = hextable[byte(v>>16)&0x0f] + b[4] = hextable[byte(v>>8)>>4] + b[5] = hextable[byte(v>>8)&0x0f] + b[6] = hextable[byte(v)>>4] + b[7] = hextable[byte(v)&0x0f] +} + +func putUint16Hex(b []byte, v uint16) { + b[0] = hextable[byte(v>>8)>>4] + b[1] = hextable[byte(v>>8)&0x0f] + b[2] = hextable[byte(v)>>4] + b[3] = hextable[byte(v)&0x0f] +} + +func putByteHex(dst, src []byte) { + for i := 0; i < len(src); i++ { + dst[i*2] = hextable[src[i]>>4] + dst[i*2+1] = hextable[src[i]&0x0f] + } +} + +// IsEqualGUID compares two GUID. +// +// Not constant time comparison. +func IsEqualGUID(guid1 *GUID, guid2 *GUID) bool { + return guid1.Data1 == guid2.Data1 && + guid1.Data2 == guid2.Data2 && + guid1.Data3 == guid2.Data3 && + guid1.Data4[0] == guid2.Data4[0] && + guid1.Data4[1] == guid2.Data4[1] && + guid1.Data4[2] == guid2.Data4[2] && + guid1.Data4[3] == guid2.Data4[3] && + guid1.Data4[4] == guid2.Data4[4] && + guid1.Data4[5] == guid2.Data4[5] && + guid1.Data4[6] == guid2.Data4[6] && + guid1.Data4[7] == guid2.Data4[7] +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go new file mode 100644 index 00000000..9e6c49f4 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go @@ -0,0 +1,20 @@ +package ole + +import "unsafe" + +type IConnectionPoint struct { + IUnknown +} + +type IConnectionPointVtbl struct { + IUnknownVtbl + GetConnectionInterface uintptr + GetConnectionPointContainer uintptr + Advise uintptr + Unadvise uintptr + EnumConnections uintptr +} + +func (v *IConnectionPoint) VTable() *IConnectionPointVtbl { + return (*IConnectionPointVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go new file mode 100644 index 00000000..5414dc3c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go @@ -0,0 +1,21 @@ +// +build !windows + +package ole + +import "unsafe" + +func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { + return int32(0) +} + +func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} + +func (v *IConnectionPoint) Unadvise(cookie uint32) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go new file mode 100644 index 00000000..32bc1832 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *IConnectionPoint) 
GetConnectionInterface(piid **GUID) int32 { + // XXX: This doesn't look like it does what it's supposed to + return release((*IUnknown)(unsafe.Pointer(v))) +} + +func (v *IConnectionPoint) Advise(unknown *IUnknown) (cookie uint32, err error) { + hr, _, _ := syscall.Syscall( + v.VTable().Advise, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(unknown)), + uintptr(unsafe.Pointer(&cookie))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (v *IConnectionPoint) Unadvise(cookie uint32) (err error) { + hr, _, _ := syscall.Syscall( + v.VTable().Unadvise, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(cookie), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go new file mode 100644 index 00000000..165860d1 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go @@ -0,0 +1,17 @@ +package ole + +import "unsafe" + +type IConnectionPointContainer struct { + IUnknown +} + +type IConnectionPointContainerVtbl struct { + IUnknownVtbl + EnumConnectionPoints uintptr + FindConnectionPoint uintptr +} + +func (v *IConnectionPointContainer) VTable() *IConnectionPointContainerVtbl { + return (*IConnectionPointContainerVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go new file mode 100644 index 00000000..5dfa42aa --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go @@ -0,0 +1,11 @@ +// +build !windows + +package ole + +func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go new file mode 100644 index 00000000..ad30d79e --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) (err error) { + hr, _, _ := syscall.Syscall( + v.VTable().FindConnectionPoint, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(point))) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch.go b/vendor/github.com/go-ole/go-ole/idispatch.go new file mode 100644 index 00000000..d4af1240 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch.go @@ -0,0 +1,94 @@ +package ole + +import "unsafe" + +type IDispatch struct { + IUnknown +} + +type IDispatchVtbl struct { + IUnknownVtbl + GetTypeInfoCount uintptr + GetTypeInfo uintptr + GetIDsOfNames uintptr + Invoke uintptr +} + +func (v *IDispatch) VTable() *IDispatchVtbl { + return (*IDispatchVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IDispatch) GetIDsOfName(names []string) (dispid []int32, err error) { + dispid, err = getIDsOfName(v, names) + return +} + +func (v 
*IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { + result, err = invoke(v, dispid, dispatch, params...) + return +} + +func (v *IDispatch) GetTypeInfoCount() (c uint32, err error) { + c, err = getTypeInfoCount(v) + return +} + +func (v *IDispatch) GetTypeInfo() (tinfo *ITypeInfo, err error) { + tinfo, err = getTypeInfo(v) + return +} + +// GetSingleIDOfName is a helper that returns a single display ID for an IDispatch name. +// +// This replaces the common pattern of requesting a single name and taking the first ID from the +// list of available IDs. It returns the first ID, if one is available. +func (v *IDispatch) GetSingleIDOfName(name string) (displayID int32, err error) { + var displayIDs []int32 + displayIDs, err = v.GetIDsOfName([]string{name}) + if err != nil { + return + } + displayID = displayIDs[0] + return +} + +// InvokeWithOptionalArgs works like Invoke, but accepts arguments as a slice. +// +// It accepts a name and attempts to retrieve the display ID to pass to Invoke. +// +// Passing params as a slice is a workaround for Go not allowing an empty variadic argument list to +// be forwarded directly. During testing this was found to be an acceptable way of getting around +// not being able to pass params normally. +func (v *IDispatch) InvokeWithOptionalArgs(name string, dispatch int16, params []interface{}) (result *VARIANT, err error) { + displayID, err := v.GetSingleIDOfName(name) + if err != nil { + return + } + + if len(params) < 1 { + result, err = v.Invoke(displayID, dispatch) + } else { + result, err = v.Invoke(displayID, dispatch, params...) + } + + return +} + +// CallMethod invokes the named method on the object with the given arguments. +func (v *IDispatch) CallMethod(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_METHOD, params) +} + +// GetProperty retrieves the named property, with the ability to pass arguments. +// +// Most of the time you will not need to pass arguments, as most objects do not (or at least +// should not) allow it. Some servers don't follow best practices, and this is provided for those +// edge cases. +func (v *IDispatch) GetProperty(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYGET, params) +} + +// PutProperty attempts to mutate a property in the object. 
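+//
+// A minimal usage sketch for CallMethod, GetProperty, and PutProperty (the COM
+// object disp and its "Visible" property and "Quit" method are hypothetical;
+// error handling is elided):
+//
+//	_, _ = disp.PutProperty("Visible", true)
+//	visible, _ := disp.GetProperty("Visible")
+//	defer visible.Clear()
+//	_, _ = disp.CallMethod("Quit")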
+func (v *IDispatch) PutProperty(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYPUT, params) +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_func.go b/vendor/github.com/go-ole/go-ole/idispatch_func.go new file mode 100644 index 00000000..b8fbbe31 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func getIDsOfName(disp *IDispatch, names []string) ([]int32, error) { + return []int32{}, NewError(E_NOTIMPL) +} + +func getTypeInfoCount(disp *IDispatch) (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} + +func getTypeInfo(disp *IDispatch) (*ITypeInfo, error) { + return nil, NewError(E_NOTIMPL) +} + +func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (*VARIANT, error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go new file mode 100644 index 00000000..649c0734 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go @@ -0,0 +1,203 @@ +//go:build windows +// +build windows + +package ole + +import ( + "math/big" + "syscall" + "time" + "unsafe" +) + +func getIDsOfName(disp *IDispatch, names []string) (dispid []int32, err error) { + wnames := make([]*uint16, len(names)) + for i := 0; i < len(names); i++ { + wnames[i] = syscall.StringToUTF16Ptr(names[i]) + } + dispid = make([]int32, len(names)) + namelen := uint32(len(names)) + hr, _, _ := syscall.Syscall6( + disp.VTable().GetIDsOfNames, + 6, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(unsafe.Pointer(&wnames[0])), + uintptr(namelen), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&dispid[0]))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func getTypeInfoCount(disp *IDispatch) (c uint32, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetTypeInfoCount, + 2, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(&c)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func getTypeInfo(disp *IDispatch) (tinfo *ITypeInfo, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetTypeInfo, + 3, + uintptr(unsafe.Pointer(disp)), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&tinfo))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { + var dispparams DISPPARAMS + + if dispatch&DISPATCH_PROPERTYPUT != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.cNamedArgs = 1 + } else if dispatch&DISPATCH_PROPERTYPUTREF != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.cNamedArgs = 1 + } + var vargs []VARIANT + if len(params) > 0 { + vargs = make([]VARIANT, len(params)) + for i, v := range params { + //n := len(params)-i-1 + n := len(params) - i - 1 + VariantInit(&vargs[n]) + switch vv := v.(type) { + case bool: + if vv { + vargs[n] = NewVariant(VT_BOOL, 0xffff) + } else { + vargs[n] = NewVariant(VT_BOOL, 0) + } + case *bool: + vargs[n] = NewVariant(VT_BOOL|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*bool))))) + case uint8: + vargs[n] = NewVariant(VT_I1, int64(v.(uint8))) + case *uint8: + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) + case 
int8: + vargs[n] = NewVariant(VT_I1, int64(v.(int8))) + case *int8: + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int8))))) + case int16: + vargs[n] = NewVariant(VT_I2, int64(v.(int16))) + case *int16: + vargs[n] = NewVariant(VT_I2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int16))))) + case uint16: + vargs[n] = NewVariant(VT_UI2, int64(v.(uint16))) + case *uint16: + vargs[n] = NewVariant(VT_UI2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint16))))) + case int32: + vargs[n] = NewVariant(VT_I4, int64(v.(int32))) + case *int32: + vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int32))))) + case uint32: + vargs[n] = NewVariant(VT_UI4, int64(v.(uint32))) + case *uint32: + vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint32))))) + case int64: + vargs[n] = NewVariant(VT_I8, int64(v.(int64))) + case *int64: + vargs[n] = NewVariant(VT_I8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int64))))) + case uint64: + vargs[n] = NewVariant(VT_UI8, int64(uintptr(v.(uint64)))) + case *uint64: + vargs[n] = NewVariant(VT_UI8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint64))))) + case int: + vargs[n] = NewVariant(VT_I4, int64(v.(int))) + case *int: + vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int))))) + case uint: + vargs[n] = NewVariant(VT_UI4, int64(v.(uint))) + case *uint: + vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint))))) + case float32: + vargs[n] = NewVariant(VT_R4, *(*int64)(unsafe.Pointer(&vv))) + case *float32: + vargs[n] = NewVariant(VT_R4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float32))))) + case float64: + vargs[n] = NewVariant(VT_R8, *(*int64)(unsafe.Pointer(&vv))) + case *float64: + vargs[n] = NewVariant(VT_R8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float64))))) + case *big.Int: + vargs[n] = NewVariant(VT_DECIMAL, v.(*big.Int).Int64()) + case string: + vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(v.(string)))))) + case *string: + vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*string))))) + case time.Time: + s := vv.Format("2006-01-02 15:04:05") + vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(s))))) + case *time.Time: + s := vv.Format("2006-01-02 15:04:05") + vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(&s)))) + case *IDispatch: + vargs[n] = NewVariant(VT_DISPATCH, int64(uintptr(unsafe.Pointer(v.(*IDispatch))))) + case **IDispatch: + vargs[n] = NewVariant(VT_DISPATCH|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(**IDispatch))))) + case nil: + vargs[n] = NewVariant(VT_NULL, 0) + case *VARIANT: + vargs[n] = NewVariant(VT_VARIANT|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*VARIANT))))) + case []byte: + safeByteArray := safeArrayFromByteSlice(v.([]byte)) + vargs[n] = NewVariant(VT_ARRAY|VT_UI1, int64(uintptr(unsafe.Pointer(safeByteArray)))) + defer VariantClear(&vargs[n]) + case []string: + safeByteArray := safeArrayFromStringSlice(v.([]string)) + vargs[n] = NewVariant(VT_ARRAY|VT_BSTR, int64(uintptr(unsafe.Pointer(safeByteArray)))) + defer VariantClear(&vargs[n]) + default: + panic("unknown type") + } + } + dispparams.rgvarg = uintptr(unsafe.Pointer(&vargs[0])) + dispparams.cArgs = uint32(len(params)) + } + + result = new(VARIANT) + var excepInfo EXCEPINFO + VariantInit(result) + hr, _, _ := syscall.Syscall9( + disp.VTable().Invoke, + 9, + uintptr(unsafe.Pointer(disp)), + uintptr(dispid), + uintptr(unsafe.Pointer(IID_NULL)), + 
uintptr(GetUserDefaultLCID()), + uintptr(dispatch), + uintptr(unsafe.Pointer(&dispparams)), + uintptr(unsafe.Pointer(result)), + uintptr(unsafe.Pointer(&excepInfo)), + 0) + if hr != 0 { + excepInfo.renderStrings() + excepInfo.Clear() + err = NewErrorWithSubError(hr, excepInfo.description, excepInfo) + } + for i, varg := range vargs { + n := len(params) - i - 1 + if varg.VT == VT_BSTR && varg.Val != 0 { + SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val))))) + } + if varg.VT == (VT_BSTR|VT_BYREF) && varg.Val != 0 { + *(params[n].(*string)) = LpOleStrToString(*(**uint16)(unsafe.Pointer(uintptr(varg.Val)))) + } + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant.go b/vendor/github.com/go-ole/go-ole/ienumvariant.go new file mode 100644 index 00000000..24338975 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant.go @@ -0,0 +1,19 @@ +package ole + +import "unsafe" + +type IEnumVARIANT struct { + IUnknown +} + +type IEnumVARIANTVtbl struct { + IUnknownVtbl + Next uintptr + Skip uintptr + Reset uintptr + Clone uintptr +} + +func (v *IEnumVARIANT) VTable() *IEnumVARIANTVtbl { + return (*IEnumVARIANTVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go new file mode 100644 index 00000000..c1484819 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func (enum *IEnumVARIANT) Clone() (*IEnumVARIANT, error) { + return nil, NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Reset() error { + return NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Skip(celt uint) error { + return NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Next(celt uint) (VARIANT, uint, error) { + return NewVariant(VT_NULL, int64(0)), 0, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go new file mode 100644 index 00000000..4781f3b8 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go @@ -0,0 +1,63 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (enum *IEnumVARIANT) Clone() (cloned *IEnumVARIANT, err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Clone, + 2, + uintptr(unsafe.Pointer(enum)), + uintptr(unsafe.Pointer(&cloned)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Reset() (err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Reset, + 1, + uintptr(unsafe.Pointer(enum)), + 0, + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Skip(celt uint) (err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Skip, + 2, + uintptr(unsafe.Pointer(enum)), + uintptr(celt), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Next(celt uint) (array VARIANT, length uint, err error) { + hr, _, _ := syscall.Syscall6( + enum.VTable().Next, + 4, + uintptr(unsafe.Pointer(enum)), + uintptr(celt), + uintptr(unsafe.Pointer(&array)), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable.go b/vendor/github.com/go-ole/go-ole/iinspectable.go new file mode 100644 index 00000000..f4a19e25 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable.go @@ -0,0 +1,18 @@ +package ole + +import "unsafe" + +type IInspectable struct { + IUnknown +} + 
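+// IInspectableVtbl mirrors the native COM vtable layout: after the three
+// IUnknown slots come raw function-pointer slots, which the Windows build
+// invokes via syscall (see iinspectable_windows.go below).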
+type IInspectableVtbl struct { + IUnknownVtbl + GetIIds uintptr + GetRuntimeClassName uintptr + GetTrustLevel uintptr +} + +func (v *IInspectable) VTable() *IInspectableVtbl { + return (*IInspectableVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_func.go b/vendor/github.com/go-ole/go-ole/iinspectable_func.go new file mode 100644 index 00000000..348829bf --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable_func.go @@ -0,0 +1,15 @@ +// +build !windows + +package ole + +func (v *IInspectable) GetIids() ([]*GUID, error) { + return []*GUID{}, NewError(E_NOTIMPL) +} + +func (v *IInspectable) GetRuntimeClassName() (string, error) { + return "", NewError(E_NOTIMPL) +} + +func (v *IInspectable) GetTrustLevel() (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go new file mode 100644 index 00000000..4519a4aa --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go @@ -0,0 +1,72 @@ +// +build windows + +package ole + +import ( + "bytes" + "encoding/binary" + "reflect" + "syscall" + "unsafe" +) + +func (v *IInspectable) GetIids() (iids []*GUID, err error) { + var count uint32 + var array uintptr + hr, _, _ := syscall.Syscall( + v.VTable().GetIIds, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&count)), + uintptr(unsafe.Pointer(&array))) + if hr != 0 { + err = NewError(hr) + return + } + defer CoTaskMemFree(array) + + iids = make([]*GUID, count) + byteCount := count * uint32(unsafe.Sizeof(GUID{})) + slicehdr := reflect.SliceHeader{Data: array, Len: int(byteCount), Cap: int(byteCount)} + byteSlice := *(*[]byte)(unsafe.Pointer(&slicehdr)) + reader := bytes.NewReader(byteSlice) + for i := range iids { + guid := GUID{} + err = binary.Read(reader, binary.LittleEndian, &guid) + if err != nil { + return + } + iids[i] = &guid + } + return +} + +func (v *IInspectable) GetRuntimeClassName() (s string, err error) { + var hstring HString + hr, _, _ := syscall.Syscall( + v.VTable().GetRuntimeClassName, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&hstring)), + 0) + if hr != 0 { + err = NewError(hr) + return + } + s = hstring.String() + DeleteHString(hstring) + return +} + +func (v *IInspectable) GetTrustLevel() (level uint32, err error) { + hr, _, _ := syscall.Syscall( + v.VTable().GetTrustLevel, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&level)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go new file mode 100644 index 00000000..25f3a6f2 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go @@ -0,0 +1,21 @@ +package ole + +import "unsafe" + +type IProvideClassInfo struct { + IUnknown +} + +type IProvideClassInfoVtbl struct { + IUnknownVtbl + GetClassInfo uintptr +} + +func (v *IProvideClassInfo) VTable() *IProvideClassInfoVtbl { + return (*IProvideClassInfoVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IProvideClassInfo) GetClassInfo() (cinfo *ITypeInfo, err error) { + cinfo, err = getClassInfo(v) + return +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go new file mode 100644 index 00000000..7e3cb63e --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go @@ -0,0 +1,7 @@ +// +build !windows + +package 
ole + +func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go new file mode 100644 index 00000000..2ad01639 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetClassInfo, + 2, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(&tinfo)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo.go b/vendor/github.com/go-ole/go-ole/itypeinfo.go new file mode 100644 index 00000000..dd3c5e21 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo.go @@ -0,0 +1,34 @@ +package ole + +import "unsafe" + +type ITypeInfo struct { + IUnknown +} + +type ITypeInfoVtbl struct { + IUnknownVtbl + GetTypeAttr uintptr + GetTypeComp uintptr + GetFuncDesc uintptr + GetVarDesc uintptr + GetNames uintptr + GetRefTypeOfImplType uintptr + GetImplTypeFlags uintptr + GetIDsOfNames uintptr + Invoke uintptr + GetDocumentation uintptr + GetDllEntry uintptr + GetRefTypeInfo uintptr + AddressOfMember uintptr + CreateInstance uintptr + GetMops uintptr + GetContainingTypeLib uintptr + ReleaseTypeAttr uintptr + ReleaseFuncDesc uintptr + ReleaseVarDesc uintptr +} + +func (v *ITypeInfo) VTable() *ITypeInfoVtbl { + return (*ITypeInfoVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go new file mode 100644 index 00000000..8364a659 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go @@ -0,0 +1,7 @@ +// +build !windows + +package ole + +func (v *ITypeInfo) GetTypeAttr() (*TYPEATTR, error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go new file mode 100644 index 00000000..54782b3d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *ITypeInfo) GetTypeAttr() (tattr *TYPEATTR, err error) { + hr, _, _ := syscall.Syscall( + uintptr(v.VTable().GetTypeAttr), + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&tattr)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown.go b/vendor/github.com/go-ole/go-ole/iunknown.go new file mode 100644 index 00000000..108f28ea --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown.go @@ -0,0 +1,57 @@ +package ole + +import "unsafe" + +type IUnknown struct { + RawVTable *interface{} +} + +type IUnknownVtbl struct { + QueryInterface uintptr + AddRef uintptr + Release uintptr +} + +type UnknownLike interface { + QueryInterface(iid *GUID) (disp *IDispatch, err error) + AddRef() int32 + Release() int32 +} + +func (v *IUnknown) VTable() *IUnknownVtbl { + return (*IUnknownVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IUnknown) PutQueryInterface(interfaceID *GUID, obj interface{}) error { + return reflectQueryInterface(v, v.VTable().QueryInterface, interfaceID, obj) +} + +func (v *IUnknown) IDispatch(interfaceID *GUID) (dispatch *IDispatch, err error) { + err = 
v.PutQueryInterface(interfaceID, &dispatch) + return +} + +func (v *IUnknown) IEnumVARIANT(interfaceID *GUID) (enum *IEnumVARIANT, err error) { + err = v.PutQueryInterface(interfaceID, &enum) + return +} + +func (v *IUnknown) QueryInterface(iid *GUID) (*IDispatch, error) { + return queryInterface(v, iid) +} + +func (v *IUnknown) MustQueryInterface(iid *GUID) (disp *IDispatch) { + unk, err := queryInterface(v, iid) + if err != nil { + panic(err) + } + return unk +} + +func (v *IUnknown) AddRef() int32 { + return addRef(v) +} + +func (v *IUnknown) Release() int32 { + return release(v) +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_func.go b/vendor/github.com/go-ole/go-ole/iunknown_func.go new file mode 100644 index 00000000..d0a62cfd --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { + return NewError(E_NOTIMPL) +} + +func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { + return nil, NewError(E_NOTIMPL) +} + +func addRef(unk *IUnknown) int32 { + return 0 +} + +func release(unk *IUnknown) int32 { + return 0 +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_windows.go b/vendor/github.com/go-ole/go-ole/iunknown_windows.go new file mode 100644 index 00000000..ede5bb8c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown_windows.go @@ -0,0 +1,58 @@ +// +build windows + +package ole + +import ( + "reflect" + "syscall" + "unsafe" +) + +func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { + selfValue := reflect.ValueOf(self).Elem() + objValue := reflect.ValueOf(obj).Elem() + + hr, _, _ := syscall.Syscall( + method, + 3, + selfValue.UnsafeAddr(), + uintptr(unsafe.Pointer(interfaceID)), + objValue.Addr().Pointer()) + if hr != 0 { + err = NewError(hr) + } + return +} + +func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { + hr, _, _ := syscall.Syscall( + unk.VTable().QueryInterface, + 3, + uintptr(unsafe.Pointer(unk)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&disp))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func addRef(unk *IUnknown) int32 { + ret, _, _ := syscall.Syscall( + unk.VTable().AddRef, + 1, + uintptr(unsafe.Pointer(unk)), + 0, + 0) + return int32(ret) +} + +func release(unk *IUnknown) int32 { + ret, _, _ := syscall.Syscall( + unk.VTable().Release, + 1, + uintptr(unsafe.Pointer(unk)), + 0, + 0) + return int32(ret) +} diff --git a/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/go-ole/go-ole/ole.go new file mode 100644 index 00000000..dbd132bb --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ole.go @@ -0,0 +1,190 @@ +package ole + +import ( + "fmt" + "strings" + "unsafe" +) + +// DISPPARAMS are the arguments that are passed to methods or properties. +type DISPPARAMS struct { + rgvarg uintptr + rgdispidNamedArgs uintptr + cArgs uint32 + cNamedArgs uint32 +} + +// EXCEPINFO defines exception info. +type EXCEPINFO struct { + wCode uint16 + wReserved uint16 + bstrSource *uint16 + bstrDescription *uint16 + bstrHelpFile *uint16 + dwHelpContext uint32 + pvReserved uintptr + pfnDeferredFillIn uintptr + scode uint32 + + // Go-specific part. Don't move this part up; doing so would break the structure layout for native code. 
+ rendered bool + source string + description string + helpFile string +} + +// renderStrings translates BSTR strings to Go ones so `.Error` and `.String` +// can be safely called after `.Clear`. We need this when we can't rely on +// a caller to call `.Clear`. +func (e *EXCEPINFO) renderStrings() { + e.rendered = true + if e.bstrSource == nil { + e.source = "" + } else { + e.source = BstrToString(e.bstrSource) + } + if e.bstrDescription == nil { + e.description = "" + } else { + e.description = BstrToString(e.bstrDescription) + } + if e.bstrHelpFile == nil { + e.helpFile = "" + } else { + e.helpFile = BstrToString(e.bstrHelpFile) + } +} + +// Clear frees the BSTR strings inside an EXCEPINFO and sets them to NULL. +func (e *EXCEPINFO) Clear() { + freeBSTR := func(s *uint16) { + // SysFreeString doesn't return errors and is safe to call on NULL. + // https://docs.microsoft.com/en-us/windows/win32/api/oleauto/nf-oleauto-sysfreestring + _ = SysFreeString((*int16)(unsafe.Pointer(s))) + } + + if e.bstrSource != nil { + freeBSTR(e.bstrSource) + e.bstrSource = nil + } + if e.bstrDescription != nil { + freeBSTR(e.bstrDescription) + e.bstrDescription = nil + } + if e.bstrHelpFile != nil { + freeBSTR(e.bstrHelpFile) + e.bstrHelpFile = nil + } +} + +// WCode returns wCode in EXCEPINFO. +func (e EXCEPINFO) WCode() uint16 { + return e.wCode +} + +// SCODE returns scode in EXCEPINFO. +func (e EXCEPINFO) SCODE() uint32 { + return e.scode +} + +// String converts EXCEPINFO to a string. +func (e EXCEPINFO) String() string { + if !e.rendered { + e.renderStrings() + } + return fmt.Sprintf( + "wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x", + e.wCode, e.source, e.description, e.helpFile, e.dwHelpContext, e.scode, + ) +} + +// Error implements the error interface and returns the error string. +func (e EXCEPINFO) Error() string { + if !e.rendered { + e.renderStrings() + } + + if e.description != "" { + return strings.TrimSpace(e.description) + } + + code := e.scode + if e.wCode != 0 { + code = uint32(e.wCode) + } + return fmt.Sprintf("%v: %#x", e.source, code) +} + +// PARAMDATA defines parameter data type. +type PARAMDATA struct { + Name *int16 + Vt uint16 +} + +// METHODDATA defines method info. +type METHODDATA struct { + Name *uint16 + Data *PARAMDATA + Dispid int32 + Meth uint32 + CC int32 + CArgs uint32 + Flags uint16 + VtReturn uint32 +} + +// INTERFACEDATA defines interface info. +type INTERFACEDATA struct { + MethodData *METHODDATA + CMembers uint32 +} + +// Point is a 2D vector type. +type Point struct { + X int32 + Y int32 +} + +// Msg is a message between processes. +type Msg struct { + Hwnd uint32 + Message uint32 + Wparam int32 + Lparam int32 + Time uint32 + Pt Point +} + +// TYPEDESC defines data type. +type TYPEDESC struct { + Hreftype uint32 + VT uint16 +} + +// IDLDESC defines IDL info. +type IDLDESC struct { + DwReserved uint32 + WIDLFlags uint16 +} + +// TYPEATTR defines type info. 
+type TYPEATTR struct { + Guid GUID + Lcid uint32 + dwReserved uint32 + MemidConstructor int32 + MemidDestructor int32 + LpstrSchema *uint16 + CbSizeInstance uint32 + Typekind int32 + CFuncs uint16 + CVars uint16 + CImplTypes uint16 + CbSizeVft uint16 + CbAlignment uint16 + WTypeFlags uint16 + WMajorVerNum uint16 + WMinorVerNum uint16 + TdescAlias TYPEDESC + IdldescType IDLDESC +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection.go b/vendor/github.com/go-ole/go-ole/oleutil/connection.go new file mode 100644 index 00000000..60df73cd --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection.go @@ -0,0 +1,100 @@ +// +build windows + +package oleutil + +import ( + "reflect" + "unsafe" + + ole "github.com/go-ole/go-ole" +) + +type stdDispatch struct { + lpVtbl *stdDispatchVtbl + ref int32 + iid *ole.GUID + iface interface{} + funcMap map[string]int32 +} + +type stdDispatchVtbl struct { + pQueryInterface uintptr + pAddRef uintptr + pRelease uintptr + pGetTypeInfoCount uintptr + pGetTypeInfo uintptr + pGetIDsOfNames uintptr + pInvoke uintptr +} + +func dispQueryInterface(this *ole.IUnknown, iid *ole.GUID, punk **ole.IUnknown) uint32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + *punk = nil + if ole.IsEqualGUID(iid, ole.IID_IUnknown) || + ole.IsEqualGUID(iid, ole.IID_IDispatch) { + dispAddRef(this) + *punk = this + return ole.S_OK + } + if ole.IsEqualGUID(iid, pthis.iid) { + dispAddRef(this) + *punk = this + return ole.S_OK + } + return ole.E_NOINTERFACE +} + +func dispAddRef(this *ole.IUnknown) int32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + pthis.ref++ + return pthis.ref +} + +func dispRelease(this *ole.IUnknown) int32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + pthis.ref-- + return pthis.ref +} + +func dispGetIDsOfNames(this *ole.IUnknown, iid *ole.GUID, wnames []*uint16, namelen int, lcid int, pdisp []int32) uintptr { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + names := make([]string, len(wnames)) + for i := 0; i < len(names); i++ { + names[i] = ole.LpOleStrToString(wnames[i]) + } + for n := 0; n < namelen; n++ { + if id, ok := pthis.funcMap[names[n]]; ok { + pdisp[n] = id + } + } + return ole.S_OK +} + +func dispGetTypeInfoCount(pcount *int) uintptr { + if pcount != nil { + *pcount = 0 + } + return ole.S_OK +} + +func dispGetTypeInfo(ptypeif *uintptr) uintptr { + return ole.E_NOTIMPL +} + +func dispInvoke(this *ole.IDispatch, dispid int32, riid *ole.GUID, lcid int, flags int16, dispparams *ole.DISPPARAMS, result *ole.VARIANT, pexcepinfo *ole.EXCEPINFO, nerr *uint) uintptr { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + found := "" + for name, id := range pthis.funcMap { + if id == dispid { + found = name + } + } + if found != "" { + rv := reflect.ValueOf(pthis.iface).Elem() + rm := rv.MethodByName(found) + rr := rm.Call([]reflect.Value{}) + println(len(rr)) + return ole.S_OK + } + return ole.E_NOTIMPL +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go new file mode 100644 index 00000000..8818fb82 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go @@ -0,0 +1,10 @@ +// +build !windows + +package oleutil + +import ole "github.com/go-ole/go-ole" + +// ConnectObject creates a connection point between two services for communication. 
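+//
+// A minimal usage sketch (the event interface ID eventsIID and the handler
+// value are hypothetical; on non-Windows builds this always returns E_NOTIMPL):
+//
+//	cookie, err := ConnectObject(disp, eventsIID, &eventHandler)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = cookie // identifies the connection for later teardown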
+func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (uint32, error) { + return 0, ole.NewError(ole.E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go new file mode 100644 index 00000000..ab9c0d8d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go @@ -0,0 +1,58 @@ +// +build windows + +package oleutil + +import ( + "reflect" + "syscall" + "unsafe" + + ole "github.com/go-ole/go-ole" +) + +// ConnectObject creates a connection point between two services for communication. +func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (cookie uint32, err error) { + unknown, err := disp.QueryInterface(ole.IID_IConnectionPointContainer) + if err != nil { + return + } + + container := (*ole.IConnectionPointContainer)(unsafe.Pointer(unknown)) + var point *ole.IConnectionPoint + err = container.FindConnectionPoint(iid, &point) + if err != nil { + return + } + if edisp, ok := idisp.(*ole.IUnknown); ok { + cookie, err = point.Advise(edisp) + container.Release() + if err != nil { + return + } + } + rv := reflect.ValueOf(disp).Elem() + if rv.Type().Kind() == reflect.Struct { + dest := &stdDispatch{} + dest.lpVtbl = &stdDispatchVtbl{} + dest.lpVtbl.pQueryInterface = syscall.NewCallback(dispQueryInterface) + dest.lpVtbl.pAddRef = syscall.NewCallback(dispAddRef) + dest.lpVtbl.pRelease = syscall.NewCallback(dispRelease) + dest.lpVtbl.pGetTypeInfoCount = syscall.NewCallback(dispGetTypeInfoCount) + dest.lpVtbl.pGetTypeInfo = syscall.NewCallback(dispGetTypeInfo) + dest.lpVtbl.pGetIDsOfNames = syscall.NewCallback(dispGetIDsOfNames) + dest.lpVtbl.pInvoke = syscall.NewCallback(dispInvoke) + dest.iface = disp + dest.iid = iid + cookie, err = point.Advise((*ole.IUnknown)(unsafe.Pointer(dest))) + container.Release() + if err != nil { + point.Release() + return + } + return + } + + container.Release() + + return 0, ole.NewError(ole.E_INVALIDARG) +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go new file mode 100644 index 00000000..58347628 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go @@ -0,0 +1,6 @@ +// This file is here so that go get succeeds, as without it the command errors with: +// no buildable Go source files in ... +// +// +build !windows + +package oleutil diff --git a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go new file mode 100644 index 00000000..f7803c1e --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go @@ -0,0 +1,127 @@ +package oleutil + +import ole "github.com/go-ole/go-ole" + +// ClassIDFrom retrieves the class ID, whether it is given a program ID or a class ID string. +func ClassIDFrom(programID string) (classID *ole.GUID, err error) { + return ole.ClassIDFrom(programID) +} + +// CreateObject creates an object from programID based on the interface type. +// +// Only supports IUnknown. +// +// Program ID can be either program ID or application string. +func CreateObject(programID string) (unknown *ole.IUnknown, err error) { + classID, err := ole.ClassIDFrom(programID) + if err != nil { + return + } + + unknown, err = ole.CreateInstance(classID, ole.IID_IUnknown) + if err != nil { + return + } + + return +} + +// GetActiveObject retrieves an active object for the program ID and interface ID based +// on the interface type. +// +// Only supports IUnknown. 
+// +// Program ID can be either program ID or application string. +func GetActiveObject(programID string) (unknown *ole.IUnknown, err error) { + classID, err := ole.ClassIDFrom(programID) + if err != nil { + return + } + + unknown, err = ole.GetActiveObject(classID, ole.IID_IUnknown) + if err != nil { + return + } + + return +} + +// CallMethod calls method on IDispatch with parameters. +func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_METHOD, params) +} + +// MustCallMethod calls method on IDispatch with parameters or panics. +func MustCallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := CallMethod(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// GetProperty retrieves property from IDispatch. +func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYGET, params) +} + +// MustGetProperty retrieves property from IDispatch or panics. +func MustGetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := GetProperty(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// PutProperty mutates property. +func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUT, params) +} + +// MustPutProperty mutates property or panics. +func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := PutProperty(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// PutPropertyRef mutates property reference. +func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params) +} + +// MustPutPropertyRef mutates property reference or panics. +func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := PutPropertyRef(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error { + newEnum, err := disp.GetProperty("_NewEnum") + if err != nil { + return err + } + defer newEnum.Clear() + + enum, err := newEnum.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + defer enum.Release() + + for item, length, err := enum.Next(1); length > 0; item, length, err = enum.Next(1) { + if err != nil { + return err + } + if ferr := f(&item); ferr != nil { + return ferr + } + } + return nil +} diff --git a/vendor/github.com/go-ole/go-ole/safearray.go b/vendor/github.com/go-ole/go-ole/safearray.go new file mode 100644 index 00000000..a5201b56 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray.go @@ -0,0 +1,27 @@ +// Package is meant to retrieve and process safe array data returned from COM. + +package ole + +// SafeArrayBound defines the SafeArray boundaries. +type SafeArrayBound struct { + Elements uint32 + LowerBound int32 +} + +// SafeArray is how COM handles arrays. 
+type SafeArray struct { + Dimensions uint16 + FeaturesFlag uint16 + ElementsSize uint32 + LocksAmount uint32 + Data uint32 + Bounds [16]byte +} + +// SAFEARRAY is obsolete, exists for backwards compatibility. +// Use SafeArray +type SAFEARRAY SafeArray + +// SAFEARRAYBOUND is obsolete, exists for backwards compatibility. +// Use SafeArrayBound +type SAFEARRAYBOUND SafeArrayBound diff --git a/vendor/github.com/go-ole/go-ole/safearray_func.go b/vendor/github.com/go-ole/go-ole/safearray_func.go new file mode 100644 index 00000000..0dee670c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray_func.go @@ -0,0 +1,211 @@ +// +build !windows + +package ole + +import ( + "unsafe" +) + +// safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. +func safeArrayAccessData(safearray *SafeArray) (uintptr, error) { + return uintptr(0), NewError(E_NOTIMPL) +} + +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. +func safeArrayUnaccessData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. +func safeArrayAllocData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayAllocDescriptor allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptor in Windows API. +func safeArrayAllocDescriptor(dimensions uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. +func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. +func safeArrayCopy(original *SafeArray) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. +func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. +func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. +func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. +func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. +func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayDestroy destroys SafeArray object. +// +// AKA: SafeArrayDestroy in Windows API. +func safeArrayDestroy(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. +func safeArrayDestroyData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayDestroyDescriptor destroys SafeArray object. 
+// +// AKA: SafeArrayDestroyDescriptor in Windows API. +func safeArrayDestroyDescriptor(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetDim returns the number of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions, i.e. they can be multidimensional arrays. +// +// AKA: SafeArrayGetDim in Windows API. +func safeArrayGetDim(safearray *SafeArray) (*uint32, error) { + u := uint32(0) + return &u, NewError(E_NOTIMPL) +} + +// safeArrayGetElementSize returns the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. +func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) { + u := uint32(0) + return &u, NewError(E_NOTIMPL) +} + +// safeArrayGetElement retrieves the element at the given index. +func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetElementString retrieves the element at the given index and converts it to a string. +func safeArrayGetElementString(safearray *SafeArray, index int32) (string, error) { + return "", NewError(E_NOTIMPL) +} + +// safeArrayGetIID returns the interface ID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. +func safeArrayGetIID(safearray *SafeArray) (*GUID, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayGetLBound returns the lower bound of the given SafeArray dimension. +// +// SafeArrays may have multiple dimensions, i.e. they can be multidimensional arrays. +// +// AKA: SafeArrayGetLBound in Windows API. +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) +} + +// safeArrayGetUBound returns the upper bound of the given SafeArray dimension. +// +// SafeArrays may have multiple dimensions, i.e. they can be multidimensional arrays. +// +// AKA: SafeArrayGetUBound in Windows API. +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) +} + +// safeArrayGetVartype returns the data type of the SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. +func safeArrayGetVartype(safearray *SafeArray) (uint16, error) { + return uint16(0), NewError(E_NOTIMPL) +} + +// safeArrayLock locks the SafeArray so that it can be read or modified safely. +// +// This must be called around certain operations to ensure that nothing else reads or +// writes to the SafeArray during editing. +// +// AKA: SafeArrayLock in Windows API. +func safeArrayLock(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. +func safeArrayUnlock(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayPutElement stores the data element at the specified location in the +// array. +// +// AKA: SafeArrayPutElement in Windows API. +func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArrayGetRecordInfo(safearray *SafeArray) (interface{}, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. 
+func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/go-ole/go-ole/safearray_windows.go new file mode 100644 index 00000000..0c1b3a10 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray_windows.go @@ -0,0 +1,337 @@ +// +build windows + +package ole + +import ( + "unsafe" +) + +var ( + procSafeArrayAccessData = modoleaut32.NewProc("SafeArrayAccessData") + procSafeArrayAllocData = modoleaut32.NewProc("SafeArrayAllocData") + procSafeArrayAllocDescriptor = modoleaut32.NewProc("SafeArrayAllocDescriptor") + procSafeArrayAllocDescriptorEx = modoleaut32.NewProc("SafeArrayAllocDescriptorEx") + procSafeArrayCopy = modoleaut32.NewProc("SafeArrayCopy") + procSafeArrayCopyData = modoleaut32.NewProc("SafeArrayCopyData") + procSafeArrayCreate = modoleaut32.NewProc("SafeArrayCreate") + procSafeArrayCreateEx = modoleaut32.NewProc("SafeArrayCreateEx") + procSafeArrayCreateVector = modoleaut32.NewProc("SafeArrayCreateVector") + procSafeArrayCreateVectorEx = modoleaut32.NewProc("SafeArrayCreateVectorEx") + procSafeArrayDestroy = modoleaut32.NewProc("SafeArrayDestroy") + procSafeArrayDestroyData = modoleaut32.NewProc("SafeArrayDestroyData") + procSafeArrayDestroyDescriptor = modoleaut32.NewProc("SafeArrayDestroyDescriptor") + procSafeArrayGetDim = modoleaut32.NewProc("SafeArrayGetDim") + procSafeArrayGetElement = modoleaut32.NewProc("SafeArrayGetElement") + procSafeArrayGetElemsize = modoleaut32.NewProc("SafeArrayGetElemsize") + procSafeArrayGetIID = modoleaut32.NewProc("SafeArrayGetIID") + procSafeArrayGetLBound = modoleaut32.NewProc("SafeArrayGetLBound") + procSafeArrayGetUBound = modoleaut32.NewProc("SafeArrayGetUBound") + procSafeArrayGetVartype = modoleaut32.NewProc("SafeArrayGetVartype") + procSafeArrayLock = modoleaut32.NewProc("SafeArrayLock") + procSafeArrayPtrOfIndex = modoleaut32.NewProc("SafeArrayPtrOfIndex") + procSafeArrayUnaccessData = modoleaut32.NewProc("SafeArrayUnaccessData") + procSafeArrayUnlock = modoleaut32.NewProc("SafeArrayUnlock") + procSafeArrayPutElement = modoleaut32.NewProc("SafeArrayPutElement") + //procSafeArrayRedim = modoleaut32.NewProc("SafeArrayRedim") // TODO + //procSafeArraySetIID = modoleaut32.NewProc("SafeArraySetIID") // TODO + procSafeArrayGetRecordInfo = modoleaut32.NewProc("SafeArrayGetRecordInfo") + procSafeArraySetRecordInfo = modoleaut32.NewProc("SafeArraySetRecordInfo") +) + +// safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. +// Todo: Test +func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) { + err = convertHresultToError( + procSafeArrayAccessData.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&element)))) + return +} + +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. +func safeArrayUnaccessData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayUnaccessData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. +func safeArrayAllocData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayAllocData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayAllocDescriptor allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptor in Windows API. 
+func safeArrayAllocDescriptor(dimensions uint32) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayAllocDescriptor.Call(uintptr(dimensions), uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. +func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayAllocDescriptorEx.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. +func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayCopy.Call( + uintptr(unsafe.Pointer(original)), + uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. +func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) { + err = convertHresultToError( + procSafeArrayCopyData.Call( + uintptr(unsafe.Pointer(original)), + uintptr(unsafe.Pointer(duplicate)))) + return +} + +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. +func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreate.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(bounds))) + safearray = (*SafeArray)(unsafe.Pointer(&sa)) + return +} + +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. +func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateEx.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(bounds)), + extra) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. +func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateVector.Call( + uintptr(variantType), + uintptr(lowerBound), + uintptr(length)) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. +func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateVectorEx.Call( + uintptr(variantType), + uintptr(lowerBound), + uintptr(length), + extra) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayDestroy destroys SafeArray object. +// +// AKA: SafeArrayDestroy in Windows API. +func safeArrayDestroy(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroy.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. +func safeArrayDestroyData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroyData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayDestroyDescriptor destroys SafeArray object. +// +// AKA: SafeArrayDestroyDescriptor in Windows API. 
+func safeArrayDestroyDescriptor(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroyDescriptor.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayGetDim returns the number of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions, i.e. they can be multidimensional arrays. +// +// AKA: SafeArrayGetDim in Windows API. +func safeArrayGetDim(safearray *SafeArray) (dimensions *uint32, err error) { + l, _, err := procSafeArrayGetDim.Call(uintptr(unsafe.Pointer(safearray))) + dimensions = (*uint32)(unsafe.Pointer(l)) + return +} + +// safeArrayGetElementSize returns the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. +func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) { + l, _, err := procSafeArrayGetElemsize.Call(uintptr(unsafe.Pointer(safearray))) + length = (*uint32)(unsafe.Pointer(l)) + return +} + +// safeArrayGetElement retrieves the element at the given index. +func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { + return convertHresultToError( + procSafeArrayGetElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(pv))) +} + +// safeArrayGetElementString retrieves the element at the given index and converts it to a string. +func safeArrayGetElementString(safearray *SafeArray, index int32) (str string, err error) { + var element *int16 + err = convertHresultToError( + procSafeArrayGetElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(unsafe.Pointer(&element)))) + str = BstrToString(*(**uint16)(unsafe.Pointer(&element))) + SysFreeString(element) + return +} + +// safeArrayGetIID returns the interface ID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. +func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) { + err = convertHresultToError( + procSafeArrayGetIID.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&guid)))) + return +} + +// safeArrayGetLBound returns the lower bound of the given SafeArray dimension. +// +// SafeArrays may have multiple dimensions, i.e. they can be multidimensional arrays. +// +// AKA: SafeArrayGetLBound in Windows API. +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int32, err error) { + err = convertHresultToError( + procSafeArrayGetLBound.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(dimension), + uintptr(unsafe.Pointer(&lowerBound)))) + return +} + +// safeArrayGetUBound returns the upper bound of the given SafeArray dimension. +// +// SafeArrays may have multiple dimensions, i.e. they can be multidimensional arrays. +// +// AKA: SafeArrayGetUBound in Windows API. +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int32, err error) { + err = convertHresultToError( + procSafeArrayGetUBound.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(dimension), + uintptr(unsafe.Pointer(&upperBound)))) + return +} + +// safeArrayGetVartype returns the data type of the SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. +func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) { + err = convertHresultToError( + procSafeArrayGetVartype.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&varType)))) + return +} + +// safeArrayLock locks the SafeArray so that it can be read or modified safely. +// +// This must be called around certain operations to ensure that nothing else reads or +// writes to the SafeArray during editing. 
+// +// AKA: SafeArrayLock in Windows API. +func safeArrayLock(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. +func safeArrayUnlock(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayUnlock.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayPutElement stores the data element at the specified location in the +// array. +// +// AKA: SafeArrayPutElement in Windows API. +func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (err error) { + err = convertHresultToError( + procSafeArrayPutElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(unsafe.Pointer(element)))) + return +} + +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err error) { + err = convertHresultToError( + procSafeArrayGetRecordInfo.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&recordInfo)))) + return +} + +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) (err error) { + err = convertHresultToError( + procSafeArraySetRecordInfo.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&recordInfo)))) + return +} diff --git a/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/go-ole/go-ole/safearrayconversion.go new file mode 100644 index 00000000..da737293 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearrayconversion.go @@ -0,0 +1,140 @@ +// Helper for converting SafeArray to array of objects. 
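+//
+// A hedged usage sketch: a *VARIANT holding an array (see ToArray in
+// variant.go) can be unpacked with the helpers below; the result variable is
+// hypothetical.
+//
+//	sac := result.ToArray()
+//	if sac != nil {
+//		defer sac.Release()
+//		values := sac.ToValueArray()
+//		_ = values
+//	}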
+ +package ole + +import ( + "unsafe" +) + +type SafeArrayConversion struct { + Array *SafeArray +} + +func (sac *SafeArrayConversion) ToStringArray() (strings []string) { + totalElements, _ := sac.TotalElements(0) + strings = make([]string, totalElements) + + for i := int32(0); i < totalElements; i++ { + strings[int32(i)], _ = safeArrayGetElementString(sac.Array, i) + } + + return +} + +func (sac *SafeArrayConversion) ToByteArray() (bytes []byte) { + totalElements, _ := sac.TotalElements(0) + bytes = make([]byte, totalElements) + + for i := int32(0); i < totalElements; i++ { + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&bytes[int32(i)])) + } + + return +} + +func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) { + totalElements, _ := sac.TotalElements(0) + values = make([]interface{}, totalElements) + vt, _ := safeArrayGetVartype(sac.Array) + + for i := int32(0); i < totalElements; i++ { + switch VT(vt) { + case VT_BOOL: + var v bool + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I1: + var v int8 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I2: + var v int16 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I4: + var v int32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I8: + var v int64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI1: + var v uint8 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI2: + var v uint16 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI4: + var v uint32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI8: + var v uint64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_R4: + var v float32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_R8: + var v float64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_BSTR: + v, _ := safeArrayGetElementString(sac.Array, i) + values[i] = v + case VT_VARIANT: + var v VARIANT + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v.Value() + v.Clear() + default: + // TODO + } + } + + return +} + +func (sac *SafeArrayConversion) GetType() (varType uint16, err error) { + return safeArrayGetVartype(sac.Array) +} + +func (sac *SafeArrayConversion) GetDimensions() (dimensions *uint32, err error) { + return safeArrayGetDim(sac.Array) +} + +func (sac *SafeArrayConversion) GetSize() (length *uint32, err error) { + return safeArrayGetElementSize(sac.Array) +} + +func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int32, err error) { + if index < 1 { + index = 1 + } + + // Get array bounds + var LowerBounds int32 + var UpperBounds int32 + + LowerBounds, err = safeArrayGetLBound(sac.Array, index) + if err != nil { + return + } + + UpperBounds, err = safeArrayGetUBound(sac.Array, index) + if err != nil { + return + } + + totalElements = UpperBounds - LowerBounds + 1 + return +} + +// Release frees the SafeArray memory. +func (sac *SafeArrayConversion) Release() { + safeArrayDestroy(sac.Array) +} diff --git a/vendor/github.com/go-ole/go-ole/safearrayslices.go b/vendor/github.com/go-ole/go-ole/safearrayslices.go new file mode 100644 index 00000000..a9fa885f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearrayslices.go @@ -0,0 +1,33 @@ +// +build windows + +package ole + +import ( + "unsafe" +) + +func 
safeArrayFromByteSlice(slice []byte) *SafeArray { + array, _ := safeArrayCreateVector(VT_UI1, 0, uint32(len(slice))) + + if array == nil { + panic("Could not convert []byte to SAFEARRAY") + } + + for i, v := range slice { + safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(&v))) + } + return array +} + +func safeArrayFromStringSlice(slice []string) *SafeArray { + array, _ := safeArrayCreateVector(VT_BSTR, 0, uint32(len(slice))) + + if array == nil { + panic("Could not convert []string to SAFEARRAY") + } + // SysAllocStringLen(s) + for i, v := range slice { + safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(SysAllocStringLen(v)))) + } + return array +} diff --git a/vendor/github.com/go-ole/go-ole/utility.go b/vendor/github.com/go-ole/go-ole/utility.go new file mode 100644 index 00000000..99ee82dc --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/utility.go @@ -0,0 +1,101 @@ +package ole + +import ( + "unicode/utf16" + "unsafe" +) + +// ClassIDFrom retrieves class ID whether given is program ID or application string. +// +// Helper that provides check against both Class ID from Program ID and Class ID from string. It is +// faster, if you know which you are using, to use the individual functions, but this will check +// against available functions for you. +func ClassIDFrom(programID string) (classID *GUID, err error) { + classID, err = CLSIDFromProgID(programID) + if err != nil { + classID, err = CLSIDFromString(programID) + if err != nil { + return + } + } + return +} + +// BytePtrToString converts byte pointer to a Go string. +func BytePtrToString(p *byte) string { + a := (*[10000]uint8)(unsafe.Pointer(p)) + i := 0 + for a[i] != 0 { + i++ + } + return string(a[:i]) +} + +// UTF16PtrToString is alias for LpOleStrToString. +// +// Kept for compatibility reasons. +func UTF16PtrToString(p *uint16) string { + return LpOleStrToString(p) +} + +// LpOleStrToString converts COM Unicode to Go string. +func LpOleStrToString(p *uint16) string { + if p == nil { + return "" + } + + length := lpOleStrLen(p) + a := make([]uint16, length) + + ptr := unsafe.Pointer(p) + + for i := 0; i < int(length); i++ { + a[i] = *(*uint16)(ptr) + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + + return string(utf16.Decode(a)) +} + +// BstrToString converts COM binary string to Go string. +func BstrToString(p *uint16) string { + if p == nil { + return "" + } + length := SysStringLen((*int16)(unsafe.Pointer(p))) + a := make([]uint16, length) + + ptr := unsafe.Pointer(p) + + for i := 0; i < int(length); i++ { + a[i] = *(*uint16)(ptr) + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + return string(utf16.Decode(a)) +} + +// lpOleStrLen returns the length of Unicode string. +func lpOleStrLen(p *uint16) (length int64) { + if p == nil { + return 0 + } + + ptr := unsafe.Pointer(p) + + for i := 0; ; i++ { + if 0 == *(*uint16)(ptr) { + length = int64(i) + break + } + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + return +} + +// convertHresultToError converts syscall to error, if call is unsuccessful. 
+func convertHresultToError(hr uintptr, r2 uintptr, ignore error) (err error) { + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/go-ole/go-ole/variables.go new file mode 100644 index 00000000..a6add1b0 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variables.go @@ -0,0 +1,15 @@ +// +build windows + +package ole + +import ( + "golang.org/x/sys/windows" +) + +var ( + modcombase = windows.NewLazySystemDLL("combase.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modole32 = windows.NewLazySystemDLL("ole32.dll") + modoleaut32 = windows.NewLazySystemDLL("oleaut32.dll") + moduser32 = windows.NewLazySystemDLL("user32.dll") +) diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go new file mode 100644 index 00000000..a2c8402f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant.go @@ -0,0 +1,105 @@ +package ole + +import "unsafe" + +// NewVariant returns new variant based on type and value. +func NewVariant(vt VT, val int64) VARIANT { + return VARIANT{VT: vt, Val: val} +} + +// ToIUnknown converts Variant to Unknown object. +func (v *VARIANT) ToIUnknown() *IUnknown { + if v.VT != VT_UNKNOWN { + return nil + } + return (*IUnknown)(unsafe.Pointer(uintptr(v.Val))) +} + +// ToIDispatch converts variant to dispatch object. +func (v *VARIANT) ToIDispatch() *IDispatch { + if v.VT != VT_DISPATCH { + return nil + } + return (*IDispatch)(unsafe.Pointer(uintptr(v.Val))) +} + +// ToArray converts variant to SafeArray helper. +func (v *VARIANT) ToArray() *SafeArrayConversion { + if v.VT != VT_SAFEARRAY { + if v.VT&VT_ARRAY == 0 { + return nil + } + } + var safeArray *SafeArray = (*SafeArray)(unsafe.Pointer(uintptr(v.Val))) + return &SafeArrayConversion{safeArray} +} + +// ToString converts variant to Go string. +func (v *VARIANT) ToString() string { + if v.VT != VT_BSTR { + return "" + } + return BstrToString(*(**uint16)(unsafe.Pointer(&v.Val))) +} + +// Clear the memory of variant object. +func (v *VARIANT) Clear() error { + return VariantClear(v) +} + +// Value returns variant value based on its type. +// +// Currently supported types: 2- and 4-byte integers, strings, bools. +// Note that 64-bit integers, datetimes, and other types are stored as strings +// and will be returned as strings. +// +// Needs to be further converted, because this returns an interface{}. +func (v *VARIANT) Value() interface{} { + switch v.VT { + case VT_I1: + return int8(v.Val) + case VT_UI1: + return uint8(v.Val) + case VT_I2: + return int16(v.Val) + case VT_UI2: + return uint16(v.Val) + case VT_I4: + return int32(v.Val) + case VT_UI4: + return uint32(v.Val) + case VT_I8: + return int64(v.Val) + case VT_UI8: + return uint64(v.Val) + case VT_INT: + return int(v.Val) + case VT_UINT: + return uint(v.Val) + case VT_INT_PTR: + return uintptr(v.Val) // TODO + case VT_UINT_PTR: + return uintptr(v.Val) + case VT_R4: + return *(*float32)(unsafe.Pointer(&v.Val)) + case VT_R8: + return *(*float64)(unsafe.Pointer(&v.Val)) + case VT_BSTR: + return v.ToString() + case VT_DATE: + // VT_DATE type will either return float64 or time.Time. 
+ d := uint64(v.Val) + date, err := GetVariantDate(d) + if err != nil { + return float64(v.Val) + } + return date + case VT_UNKNOWN: + return v.ToIUnknown() + case VT_DISPATCH: + return v.ToIDispatch() + case VT_BOOL: + return (v.Val & 0xffff) != 0 + } + return nil +} diff --git a/vendor/github.com/go-ole/go-ole/variant_386.go b/vendor/github.com/go-ole/go-ole/variant_386.go new file mode 100644 index 00000000..e73736bf --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_386.go @@ -0,0 +1,11 @@ +// +build 386 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_amd64.go b/vendor/github.com/go-ole/go-ole/variant_amd64.go new file mode 100644 index 00000000..dccdde13 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_amd64.go @@ -0,0 +1,12 @@ +// +build amd64 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_arm.go b/vendor/github.com/go-ole/go-ole/variant_arm.go new file mode 100644 index 00000000..d4724544 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_arm.go @@ -0,0 +1,11 @@ +// +build arm + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_arm64.go b/vendor/github.com/go-ole/go-ole/variant_arm64.go new file mode 100644 index 00000000..78473cec --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_arm64.go @@ -0,0 +1,13 @@ +//go:build arm64 +// +build arm64 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_386.go b/vendor/github.com/go-ole/go-ole/variant_date_386.go new file mode 100644 index 00000000..1b970f63 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_386.go @@ -0,0 +1,22 @@ +// +build windows,386 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. +func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + v1 := uint32(value) + v2 := uint32(value >> 32) + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_amd64.go b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go new file mode 100644 index 00000000..6952f1f0 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go @@ -0,0 +1,20 @@ +// +build windows,amd64 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. 
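+//
+// Editor's note: this amd64 variant passes the packed 64-bit OLE DATE as a
+// single argument, whereas the 386 and arm variants in the sibling
+// variant_date_*.go files split it into two uint32 halves (low word first)
+// before calling VariantTimeToSystemTime.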
+func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm.go b/vendor/github.com/go-ole/go-ole/variant_date_arm.go new file mode 100644 index 00000000..09ec7b5c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_arm.go @@ -0,0 +1,22 @@ +// +build windows,arm + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. +func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + v1 := uint32(value) + v2 := uint32(value >> 32) + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm64.go b/vendor/github.com/go-ole/go-ole/variant_date_arm64.go new file mode 100644 index 00000000..02b04a0d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_arm64.go @@ -0,0 +1,23 @@ +//go:build windows && arm64 +// +build windows,arm64 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. 
+func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + v1 := uint32(value) + v2 := uint32(value >> 32) + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_ppc64le.go b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go new file mode 100644 index 00000000..326427a7 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go @@ -0,0 +1,12 @@ +// +build ppc64le + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_s390x.go b/vendor/github.com/go-ole/go-ole/variant_s390x.go new file mode 100644 index 00000000..9874ca66 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/vt_string.go b/vendor/github.com/go-ole/go-ole/vt_string.go new file mode 100644 index 00000000..729b4a04 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/vt_string.go @@ -0,0 +1,58 @@ +// generated by stringer -output vt_string.go -type VT; DO NOT EDIT + +package ole + +import "fmt" + +const ( + _VT_name_0 = "VT_EMPTYVT_NULLVT_I2VT_I4VT_R4VT_R8VT_CYVT_DATEVT_BSTRVT_DISPATCHVT_ERRORVT_BOOLVT_VARIANTVT_UNKNOWNVT_DECIMAL" + _VT_name_1 = "VT_I1VT_UI1VT_UI2VT_UI4VT_I8VT_UI8VT_INTVT_UINTVT_VOIDVT_HRESULTVT_PTRVT_SAFEARRAYVT_CARRAYVT_USERDEFINEDVT_LPSTRVT_LPWSTR" + _VT_name_2 = "VT_RECORDVT_INT_PTRVT_UINT_PTR" + _VT_name_3 = "VT_FILETIMEVT_BLOBVT_STREAMVT_STORAGEVT_STREAMED_OBJECTVT_STORED_OBJECTVT_BLOB_OBJECTVT_CFVT_CLSID" + _VT_name_4 = "VT_BSTR_BLOBVT_VECTOR" + _VT_name_5 = "VT_ARRAY" + _VT_name_6 = "VT_BYREF" + _VT_name_7 = "VT_RESERVED" + _VT_name_8 = "VT_ILLEGAL" +) + +var ( + _VT_index_0 = [...]uint8{0, 8, 15, 20, 25, 30, 35, 40, 47, 54, 65, 73, 80, 90, 100, 110} + _VT_index_1 = [...]uint8{0, 5, 11, 17, 23, 28, 34, 40, 47, 54, 64, 70, 82, 91, 105, 113, 122} + _VT_index_2 = [...]uint8{0, 9, 19, 30} + _VT_index_3 = [...]uint8{0, 11, 18, 27, 37, 55, 71, 85, 90, 98} + _VT_index_4 = [...]uint8{0, 12, 21} + _VT_index_5 = [...]uint8{0, 8} + _VT_index_6 = [...]uint8{0, 8} + _VT_index_7 = [...]uint8{0, 11} + _VT_index_8 = [...]uint8{0, 10} +) + +func (i VT) String() string { + switch { + case 0 <= i && i <= 14: + return _VT_name_0[_VT_index_0[i]:_VT_index_0[i+1]] + case 16 <= i && i <= 31: + i -= 16 + return _VT_name_1[_VT_index_1[i]:_VT_index_1[i+1]] + case 36 <= i && i <= 38: + i -= 36 + return _VT_name_2[_VT_index_2[i]:_VT_index_2[i+1]] + case 64 <= i && i <= 72: + i -= 64 + return _VT_name_3[_VT_index_3[i]:_VT_index_3[i+1]] + case 4095 <= i && i <= 4096: + i -= 4095 + return _VT_name_4[_VT_index_4[i]:_VT_index_4[i+1]] + case i == 8192: + return _VT_name_5 + case i == 16384: + return _VT_name_6 + case i == 32768: + return _VT_name_7 + case i == 65535: + return _VT_name_8 + default: + return fmt.Sprintf("VT(%d)", i) + } +} diff --git a/vendor/github.com/go-ole/go-ole/winrt.go 
b/vendor/github.com/go-ole/go-ole/winrt.go new file mode 100644 index 00000000..4e9eca73 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/winrt.go @@ -0,0 +1,99 @@ +// +build windows + +package ole + +import ( + "reflect" + "syscall" + "unicode/utf8" + "unsafe" +) + +var ( + procRoInitialize = modcombase.NewProc("RoInitialize") + procRoActivateInstance = modcombase.NewProc("RoActivateInstance") + procRoGetActivationFactory = modcombase.NewProc("RoGetActivationFactory") + procWindowsCreateString = modcombase.NewProc("WindowsCreateString") + procWindowsDeleteString = modcombase.NewProc("WindowsDeleteString") + procWindowsGetStringRawBuffer = modcombase.NewProc("WindowsGetStringRawBuffer") +) + +func RoInitialize(thread_type uint32) (err error) { + hr, _, _ := procRoInitialize.Call(uintptr(thread_type)) + if hr != 0 { + err = NewError(hr) + } + return +} + +func RoActivateInstance(clsid string) (ins *IInspectable, err error) { + hClsid, err := NewHString(clsid) + if err != nil { + return nil, err + } + defer DeleteHString(hClsid) + + hr, _, _ := procRoActivateInstance.Call( + uintptr(unsafe.Pointer(hClsid)), + uintptr(unsafe.Pointer(&ins))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { + hClsid, err := NewHString(clsid) + if err != nil { + return nil, err + } + defer DeleteHString(hClsid) + + hr, _, _ := procRoGetActivationFactory.Call( + uintptr(unsafe.Pointer(hClsid)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&ins))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// HString is handle string for pointers. +type HString uintptr + +// NewHString returns a new HString for Go string. +func NewHString(s string) (hstring HString, err error) { + u16 := syscall.StringToUTF16Ptr(s) + len := uint32(utf8.RuneCountInString(s)) + hr, _, _ := procWindowsCreateString.Call( + uintptr(unsafe.Pointer(u16)), + uintptr(len), + uintptr(unsafe.Pointer(&hstring))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// DeleteHString deletes HString. +func DeleteHString(hstring HString) (err error) { + hr, _, _ := procWindowsDeleteString.Call(uintptr(hstring)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// String returns Go string value of HString. +func (h HString) String() string { + var u16buf uintptr + var u16len uint32 + u16buf, _, _ = procWindowsGetStringRawBuffer.Call( + uintptr(h), + uintptr(unsafe.Pointer(&u16len))) + + u16hdr := reflect.SliceHeader{Data: u16buf, Len: int(u16len), Cap: int(u16len)} + u16 := *(*[]uint16)(unsafe.Pointer(&u16hdr)) + return syscall.UTF16ToString(u16) +} diff --git a/vendor/github.com/go-ole/go-ole/winrt_doc.go b/vendor/github.com/go-ole/go-ole/winrt_doc.go new file mode 100644 index 00000000..52e6d74c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/winrt_doc.go @@ -0,0 +1,36 @@ +// +build !windows + +package ole + +// RoInitialize +func RoInitialize(thread_type uint32) (err error) { + return NewError(E_NOTIMPL) +} + +// RoActivateInstance +func RoActivateInstance(clsid string) (ins *IInspectable, err error) { + return nil, NewError(E_NOTIMPL) +} + +// RoGetActivationFactory +func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { + return nil, NewError(E_NOTIMPL) +} + +// HString is handle string for pointers. +type HString uintptr + +// NewHString returns a new HString for Go string. 
+func NewHString(s string) (hstring HString, err error) { + return HString(uintptr(0)), NewError(E_NOTIMPL) +} + +// DeleteHString deletes HString. +func DeleteHString(hstring HString) (err error) { + return NewError(E_NOTIMPL) +} + +// String returns Go string value of HString. +func (h HString) String() string { + return "" +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig new file mode 100644 index 00000000..faef0c91 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig @@ -0,0 +1,21 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{Makefile,*.mk}] +indent_style = tab + +[*.nix] +indent_size = 2 + +[.golangci.yaml] +indent_size = 2 diff --git a/vendor/github.com/go-viper/mapstructure/v2/.envrc b/vendor/github.com/go-viper/mapstructure/v2/.envrc new file mode 100644 index 00000000..2e0f9f5f --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +fi +use flake . --impure diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore new file mode 100644 index 00000000..470e7ca2 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore @@ -0,0 +1,6 @@ +/.devenv/ +/.direnv/ +/.pre-commit-config.yaml +/bin/ +/build/ +/var/ diff --git a/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml new file mode 100644 index 00000000..bda96256 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml @@ -0,0 +1,48 @@ +version: "2" + +run: + timeout: 10m + +linters: + enable: + - govet + - ineffassign + # - misspell + - nolintlint + # - revive + + disable: + - errcheck + - staticcheck + - unused + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + # - golines + + settings: + gci: + sections: + - standard + - default + - localmodule + gofmt: + simplify: true + rewrite-rules: + - pattern: interface{} + replacement: any + + exclusions: + paths: + - internal/ diff --git a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md new file mode 100644 index 00000000..afd44e5f --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md @@ -0,0 +1,104 @@ +> [!WARNING] +> As of v2 of this library, change log can be found in GitHub releases. + +## 1.5.1 + +* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282] +* Fix map of slices not decoding properly in certain cases. [GH-266] + +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. 
[GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. +* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. 
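Several of the changelog entries above revolve around `mapstructure` struct-tag modifiers (`,squash`, `,remain`, `,omitempty`). For orientation, here is a minimal, hedged sketch of how those tags combine; the `Base`/`Config` types and the input map are illustrative assumptions, not code from this repository:

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Base struct {
	Type string `mapstructure:"type"`
}

type Config struct {
	Base `mapstructure:",squash"` // embedded struct fields are flattened into the parent

	Name  string         `mapstructure:"name,omitempty"` // zero values are skipped when encoding back to a map
	Extra map[string]any `mapstructure:",remain"`        // collects keys not matched by any other field
}

func main() {
	input := map[string]any{"type": "person", "name": "Mitchell", "age": 91}

	var cfg Config
	if err := mapstructure.Decode(input, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // Extra ends up holding {"age": 91}
}
```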
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/go-viper/mapstructure/v2/LICENSE similarity index 100% rename from vendor/github.com/mitchellh/mapstructure/LICENSE rename to vendor/github.com/go-viper/mapstructure/v2/LICENSE diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md new file mode 100644 index 00000000..45db7197 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/README.md @@ -0,0 +1,81 @@ +# mapstructure + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?style=flat-square)](https://github.com/go-viper/mapstructure/actions/workflows/ci.yaml) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/go-viper/mapstructure?style=flat-square&color=61CFDD) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-viper/mapstructure/badge?style=flat-square)](https://deps.dev/go/github.com%252Fgo-viper%252Fmapstructure%252Fv2) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +```shell +go get github.com/go-viper/mapstructure/v2 +``` + +## Migrating from `github.com/mitchellh/mapstructure` + +[@mitchellh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This repository achieved the "blessed fork" status. + +You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`. +The API is the same, so you don't need to change anything else. + +Here is a script that can help you with the migration: + +```shell +sed -i 's|github.com/mitchellh/mapstructure|github.com/go-viper/mapstructure/v2|g' $(find . -type f -name '*.go') +``` + +If you need more time to migrate your code, that is absolutely fine. + +Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature in your `go.mod` until you are ready to migrate: + +``` +replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0 +``` + +## Usage & Example + +For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields.
For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. + +## Credits + +Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh). +This is a maintained fork of the original library. + +Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349). + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go new file mode 100644 index 00000000..a852a0a0 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go @@ -0,0 +1,714 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "net/netip" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an any) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []any{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// cachedDecodeHook takes a raw DecodeHookFunc (an any) and turns +// it into a closure to be used directly +// if the type fails to convert we return a closure always erroring to keep the previous behaviour +func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (any, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return func(from reflect.Value, to reflect.Value) (any, error) { + return f(from.Type(), to.Type(), from.Interface()) + } + case DecodeHookFuncKind: + return func(from reflect.Value, to reflect.Value) (any, error) { + return f(from.Kind(), to.Kind(), from.Interface()) + } + case DecodeHookFuncValue: + return func(from reflect.Value, to reflect.Value) (any, error) { + return f(from, to) + } + default: + return func(from reflect.Value, to reflect.Value) (any, error) { + return nil, errors.New("invalid decode hook signature") + } + } +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. 
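+//
+// A hedged usage sketch (editor's illustration): hooks are normally driven by
+// the Decoder, but executing one directly looks like this:
+//
+//	hook := StringToTimeDurationHookFunc()
+//	from := reflect.ValueOf("1500ms")
+//	to := reflect.ValueOf(time.Duration(0))
+//	v, err := DecodeHookExec(hook, from, to) // v is 1.5s as a time.Duration when err == nil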
+func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value, +) (any, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(fs)) + for _, f := range fs { + cached = append(cached, cachedDecodeHook(f)) + } + return func(f reflect.Value, t reflect.Value) (any, error) { + var err error + data := f.Interface() + + newFrom := f + for _, c := range cached { + data, err = c(newFrom, t) + if err != nil { + return nil, err + } + if v, ok := data.(reflect.Value); ok { + newFrom = v + } else { + newFrom = reflect.ValueOf(data) + } + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(ff)) + for _, f := range ff { + cached = append(cached, cachedDecodeHook(f)) + } + return func(a, b reflect.Value) (any, error) { + var allErrs string + var out any + var err error + + for _, c := range cached { + out, err = c(a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.SliceOf(f) { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToWeakSliceHookFunc brings back the old (pre-v2) behavior of [StringToSliceHookFunc]. +// +// As of mapstructure v2.0.0 [StringToSliceHookFunc] checks if the return type is a string slice. +// This function removes that check. +func StringToWeakSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. 
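+//
+// Editor's sketch (hedged): wiring the hook into a decoder; `out` stands in
+// for any struct with a time.Duration field, and DecoderConfig/NewDecoder are
+// defined in mapstructure.go:
+//
+//	dec, err := NewDecoder(&DecoderConfig{
+//		DecodeHook: StringToTimeDurationHookFunc(),
+//		Result:     &out,
+//	})
+//	if err == nil {
+//		err = dec.Decode(map[string]any{"timeout": "30s"})
+//	}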
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + d, err := time.ParseDuration(data.(string)) + + return d, wrapTimeParseDurationError(err) + } +} + +// StringToTimeLocationHookFunc returns a DecodeHookFunc that converts +// strings to *time.Location. +func StringToTimeLocationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Local) { + return data, nil + } + d, err := time.LoadLocation(data.(string)) + + return d, wrapTimeParseLocationError(err) + } +} + +// StringToURLHookFunc returns a DecodeHookFunc that converts +// strings to *url.URL. +func StringToURLHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(&url.URL{}) { + return data, nil + } + + // Convert it by parsing + u, err := url.Parse(data.(string)) + + return u, wrapUrlError(err) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip") + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, wrapNetParseError(err) + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + ti, err := time.Parse(layout, data.(string)) + + return ti, wrapTimeParseError(err) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
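+//
+// Editor's note: concretely, with a string target this hook yields
+// true -> "1", false -> "0", 42 -> "42", float32(2.5) -> "2.5", and
+// []byte("hi") -> "hi"; other kind combinations pass through unchanged.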
+func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data any, +) (any, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (any, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i any = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]any) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + str, ok := data.(string) + if !ok { + str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String() + } + if err := unmarshaller.UnmarshalText([]byte(str)); err != nil { + return nil, err + } + return result, nil + } +} + +// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts +// strings to netip.Addr. +func StringToNetIPAddrHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.Addr{}) { + return data, nil + } + + // Convert it by parsing + addr, err := netip.ParseAddr(data.(string)) + + return addr, wrapNetIPParseAddrError(err) + } +} + +// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts +// strings to netip.AddrPort. +func StringToNetIPAddrPortHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.AddrPort{}) { + return data, nil + } + + // Convert it by parsing + addrPort, err := netip.ParseAddrPort(data.(string)) + + return addrPort, wrapNetIPParseAddrPortError(err) + } +} + +// StringToNetIPPrefixHookFunc returns a DecodeHookFunc that converts +// strings to netip.Prefix. +func StringToNetIPPrefixHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.Prefix{}) { + return data, nil + } + + // Convert it by parsing + prefix, err := netip.ParsePrefix(data.(string)) + + return prefix, wrapNetIPParsePrefixError(err) + } +} + +// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts +// strings to basic types. 
+// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128 +func StringToBasicTypeHookFunc() DecodeHookFunc { + return ComposeDecodeHookFunc( + StringToInt8HookFunc(), + StringToUint8HookFunc(), + StringToInt16HookFunc(), + StringToUint16HookFunc(), + StringToInt32HookFunc(), + StringToUint32HookFunc(), + StringToInt64HookFunc(), + StringToUint64HookFunc(), + StringToIntHookFunc(), + StringToUintHookFunc(), + StringToFloat32HookFunc(), + StringToFloat64HookFunc(), + StringToBoolHookFunc(), + // byte and rune are aliases for uint8 and int32 respectively + // StringToByteHookFunc(), + // StringToRuneHookFunc(), + StringToComplex64HookFunc(), + StringToComplex128HookFunc(), + ) +} + +// StringToInt8HookFunc returns a DecodeHookFunc that converts +// strings to int8. +func StringToInt8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int8 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 8) + return int8(i64), wrapStrconvNumError(err) + } +} + +// StringToUint8HookFunc returns a DecodeHookFunc that converts +// strings to uint8. +func StringToUint8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 8) + return uint8(u64), wrapStrconvNumError(err) + } +} + +// StringToInt16HookFunc returns a DecodeHookFunc that converts +// strings to int16. +func StringToInt16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int16 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 16) + return int16(i64), wrapStrconvNumError(err) + } +} + +// StringToUint16HookFunc returns a DecodeHookFunc that converts +// strings to uint16. +func StringToUint16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 16) + return uint16(u64), wrapStrconvNumError(err) + } +} + +// StringToInt32HookFunc returns a DecodeHookFunc that converts +// strings to int32. +func StringToInt32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int32 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 32) + return int32(i64), wrapStrconvNumError(err) + } +} + +// StringToUint32HookFunc returns a DecodeHookFunc that converts +// strings to uint32. +func StringToUint32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 32) + return uint32(u64), wrapStrconvNumError(err) + } +} + +// StringToInt64HookFunc returns a DecodeHookFunc that converts +// strings to int64. 
+func StringToInt64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int64 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 64) + return int64(i64), wrapStrconvNumError(err) + } +} + +// StringToUint64HookFunc returns a DecodeHookFunc that converts +// strings to uint64. +func StringToUint64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 64) + return uint64(u64), wrapStrconvNumError(err) + } +} + +// StringToIntHookFunc returns a DecodeHookFunc that converts +// strings to int. +func StringToIntHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 0) + return int(i64), wrapStrconvNumError(err) + } +} + +// StringToUintHookFunc returns a DecodeHookFunc that converts +// strings to uint. +func StringToUintHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 0) + return uint(u64), wrapStrconvNumError(err) + } +} + +// StringToFloat32HookFunc returns a DecodeHookFunc that converts +// strings to float32. +func StringToFloat32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float32 { + return data, nil + } + + // Convert it by parsing + f64, err := strconv.ParseFloat(data.(string), 32) + return float32(f64), wrapStrconvNumError(err) + } +} + +// StringToFloat64HookFunc returns a DecodeHookFunc that converts +// strings to float64. +func StringToFloat64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float64 { + return data, nil + } + + // Convert it by parsing + f64, err := strconv.ParseFloat(data.(string), 64) + return f64, wrapStrconvNumError(err) + } +} + +// StringToBoolHookFunc returns a DecodeHookFunc that converts +// strings to bool. +func StringToBoolHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Bool { + return data, nil + } + + // Convert it by parsing + b, err := strconv.ParseBool(data.(string)) + return b, wrapStrconvNumError(err) + } +} + +// StringToByteHookFunc returns a DecodeHookFunc that converts +// strings to byte. +func StringToByteHookFunc() DecodeHookFunc { + return StringToUint8HookFunc() +} + +// StringToRuneHookFunc returns a DecodeHookFunc that converts +// strings to rune. +func StringToRuneHookFunc() DecodeHookFunc { + return StringToInt32HookFunc() +} + +// StringToComplex64HookFunc returns a DecodeHookFunc that converts +// strings to complex64. 
+func StringToComplex64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 { + return data, nil + } + + // Convert it by parsing + c128, err := strconv.ParseComplex(data.(string), 64) + return complex64(c128), wrapStrconvNumError(err) + } +} + +// StringToComplex128HookFunc returns a DecodeHookFunc that converts +// strings to complex128. +func StringToComplex128HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 { + return data, nil + } + + // Convert it by parsing + c128, err := strconv.ParseComplex(data.(string), 128) + return c128, wrapStrconvNumError(err) + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/errors.go b/vendor/github.com/go-viper/mapstructure/v2/errors.go new file mode 100644 index 00000000..07d31c22 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/errors.go @@ -0,0 +1,244 @@ +package mapstructure + +import ( + "errors" + "fmt" + "net" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +// Error interface is implemented by all errors emitted by mapstructure. +// +// Use [errors.As] to check if an error implements this interface. +type Error interface { + error + + mapstructure() +} + +// DecodeError is a generic error type that holds information about +// a decoding error together with the name of the field that caused the error. +type DecodeError struct { + name string + err error +} + +func newDecodeError(name string, err error) *DecodeError { + return &DecodeError{ + name: name, + err: err, + } +} + +func (e *DecodeError) Name() string { + return e.name +} + +func (e *DecodeError) Unwrap() error { + return e.err +} + +func (e *DecodeError) Error() string { + return fmt.Sprintf("'%s' %s", e.name, e.err) +} + +func (*DecodeError) mapstructure() {} + +// ParseError is an error type that indicates a value could not be parsed +// into the expected type. +type ParseError struct { + Expected reflect.Value + Value any + Err error +} + +func (e *ParseError) Error() string { + return fmt.Sprintf("cannot parse value as '%s': %s", e.Expected.Type(), e.Err) +} + +func (*ParseError) mapstructure() {} + +// UnconvertibleTypeError is an error type that indicates a value could not be +// converted to the expected type. +type UnconvertibleTypeError struct { + Expected reflect.Value + Value any +} + +func (e *UnconvertibleTypeError) Error() string { + return fmt.Sprintf( + "expected type '%s', got unconvertible type '%s'", + e.Expected.Type(), + reflect.TypeOf(e.Value), + ) +} + +func (*UnconvertibleTypeError) mapstructure() {} + +func wrapStrconvNumError(err error) error { + if err == nil { + return nil + } + + if err, ok := err.(*strconv.NumError); ok { + return &strconvNumError{Err: err} + } + + return err +} + +type strconvNumError struct { + Err *strconv.NumError +} + +func (e *strconvNumError) Error() string { + return "strconv." 
+ e.Err.Func + ": " + e.Err.Err.Error() +} + +func (e *strconvNumError) Unwrap() error { return e.Err } + +func wrapUrlError(err error) error { + if err == nil { + return nil + } + + if err, ok := err.(*url.Error); ok { + return &urlError{Err: err} + } + + return err +} + +type urlError struct { + Err *url.Error +} + +func (e *urlError) Error() string { + return fmt.Sprintf("%s", e.Err.Err) +} + +func (e *urlError) Unwrap() error { return e.Err } + +func wrapNetParseError(err error) error { + if err == nil { + return nil + } + + if err, ok := err.(*net.ParseError); ok { + return &netParseError{Err: err} + } + + return err +} + +type netParseError struct { + Err *net.ParseError +} + +func (e *netParseError) Error() string { + return "invalid " + e.Err.Type +} + +func (e *netParseError) Unwrap() error { return e.Err } + +func wrapTimeParseError(err error) error { + if err == nil { + return nil + } + + if err, ok := err.(*time.ParseError); ok { + return &timeParseError{Err: err} + } + + return err +} + +type timeParseError struct { + Err *time.ParseError +} + +func (e *timeParseError) Error() string { + if e.Err.Message == "" { + return fmt.Sprintf("parsing time as %q: cannot parse as %q", e.Err.Layout, e.Err.LayoutElem) + } + + return "parsing time " + e.Err.Message +} + +func (e *timeParseError) Unwrap() error { return e.Err } + +func wrapNetIPParseAddrError(err error) error { + if err == nil { + return nil + } + + if errMsg := err.Error(); strings.HasPrefix(errMsg, "ParseAddr") { + errPieces := strings.Split(errMsg, ": ") + + return fmt.Errorf("ParseAddr: %s", errPieces[len(errPieces)-1]) + } + + return err +} + +func wrapNetIPParseAddrPortError(err error) error { + if err == nil { + return nil + } + + errMsg := err.Error() + if strings.HasPrefix(errMsg, "invalid port ") { + return errors.New("invalid port") + } else if strings.HasPrefix(errMsg, "invalid ip:port ") { + return errors.New("invalid ip:port") + } + + return err +} + +func wrapNetIPParsePrefixError(err error) error { + if err == nil { + return nil + } + + if errMsg := err.Error(); strings.HasPrefix(errMsg, "netip.ParsePrefix") { + errPieces := strings.Split(errMsg, ": ") + + return fmt.Errorf("netip.ParsePrefix: %s", errPieces[len(errPieces)-1]) + } + + return err +} + +func wrapTimeParseDurationError(err error) error { + if err == nil { + return nil + } + + errMsg := err.Error() + if strings.HasPrefix(errMsg, "time: unknown unit ") { + return errors.New("time: unknown unit") + } else if strings.HasPrefix(errMsg, "time: ") { + idx := strings.LastIndex(errMsg, " ") + + return errors.New(errMsg[:idx]) + } + + return err +} + +func wrapTimeParseLocationError(err error) error { + if err == nil { + return nil + } + errMsg := err.Error() + if strings.Contains(errMsg, "unknown time zone") || strings.HasPrefix(errMsg, "time: unknown format") { + return fmt.Errorf("invalid time zone format: %w", err) + } + + return err +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock new file mode 100644 index 00000000..5e67bdd6 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.lock @@ -0,0 +1,294 @@ +{ + "nodes": { + "cachix": { + "inputs": { + "devenv": [ + "devenv" + ], + "flake-compat": [ + "devenv" + ], + "git-hooks": [ + "devenv" + ], + "nixpkgs": "nixpkgs" + }, + "locked": { + "lastModified": 1742042642, + "narHash": "sha256-D0gP8srrX0qj+wNYNPdtVJsQuFzIng3q43thnHXQ/es=", + "owner": "cachix", + "repo": "cachix", + "rev": 
"a624d3eaf4b1d225f918de8543ed739f2f574203", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "latest", + "repo": "cachix", + "type": "github" + } + }, + "devenv": { + "inputs": { + "cachix": "cachix", + "flake-compat": "flake-compat", + "git-hooks": "git-hooks", + "nix": "nix", + "nixpkgs": "nixpkgs_3" + }, + "locked": { + "lastModified": 1744876578, + "narHash": "sha256-8MTBj2REB8t29sIBLpxbR0+AEGJ7f+RkzZPAGsFd40c=", + "owner": "cachix", + "repo": "devenv", + "rev": "7ff7c351bba20d0615be25ecdcbcf79b57b85fe1", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1733328505, + "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": [ + "devenv", + "nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1712014858, + "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-parts_2": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1743550720, + "narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "c621e8422220273271f52058f618c94e405bb0f5", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "git-hooks": { + "inputs": { + "flake-compat": [ + "devenv" + ], + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1742649964, + "narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "git-hooks.nix", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "git-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "libgit2": { + "flake": false, + "locked": { + "lastModified": 1697646580, + "narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=", + "owner": "libgit2", + "repo": "libgit2", + "rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5", + "type": "github" + }, + "original": { + "owner": "libgit2", + "repo": "libgit2", + "type": "github" + } + }, + "nix": { + "inputs": { + "flake-compat": [ + "devenv" + ], + "flake-parts": "flake-parts", + "libgit2": "libgit2", + "nixpkgs": "nixpkgs_2", + "nixpkgs-23-11": [ + "devenv" + ], + "nixpkgs-regression": [ + "devenv" + ], + "pre-commit-hooks": [ + "devenv" + ] + }, + "locked": { + "lastModified": 1741798497, + "narHash": "sha256-E3j+3MoY8Y96mG1dUIiLFm2tZmNbRvSiyN7CrSKuAVg=", + "owner": "domenkozar", + "repo": "nix", + "rev": 
"f3f44b2baaf6c4c6e179de8cbb1cc6db031083cd", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.24", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1733212471, + "narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "55d15ad12a74eb7d4646254e13638ad0c4128776", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1743296961, + "narHash": "sha256-b1EdN3cULCqtorQ4QeWgLMrd5ZGOjLSLemfa00heasc=", + "owner": "nix-community", + "repo": "nixpkgs.lib", + "rev": "e4822aea2a6d1cdd36653c134cacfd64c97ff4fa", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nixpkgs.lib", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1717432640, + "narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "88269ab3044128b7c2f4c7d68448b2fb50456870", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "release-24.05", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1733477122, + "narHash": "sha256-qamMCz5mNpQmgBwc8SB5tVMlD5sbwVIToVZtSxMph9s=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "7bd9e84d0452f6d2e63b6e6da29fe73fac951857", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_4": { + "locked": { + "lastModified": 1744536153, + "narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts_2", + "nixpkgs": "nixpkgs_4" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/vendor/github.com/go-viper/mapstructure/v2/flake.nix new file mode 100644 index 00000000..3b116f42 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.nix @@ -0,0 +1,46 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = + inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ + "x86_64-linux" + "x86_64-darwin" + "aarch64-darwin" + ]; + + perSystem = + { pkgs, ... 
}: + rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + pre-commit.hooks = { + nixpkgs-fmt.enable = true; + }; + + packages = with pkgs; [ + golangci-lint + ]; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + }; + }; + }; +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go new file mode 100644 index 00000000..d1c15e47 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go @@ -0,0 +1,11 @@ +package errors + +import "errors" + +func New(text string) error { + return errors.New(text) +} + +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go new file mode 100644 index 00000000..d74e3a0b --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go @@ -0,0 +1,9 @@ +//go:build go1.20 + +package errors + +import "errors" + +func Join(errs ...error) error { + return errors.Join(errs...) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go new file mode 100644 index 00000000..700b4022 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go @@ -0,0 +1,61 @@ +//go:build !go1.20 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. +func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + // Since Join returns nil if every value in errs is nil, + // e.errs cannot be empty. + if len(e.errs) == 1 { + return e.errs[0].Error() + } + + b := []byte(e.errs[0].Error()) + for _, err := range e.errs[1:] { + b = append(b, '\n') + b = append(b, err.Error()...) + } + // At this point, b has at least one byte '\n'. + // return unsafe.String(&b[0], len(b)) + return string(b) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go new file mode 100644 index 00000000..7c35bce0 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -0,0 +1,1712 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]any +// into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. 
and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// # Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// # Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// # Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]any{ +// "person": map[string]any{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]any{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]any{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// # Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]any" or "map[any]any". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]any `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]any{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// # Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value, or a zero-length element. The zero value of all types is +// specified in the Go specification. +// +// For example, the zero type of a numeric type is zero ("0"). 
If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. And likewise for the URLs field, if the +// slice is nil or empty, it won't be encoded into the destination type. +// +// type Source struct { +// Age int `mapstructure:",omitempty"` +// URLs []string `mapstructure:",omitempty"` +// } +// +// # Omit Zero Values +// +// When decoding from a struct to any other value, you may use the +// ",omitzero" suffix on your tag to omit that value if it equates to the zero +// value. The zero value of all types is specified in the Go specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. And likewise for the URLs field, if the +// slice is nil, it won't be encoded into the destination type. +// +// Note that if the field is a slice, and it is empty but not nil, it will +// still be encoded into the destination type. +// +// type Source struct { +// Age int `mapstructure:",omitzero"` +// URLs []string `mapstructure:",omitzero"` +// } +// +// # Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]any{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// # Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. +package mapstructure + +import ( + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/go-viper/mapstructure/v2/internal/errors" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc any + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, any) (any, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, any) (any, error) + +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. 
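
As a concrete end-to-end illustration of the tag options documented above (`,squash` and `,remain` in particular), here is a minimal decode sketch; the types and input values are illustrative assumptions, not taken from any file in this change:

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Person struct {
	Name string
}

type Friend struct {
	Person `mapstructure:",squash"` // flattened: "name" maps straight onto Person.Name
	Age    int            `mapstructure:"age"`
	Other  map[string]any `mapstructure:",remain"` // collects keys with no matching field
}

func main() {
	input := map[string]any{
		"name":    "alice",
		"age":     30,
		"address": "123 Maple St.", // no matching field, so it lands in Other
	}

	var f Friend
	if err := mapstructure.Decode(input, &f); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", f)
	// {Person:{Name:alice} Age:30 Other:map[address:123 Maple St.]}
}
```
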
+type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (any, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + + // AllowUnsetPointer, if set to true, will prevent fields with pointer types + // from being reported as unset, even if ErrorUnset is true and the field was + // not present in the input data. This allows pointer fields to be optional + // without triggering an error when they are missing. + AllowUnsetPointer bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result any + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // The option of the value in the tag that indicates a field should + // be squashed. This defaults to "squash". + SquashTagOption string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. 
+ IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool + + // DecodeNil, if set to true, will cause the DecodeHook (if present) to run + // even if the input is nil. This can be used to provide default values. + DecodeNil bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig + cachedDecodeHook func(from reflect.Value, to reflect.Value) (any, error) +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input any, output any) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output any) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input any, output any, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input any, output any, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. 
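
The Decode/WeakDecode wrappers above are thin shims over NewDecoder; building the DecoderConfig by hand exposes the remaining options. A minimal sketch, assuming a hypothetical Config target plus a hand-rolled string-to-time.Duration hook in the DecodeHookFuncType shape described earlier:

```go
package main

import (
	"fmt"
	"reflect"
	"time"

	"github.com/go-viper/mapstructure/v2"
)

type Config struct {
	Port    int           `mapstructure:"port"`
	Timeout time.Duration `mapstructure:"timeout"`
}

func main() {
	var (
		result Config
		md     mapstructure.Metadata
	)

	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:           &result,
		Metadata:         &md,
		WeaklyTypedInput: true, // lets the string "8080" decode into the int field
		// A DecodeHookFuncType that parses strings into time.Duration.
		DecodeHook: func(from reflect.Type, to reflect.Type, data any) (any, error) {
			if from.Kind() == reflect.String && to == reflect.TypeOf(time.Duration(0)) {
				return time.ParseDuration(data.(string))
			}
			return data, nil
		},
	})
	if err != nil {
		panic(err)
	}

	input := map[string]any{"port": "8080", "timeout": "30s", "debug": true}
	if err := decoder.Decode(input); err != nil {
		panic(err)
	}

	fmt.Println(result.Port, result.Timeout) // 8080 30s
	fmt.Println(md.Unused)                   // [debug]
}
```
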
+func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + if config.SquashTagOption == "" { + config.SquashTagOption = "squash" + } + + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + + result := &Decoder{ + config: config, + } + if config.DecodeHook != nil { + result.cachedDecodeHook = cachedDecodeHook(config.DecodeHook) + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input any) error { + err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) + + // Retain some of the original behavior when multiple errors ocurr + var joinedErr interface{ Unwrap() []error } + if errors.As(err, &joinedErr) { + return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err) + } + + return err +} + +// isNil returns true if the input is nil or a typed nil pointer. +func isNil(input any) bool { + if input == nil { + return true + } + val := reflect.ValueOf(input) + return val.Kind() == reflect.Ptr && val.IsNil() +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input any, outVal reflect.Value) error { + var ( + inputVal = reflect.ValueOf(input) + outputKind = getKind(outVal) + decodeNil = d.config.DecodeNil && d.cachedDecodeHook != nil + ) + if isNil(input) { + // Typed nils won't match the "input == nil" below, so reset input. + input = nil + } + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + if !decodeNil { + return nil + } + } + if !inputVal.IsValid() { + if !decodeNil { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + // Hooks need a valid inputVal, so reset it to zero value of outVal type. + switch outputKind { + case reflect.Struct, reflect.Map: + var mapVal map[string]any + inputVal = reflect.ValueOf(mapVal) // create nil map pointer + case reflect.Slice, reflect.Array: + var sliceVal []any + inputVal = reflect.ValueOf(sliceVal) // create nil slice pointer + default: + inputVal = reflect.Zero(outVal.Type()) + } + } + + if d.cachedDecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. 
+ var err error + input, err = d.cachedDecodeHook(inputVal, outVal) + if err != nil { + return newDecodeError(name, err) + } + } + if isNil(input) { + return nil + } + + var err error + addMetaKey := true + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Complex64: + err = d.decodeComplex(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind)) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. + if addMetaKey && d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data any, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil + } + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. 
+ // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: wrapStrconvNumError(err), + }) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: err, + }) + } + val.SetInt(i) + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: 
data, + Err: fmt.Errorf("%d overflows uint", i), + }) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: fmt.Errorf("%f overflows uint", f), + }) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: wrapStrconvNumError(err), + }) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: wrapStrconvNumError(err), + }) + } + val.SetUint(i) + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: wrapStrconvNumError(err), + }) + } + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: wrapStrconvNumError(err), + }) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: err, + }) + } + val.SetFloat(i) + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: 
data, + }) + } + + return nil +} + +func (d *Decoder) decodeComplex(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Complex64: + val.SetComplex(dataVal.Complex()) + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data any, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + dataVal := reflect.ValueOf(data) + + // Resolve any levels of indirection + for dataVal.Kind() == reflect.Pointer { + dataVal = reflect.Indirect(dataVal) + } + + // Check input type and based on the input type jump to the proper func + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + var errs []error + + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errs = append(errs, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errs = append(errs, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. 
+ f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. + v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return newDecodeError( + name+"."+f.Name, + fmt.Errorf("cannot assign type %q to map value field of type %q", v.Type(), valMap.Type().Elem()), + ) + } + + tagValue := f.Tag.Get(d.config.TagName) + keyName := f.Name + + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "omitzero" is specified in the tag, it ignores zero values. + if strings.Index(tagValue[index+1:], "omitzero") != -1 && v.IsZero() { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption) + if squash { + // When squashing, the embedded type can be a pointer to a struct. + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return newDecodeError( + name+"."+f.Name, + fmt.Errorf("cannot squash non-struct type %q", v.Type()), + ) + } + } else { + if strings.Index(tagValue[index+1:], "remain") != -1 { + if v.Kind() != reflect.Map { + return newDecodeError( + name+"."+f.Name, + fmt.Errorf("error remain-tag field with invalid type: %q", v.Type()), + ) + } + + ptr := v.MapRange() + for ptr.Next() { + valMap.SetMapIndex(ptr.Key(), ptr.Value()) + } + continue + } + } + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. 
+ vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data any, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil +} + +func (d *Decoder) decodeFunc(name string, data any, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []any{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []any{data}, val) + } + } + + return newDecodeError(name, + fmt.Errorf("source data must be an array or slice, got %s", dataValKind)) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data. 
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } else if valSlice.Len() > dataVal.Len() { + valSlice = valSlice.Slice(0, dataVal.Len()) + } + + // Accumulate any errors + var errs []error + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errs = append(errs, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeArray(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []any{data}, val) + } + } + + return newDecodeError(name, + fmt.Errorf("source data must be an array or slice, got %s", dataValKind)) + + } + if dataVal.Len() > arrayType.Len() { + return newDecodeError(name, + fmt.Errorf("expected source data to have length less or equal to %d, got %d", arrayType.Len(), dataVal.Len())) + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + var errs []error + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errs = append(errs, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeStruct(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]any)(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). 
The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) + return result + + default: + return newDecodeError(name, + fmt.Errorf("expected a map or struct, got %q", dataValKind)) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return newDecodeError(name, + fmt.Errorf("needs a map with string keys, has %q keys", kind)) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[any]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + targetValKeysUnused := make(map[any]struct{}) + + var errs []error + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. + var remainField *field + + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } + + // If "squash" is specified in the tag, we squash the field down. 
+ squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == d.config.SquashTagOption { + squash = true + break + } + + if tag == "remain" { + remain = true + break + } + } + + if squash { + switch fieldVal.Kind() { + case reflect.Struct: + structs = append(structs, fieldVal) + case reflect.Interface: + if !fieldVal.IsNil() { + structs = append(structs, fieldVal.Elem().Elem()) + } + default: + errs = append(errs, newDecodeError( + name+"."+fieldType.Name, + fmt.Errorf("unsupported type for squash: %s", fieldVal.Kind()), + )) + } + continue + } + + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if d.config.MatchName(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Remember it for potential errors and metadata. + if !(d.config.AllowUnsetPointer && fieldValue.Kind() == reflect.Ptr) { + targetValKeysUnused[fieldName] = struct{}{} + } + continue + } + } + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = name + "." + fieldName + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errs = append(errs, err) + } + } + + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. + if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[any]any{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. 
+ if err := d.decodeMap(name, remain, remainField.val); err != nil { + errs = append(errs, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + errs = append(errs, newDecodeError( + name, + fmt.Errorf("has invalid keys: %s", strings.Join(keys, ", ")), + )) + } + + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + errs = append(errs, newDecodeError( + name, + fmt.Errorf("has unset fields: %s", strings.Join(keys, ", ")), + )) + } + + if err := errors.Join(errs...); err != nil { + return err + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + case kind >= reflect.Complex64 && kind <= reflect.Complex128: + return reflect.Complex64 + default: + return kind + } +} + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go new file mode 100644 index 00000000..d0913fff --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go @@ -0,0 +1,44 @@ +//go:build !go1.20 + +package mapstructure + +import "reflect" + +func 
isComparable(v reflect.Value) bool {
+	k := v.Kind()
+	switch k {
+	case reflect.Invalid:
+		return false
+
+	case reflect.Array:
+		switch v.Type().Elem().Kind() {
+		case reflect.Interface, reflect.Array, reflect.Struct:
+			for i := 0; i < v.Type().Len(); i++ {
+				// if !v.Index(i).Comparable() {
+				if !isComparable(v.Index(i)) {
+					return false
+				}
+			}
+			return true
+		}
+		return v.Type().Comparable()
+
+	case reflect.Interface:
+		// return v.Elem().Comparable()
+		return isComparable(v.Elem())
+
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			// if !v.Field(i).Comparable() {
+			if !isComparable(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+
+	default:
+		return v.Type().Comparable()
+	}
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
new file mode 100644
index 00000000..f8255a1b
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
@@ -0,0 +1,10 @@
+//go:build go1.20
+
+package mapstructure
+
+import "reflect"
+
+// TODO: remove once we drop support for Go <1.20
+func isComparable(v reflect.Value) bool {
+	return v.Comparable()
+}
diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS
new file mode 100644
index 00000000..3d97fc7a
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of GoGo authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS file, which
+# lists people. For example, employees are listed in CONTRIBUTORS,
+# but not in AUTHORS, because the employer holds the copyright.
+
+# Names should be added to this file as one of
+#	Organization's name
+#	Individual's name
+#	Individual's name
+
+# Please keep the list sorted.
+
+Sendgrid, Inc
+Vastech SA (PTY) LTD
+Walter Schulze
diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
new file mode 100644
index 00000000..1b4f6c20
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
@@ -0,0 +1,23 @@
+Anton Povarov
+Brian Goff
+Clayton Coleman
+Denis Smirnov
+DongYun Kang
+Dwayne Schultz
+Georg Apitz
+Gustav Paul
+Johan Brandhorst
+John Shahid
+John Tuley
+Laurent
+Patrick Lee
+Peter Edge
+Roger Johansson
+Sam Nguyen
+Sergio Arbeo
+Stephen J Day
+Tamir Duberstein
+Todd Eisenberger
+Tormod Erevik Lea
+Vyacheslav Kim
+Walter Schulze
diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE
new file mode 100644
index 00000000..f57de90d
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/LICENSE
@@ -0,0 +1,35 @@
+Copyright (c) 2013, The GoGo Authors. All rights reserved.
+
+Protocol Buffers for Go with Gadgets
+
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc.
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 00000000..0b4659b7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 00000000..081c86fa --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. 
It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. + +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +This is useful when a protocol buffer message has a field name that conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available. + +Gogoprotobuf also has some more subtle changes, these could be changed back: + + - the generated package names for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
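As a hedged illustration (an editorial sketch, not part of the vendored godoc), the compatibility code in question is the standard goprotobuf accessor shape; for message C above, with the getters extension left at its default, the generated code includes roughly:

	func (m *C) GetMySize() int64 {
		if m != nil && m.MySize != nil {
			return *m.MySize
		}
		return 0
	}
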
+The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix. + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method; this is useful if you would rather use enum_stringer, or write your own string method. + - goproto_getters, if false, the message is generated without get methods; this is useful when you would rather use face. + - goproto_stringer, if false, the message is generated without the default string method; this is useful if you would rather use stringer, or write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). + +Less Typing and Peace of Mind is explained in their specific plugin folders godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extensions, the generated code will be the same as if goprotobuf had generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a separate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 00000000..1e91766a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + 
ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: 
"gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + 
ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + 
ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + 
proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 
0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 
0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 00000000..f6502e4b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 00000000..b80c8565 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
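A minimal sketch (editorial, not part of the vendored files) of how the option numbers declared below are consumed: the helper functions later in this diff layer a message-level option over the file-wide *_all variant, which in turn falls back to a hard-coded default. In Go, taking the marshaler option as the example and reusing the generated E_* descriptors from this package:

	package gogoproto

	import (
		proto "github.com/gogo/protobuf/proto"
		descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
	)

	// isMarshalerSketch mirrors the vendored IsMarshaler helper: the
	// message-level option wins, then the file-wide *_all option, and
	// finally the hard default (false).
	func isMarshalerSketch(file *descriptor.FileDescriptorProto, message *descriptor.DescriptorProto) bool {
		return proto.GetBoolExtension(message.Options, E_Marshaler,
			proto.GetBoolExtension(file.Options, E_MarshalerAll, false))
	}
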
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 00000000..390d4e4b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == 
".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && 
v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, 
proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return 
proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go new file mode 100644 index 00000000..e8134ec8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1435 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) +const maxSecondsInDuration = 315576000000 + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. 
+// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type isWkt interface { + XXX_WellKnownType() string +} + +var ( + wktType = reflect.TypeOf((*isWkt)(nil)).Elem() + messageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if m.Indent != "" { + b, err = json.MarshalIndent(js, indent, m.Indent) + } else { + b, err = json.Marshal(js) + } + if err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(isWkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. 
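+ // For example, an Any that packs a Duration marshals as
+ // {"@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.5s"},
+ // while an Any that packs an ordinary message inlines that message's
+ // fields next to the "@type" key.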
+ return m.marshalAny(out, v, indent) + case "Duration": + s, ns := s.Field(0).Int(), s.Field(1).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + f := "%d.%09d" + if ns < 0 { + ns = -ns + if s == 0 { + f = "-%d.%09d" + } + } + x := fmt.Sprintf(f, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + //this is not a protobuf field + if valueField.Tag.Get("protobuf") == "" && valueField.Tag.Get("protobuf_oneof") == "" { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. 
+ sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + // If the map value is a cast type, it may not implement proto.Message, therefore + // allow the struct tag to declare the underlying message type. Change the property + // of the child types and use CustomType as a passer; the CastType property is + // currently not used in JSON encoding. + if value.Kind() == reflect.Map { + if tag := valueField.Tag.Get("protobuf"); tag != "" { + for _, v := range strings.Split(tag, ",") { + if !strings.HasPrefix(v, "castvaluetype=") { + continue + } + v = strings.TrimPrefix(v, "castvaluetype=") + prop.MapValProp.CustomType = v + break + } + } + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. + ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
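+ // The generated Any struct keeps the type URL in field 0 (TypeUrl) and
+ // the serialized payload in field 1 (Value), which is why the
+ // reflection below addresses the fields positionally.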
+ v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(isWkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + if v.Type().Implements(wktType) { + wkt := v.Interface().(isWkt) + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + if t, ok := v.Interface().(time.Time); ok { + ts, err := types.TimestampProto(t) + if err != nil { + return err + } + return m.marshalValue(out, prop, reflect.ValueOf(ts), indent) + } + + if d, ok := v.Interface().(time.Duration); ok { + dur := types.DurationProto(d) + return m.marshalValue(out, prop, reflect.ValueOf(dur), indent) + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. 
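+ // For example, a known value renders as the quoted name "RED", while an
+ // unrecognized numeric value such as 42 renders as the bare token 42.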
+ enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + enumStr = string(data) + enumStr, err = strconv.Unquote(enumStr) + if err != nil { + return err + } + } + + isKnownEnum := enumStr != valStr + + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. + if v.Kind() == reflect.Struct { + i := v + if v.CanAddr() { + i = v.Addr() + } else { + i = reflect.New(v.Type()) + i.Elem().Set(v) + } + iface := i.Interface() + if iface == nil { + out.write(`null`) + return out.err + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + out.write(string(data)) + return nil + } + + pm, ok := iface.(proto.Message) + if !ok { + if prop.CustomType == "" { + return fmt.Errorf("%v does not implement proto.Message", v.Type()) + } + t := proto.MessageType(prop.CustomType) + if t == nil || !i.Type().ConvertibleTo(t) { + return fmt.Errorf("%v declared custom type %s but it is not convertible to %v", v.Type(), prop.CustomType, t) + } + pm = i.Convert(t).Interface().(proto.Message) + } + return m.marshalObject(out, pm, indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + // TODO handle map key prop properly + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. 
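+//
+// A minimal usage sketch, assuming a hypothetical generated message
+// type pb.Ping:
+//
+//	var msg pb.Ping
+//	u := &Unmarshaler{AllowUnknownFields: true}
+//	err := u.Unmarshal(strings.NewReader(`{"name": "x"}`), &msg)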
+type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any permutation of the +// related Marshaler's options. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any permutation +// of the related Marshaler's options. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any permutation of the +// related Marshaler's options. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any permutation +// of the related Marshaler's options. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any permutation +// of the related Marshaler's options. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&types.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(isWkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(isWkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, uerr := json.Marshal(jsonFields) + if uerr != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", uerr) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + target.Field(0).Set(reflect.ValueOf(map[string]*types.Value{})) + for k, jv := range m { + pv := &types.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*types.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&types.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_NumberValue{NumberValue: v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_StringValue{StringValue: v})) + } else if v, err := 
strconv.ParseBool(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_BoolValue{BoolValue: v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &types.ListValue{} + target.Field(0).Set(reflect.ValueOf(&types.Value_ListValue{ListValue: lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &types.Struct{} + target.Field(0).Set(reflect.ValueOf(&types.Value_StructValue{StructValue: sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + if t, ok := target.Addr().Interface().(*time.Time); ok { + ts := &types.Timestamp{} + if err := u.unmarshalValue(reflect.ValueOf(ts).Elem(), inputValue, prop); err != nil { + return err + } + tt, err := types.TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil + } + + if d, ok := target.Addr().Interface().(*time.Duration); ok { + dur := &types.Duration{} + if err := u.unmarshalValue(reflect.ValueOf(dur).Elem(), inputValue, prop); err != nil { + return err + } + dd, err := types.DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + if prop != nil && len(prop.CustomType) > 0 && target.CanAddr() { + if m, ok := target.Addr().Interface().(interface { + UnmarshalJSON([]byte) error + }); ok { + return json.Unmarshal(inputValue, m) + } + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. 
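+ // For example, given {"foo_bar": 1, "fooBar": 2}, the camelName
+ // value 2 is the one kept.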
+ var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. + if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays + if targetType.Kind() == reflect.Slice { + if targetType.Elem().Kind() == reflect.Uint8 { + outRef := reflect.New(targetType) + outVal := outRef.Interface() + //CustomType with underlying type []byte + if _, ok := outVal.(interface { + UnmarshalJSON([]byte) error + }); ok { + if err := json.Unmarshal(inputValue, outVal); err != nil { + return err + } + target.Set(outRef.Elem()) + return nil + } + // Special case for encoded bytes. Pre-go1.5 doesn't support unmarshalling + // strings into aliased []byte types. + // https://github.com/golang/go/commit/4302fd0409da5e4f1d71471a6770dacdc3301197 + // https://github.com/golang/go/commit/c60707b14d6be26bf4213114d13070bff00d0b0a + var out []byte + if err := json.Unmarshal(inputValue, &out); err != nil { + return err + } + target.SetBytes(out) + return nil + } + + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. 
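+ // For example, a map<int32, string> entry encoded as {"42": "x"}
+ // arrives here with ks == "42", which parses directly as the key 42.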
+ var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + if !k.Type().AssignableTo(targetType.Key()) { + k = k.Convert(targetType.Key()) + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. 
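+// For example, int32 keys 1, 2 and 10 are emitted in that order rather
+// than in the lexical order 1, 10, 2.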
+type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.String: + return s[i].String() < s[j].String() + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoke proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(isWkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. + if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}. + if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. 
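+ // A repeated message element may itself carry required fields,
+ // so each element is checked recursively.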
+ for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if v.Type().Implements(messageType) { + return checkRequiredFields(v.Interface().(proto.Message)) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 00000000..00d65f32 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 00000000..a26b046d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generated Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
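+//
+// For example, for a hypothetical message with a scalar Name and a
+// repeated Tags field:
+//
+//	// dst: {Name: "a", Tags: ["x"]}, src: {Name: "b", Tags: ["y"]}
+//	Merge(dst, src)
+//	// dst is now {Name: "b", Tags: ["x", "y"]}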
+func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. 
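+ // Consequently the bytes are copied wholesale below rather than
+ // appended element by element like a true repeated field.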
+ + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 00000000..24552483 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
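custom_gogo.go, whose body follows, declares the three-method shape that gogoproto custom types provide. A sketch of a type that would satisfy it (ipAddr is hypothetical; note the pointer receiver on Unmarshal, so it is *ipAddr that implements all three methods):

package main

import "fmt"

// ipAddr is a hypothetical fixed-width custom type implementing the
// Marshal / Unmarshal / Size triple named by the interface below.
type ipAddr [4]byte

func (ip ipAddr) Marshal() ([]byte, error) { return ip[:], nil }

func (ip *ipAddr) Unmarshal(data []byte) error {
	if len(data) != 4 {
		return fmt.Errorf("ipAddr: want 4 bytes, got %d", len(data))
	}
	copy(ip[:], data)
	return nil
}

func (ip ipAddr) Size() int { return 4 }

func main() {
	var a ipAddr
	_ = a.Unmarshal([]byte{127, 0, 0, 1})
	b, _ := a.Marshal()
	fmt.Println(b, a.Size()) // [127 0 0 1] 4
}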
+ +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 00000000..63b0f08b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. 
+ return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. 
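A round-trip sketch of the varint and zigzag codecs documented above, using only this package's exported helpers (proto.EncodeVarint and the zigzag writers live in encode.go, further down this diff):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// The standalone helpers round-trip an unsigned varint.
	buf := proto.EncodeVarint(300) // ac 02
	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2

	// Zigzag keeps small negative numbers in small varints; the Buffer
	// methods pass the value's two's-complement bits through uint64.
	b := proto.NewBuffer(nil)
	_ = b.EncodeZigzag64(uint64(int64(-3)))
	v, _ := proto.NewBuffer(b.Bytes()).DecodeZigzag64()
	fmt.Println(int64(v)) // -3
}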
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. 
+ // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 00000000..35b882c0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
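Tying together decode.go's entry points above, a sketch of the reset-versus-merge distinction; it reuses the hypothetical testMsg type from the Clone/Merge sketch and assumes proto and fmt are imported:

func demoUnmarshal() {
	// Hand-built wire bytes for field 1 (string "hi"): key 0x0A, length 2.
	raw := []byte{0x0A, 0x02, 'h', 'i'}

	m := &testMsg{Tags: []string{"old"}}

	// Unmarshal resets m first, so the pre-existing Tags are dropped.
	_ = proto.Unmarshal(raw, m)
	fmt.Println(m.Tags, *m.Name) // [] hi

	// UnmarshalMerge keeps existing data and merges into it.
	m.Tags = []string{"old"}
	_ = proto.UnmarshalMerge(raw, m)
	fmt.Println(m.Tags, *m.Name) // [old] hi
}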
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 00000000..fe1bd7d9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. 
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 00000000..93464c91 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
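With discard.go complete above, a sketch of DiscardUnknown in action, again reusing the hypothetical testMsg (its XXX_unrecognized field is where the legacy paths park unknown fields; assumes proto and fmt are imported):

func demoDiscard() {
	// Field 1 = "hi", plus an unknown varint field 99 (key bytes 0x98 0x06).
	raw := []byte{0x0A, 0x02, 'h', 'i', 0x98, 0x06, 0x01}

	m := &testMsg{}
	_ = proto.Unmarshal(raw, m)
	fmt.Println(len(m.XXX_unrecognized)) // 3: field 99 was preserved

	proto.DiscardUnknown(m)
	fmt.Println(len(m.XXX_unrecognized)) // 0
}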
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 00000000..e748e173 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
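The conversion helpers in duration.go above are unexported; here is the same arithmetic written standalone, a sketch that mirrors rather than calls that code (toDuration is hypothetical; it assumes time and fmt are imported and that secs/nanos were already range- and sign-checked, as validateDuration enforces upstream):

// toDuration mirrors durationFromProto: scale seconds first, then add
// nanos, checking for time.Duration overflow at each step.
func toDuration(secs int64, nanos int32) (time.Duration, error) {
	d := time.Duration(secs) * time.Second
	if int64(d/time.Second) != secs {
		return 0, fmt.Errorf("duration: %d seconds out of range for time.Duration", secs)
	}
	if nanos != 0 {
		d += time.Duration(nanos)
		if (d < 0) != (nanos < 0) {
			return 0, fmt.Errorf("duration: out of range for time.Duration")
		}
	}
	return d, nil
}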
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 00000000..9581ccd3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,205 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. 
+// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + sizVar := SizeVarint(uint64(siz)) + p.grow(siz + sizVar) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 00000000..0f5fb173 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
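With encode.go closed out above, a short sketch of the Buffer's low-level writers; the per-call wire bytes are noted in the comments:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	_ = b.EncodeVarint(1)              // 01
	_ = b.EncodeFixed32(0xDEADBEEF)    // ef be ad de (little-endian)
	_ = b.EncodeRawBytes([]byte("hi")) // 02 68 69 (length-prefixed)
	fmt.Printf("% x\n", b.Bytes())     // 01 ef be ad de 02 68 69

	// SizeVarint reports the encoded width without allocating.
	fmt.Println(proto.SizeVarint(300)) // 2
}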
+ +package proto + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 00000000..d4db5a1c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. 
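As a rough illustration of these rules (a sketch only, assuming a
pointer-to-struct message type like the hypothetical testMsg used in
earlier sketches in this diff):

	a := &testMsg{Name: proto.String("x")}
	b := &testMsg{Name: proto.String("x")}
	proto.Equal(a, b) // true: same type, equal set fields
	b.Tags = append(b.Tags, "extra")
	proto.Equal(a, b) // false: repeated field lengths differ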
+*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 00000000..341c6f57 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,605 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. 
+ return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. 
+func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
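+//
+// A typical call sequence, as an editor's sketch (E_MyExt and msg stand in
+// for a generated extension descriptor and message; neither is defined in
+// this package):
+//
+//	if proto.HasExtension(msg, E_MyExt) {
+//		v, err := proto.GetExtension(msg, E_MyExt)
+//		if err == nil {
+//			s := v.(*string) // concrete type matches ExtensionDesc.ExtensionType
+//			_ = s
+//		}
+//	}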
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. 
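+	// (Editor's note) The decode loop below re-reads a tag varint before
+	// each value, so a repeated extension, which is encoded as a sequence
+	// of tag/value pairs, accumulates every element into the same slice.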
+ value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + ClearExtension(pb, extension) + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) + return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. 
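+// For example, generated code conventionally looks like this (editor's
+// sketch; MyBase, my_ext, and the field number are illustrative only):
+//
+//	var E_MyExt = &proto.ExtensionDesc{
+//		ExtendedType:  (*MyBase)(nil),
+//		ExtensionType: (*string)(nil),
+//		Field:         123,
+//		Name:          "example.my_ext",
+//		Tag:           "bytes,123,opt,name=my_ext",
+//	}
+//
+//	func init() { proto.RegisterExtension(E_MyExt) }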
+ +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 00000000..6f1ae120 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,389 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMapBackwards(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { + o := 0 + end := len(data) + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[end-len(e.enc):], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + end -= n + o += n + } + return o, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + e := m[id] + if err := e.Encode(); err != nil { + return nil, err + } + return e.enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) 
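+		// (Editor's note) Extension is stored by value in the map, so the
+		// locally modified copy must be written back for the append to stick.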
+ m[int32(tag)] = ext + } +} + +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } + } + return nil +} + +func (this Extension) GoString() string { + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 00000000..80db1c15 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,973 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. 
HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m 
*Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. 
It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. 
+func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults 
sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or T or []*T or []T + switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.Kind() == reflect.Ptr && e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. 
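+// A defaultMessage is built once per message type by buildDefaultMessage
+// below and cached in the defaults map above, so repeated SetDefaults calls
+// on the same type skip the reflection-based tag parsing (editor's note).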
+type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr, reflect.Struct: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. 
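+	// For example (editor's note), ft = *int32 records kind Int32 and
+	// ft = []byte records kind Uint8; the switch further below parses
+	// prop.Default into a value of the matching Go type.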
+ sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. 
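+// For example (editor's note), an int32 holding 0, a string holding "",
+// or a bool holding false is zero here; this is used when encoding proto3
+// scalars so that zero-valued fields are omitted from the wire format.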
+func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion3 = true + + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion2 = true + + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion1 = true +) + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 00000000..b3aa3919 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,50 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "encoding/json" + "strconv" +) + +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 00000000..f48a7567 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,181 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. 
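+// A type stored in a message set is expected to provide a method of this
+// form (editor's sketch; the type name and ID are illustrative only):
+//
+//	func (*MyItem) MessageTypeId() int32 { return 12345 }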
+type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 00000000..b6cad908 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. 
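+// (grow backs the append helpers below: it extends the length in place while +// capacity allows, and otherwise appends a zero value.)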
+func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
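+ // In this reflect-based implementation p.v already carries its pointer + // type, so it can be returned as-is; the unsafe variant has to rebuild + // the type with reflect.NewAt.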
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 00000000..7ffd3c29 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. 
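+// These helpers back gogoproto's non-nullable (value) fields: getRef takes +// the address of the value p refers to, getSlice loads the []T stored at p, +// and appendRef appends the element v points to onto that slice.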
+ +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 00000000..d55a335d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. 
+type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 00000000..aca8eed0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
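+// It is the unsafe counterpart of pointer_reflect_gogo.go.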
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 00000000..28da1475 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,610 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
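+// Tags below the limit are resolved by indexing directly into fastTags; +// anything at or above it falls back to the slowTags map.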
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + WktPointer bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + log.Printf("proto: tag has too few fields: %q", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + log.Printf("proto: tag has unknown wire type: %q", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + case f == "wktptr": + p.WktPointer = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
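+// It records the Go types needed later: stype and sprop for message structs, +// ctype for custom types, and mtype plus MapKeyProp/MapValProp for maps.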
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
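+ // (The init calls below can re-enter getPropertiesLocked for a field whose + // type is the enclosing message; publishing the entry first lets that + // lookup find the in-progress properties instead of recursing forever.)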
+ propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + if isOneofMessage { + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() + } + if len(oots) > 0 { + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
+var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 00000000..40ea3dd9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 00000000..5a5fd93f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
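+// This file provides Skip, which advances past a single wire-format field +// (tag and payload) and returns the offset just past that field.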
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 00000000..f8babdef --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3009 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
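+// This file implements the table-driven marshaler: marshal information is +// computed once per message type, cached, and reused for sizing and encoding.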
+ +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, and computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, and returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements + + hassizer bool // has custom sizer + hasprotosizer bool // has custom protosizer + + bytesExtensions field // offset of XXX_extensions where the field type is []byte +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex + + uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily be initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of the encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want to crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info.
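+// The returned slice may share its backing array with b.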
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want to crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from the type of the message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + // Uses the message's Size method if available + if u.hassizer { + s := ptr.asPointerTo(u.typ).Interface().(Sizer) + return s.Size() + } + // Uses the message's ProtoSize method if available + if u.hasprotosizer { + s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) + return s.ProtoSize() + } + + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + n += len(s) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated), +// it falls back to computing the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, and returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, maps are marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient.
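+ // (A custom Marshaler encodes into its own buffer, which is then copied + // onto b rather than appended in place.)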
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
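+	// (When the type implements Marshaler, no per-field marshal info is
+	// computed at all; encoding is delegated entirely to the message.)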
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if isOneofMessage { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, 
appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
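+			// (sizeBytesOneof/appendBytesOneof always emit the field, even when
+			// the byte slice is empty, which proto3 encoding would drop.)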
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize 
int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return 
n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} 
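+
+// A packed repeated field is sized as its tag, a varint byte length, and the
+// raw payload. For bools the payload is one byte per element, so three bools
+// cost 3 + SizeVarint(3) + tagsize = 4 + tagsize bytes.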
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
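+	// Each byte carries 7 bits of the value, least-significant group first,
+	// with the high bit set as a continuation flag: e.g. 300 (0b1_0010_1100)
+	// encodes as 0xAC 0x02.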
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
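+// Each element is written length-delimited; when marshaling, a nil element
+// reports errRepeatedHasNil instead of being silently skipped.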
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. 
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
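`makeMapMarshaler` above encodes each map entry as a tiny nested message (key = field 1, value = field 2) and sorts the keys when `deterministic` is set, since Go randomizes map iteration order. A small sketch of that idea, with hypothetical names:

```go
package main

import (
	"fmt"
	"sort"
)

// sortedKeys mirrors the deterministic path: entries are emitted in
// sorted key order so repeated marshals of the same map are identical.
func sortedKeys(m map[string]int32) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	m := map[string]int32{"b": 2, "a": 1, "c": 3}
	for _, k := range sortedKeys(m) {
		// On the wire, each entry is a tiny message:
		// field 1 = key, field 2 = value.
		fmt.Printf("entry{key:%q value:%d}\n", k, m[k])
	}
}
```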
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
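For reference, the message-set framing that `appendMessageSet` produces can be built by hand; the sketch below mirrors the group/type_id/message layout from the schema comment above (the `typeID` and payload are made up):

```go
package main

import "fmt"

// appendVarint encodes x as a protobuf base-128 varint.
func appendVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

func main() {
	typeID := uint64(12345)    // hypothetical extension number
	msg := []byte{0x08, 0x01}  // pre-encoded extension message

	var b []byte
	b = append(b, 1<<3|3) // Item: start group, tag = 1
	b = append(b, 2<<3|0) // type_id: tag = 2, varint
	b = appendVarint(b, typeID)
	b = append(b, 3<<3|2) // message: tag = 3, length-delimited
	b = appendVarint(b, uint64(len(msg)))
	b = append(b, msg...)
	b = append(b, 1<<3|4) // Item: end group
	fmt.Printf("% x\n", b)
}
```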
+type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] + pp, err = m.XXX_Marshal(pp, p.deterministic) + p.buf = append(p.buf, pp...) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 00000000..997f57c1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshal a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. 
+ if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 00000000..60dcf70d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,676 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? 
+ out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case isSlice && !isPointer: // E.g. []pb.T + mergeInfo := getMergeInfo(tf) + zero := reflect.Zero(tf) + mfi.merge = func(dst, src pointer) { + // TODO: Make this faster? 
+ dstsp := dst.asPointerTo(f.Type) + dsts := dstsp.Elem() + srcs := src.asPointerTo(f.Type).Elem() + for i := 0; i < srcs.Len(); i++ { + dsts = reflect.Append(dsts, zero) + srcElement := srcs.Index(i).Addr() + dstElement := dsts.Index(dsts.Len() - 1).Addr() + mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) + } + if dsts.IsNil() { + dsts = reflect.MakeSlice(f.Type, 0, 0) + } + dstsp.Elem().Set(dsts) + } + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 00000000..93722938 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2249 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1 when reqFields is all set
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension), or invalidField if it does not exist
+	bytesExtensions field                         // offset of XXX_extensions with type []byte
+	extensionRanges []ExtensionRange              // unmarshaler expects extensions to be in this range
+	isMessageSet    bool                          // if true, implies extensions field is []byte
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unparsed bytes.
+// It returns an error if the field has a bad wire type.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+// unmarshalFieldInfo contains the information to unmarshal a single field of a message.
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.bytesExtensions.IsValid() {
+					z = m.offset(u.bytesExtensions).toBytes()
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
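The hot loop in `unmarshal` above special-cases one- and two-byte varints before falling back to `decodeVarint`; every field starts with a varint holding `fieldNumber<<3 | wireType`. A freestanding decoder sketch (a plain reimplementation, not the vendored one):

```go
package main

import "fmt"

// decodeVarint is a basic base-128 decoder; it returns the value and
// the number of bytes consumed (0 on truncated input).
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << (7 * uint(i))
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

func main() {
	b := []byte{0x96, 0x01, 0x2a} // varint 150, then one more byte
	x, n := decodeVarint(b)
	tag, wire := x>>3, x&7
	fmt.Println(x, n, "field:", tag, "wire:", wire) // 150 2 field: 18 wire: 6
}
```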
+func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. 
+	// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
+	if len(oneofFields) > 0 {
+		var oneofImplementers []interface{}
+		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+		case oneofFuncsIface:
+			_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+		case oneofWrappersIface:
+			oneofImplementers = m.XXX_OneofWrappers()
+		}
+		for _, v := range oneofImplementers {
+			tptr := reflect.TypeOf(v) // *Msg_X
+			typ := tptr.Elem()        // Msg_X
+
+			f := typ.Field(0) // oneof implementers have one field
+			baseUnmarshal := fieldUnmarshaler(&f)
+			tags := strings.Split(f.Tag.Get("protobuf"), ",")
+			fieldNum, err := strconv.Atoi(tags[1])
+			if err != nil {
+				panic("protobuf tag field not an integer: " + tags[1])
+			}
+			var name string
+			for _, tag := range tags {
+				if strings.HasPrefix(tag, "name=") {
+					name = strings.TrimPrefix(tag, "name=")
+					break
+				}
+			}
+
+			// Find the oneof field that this struct implements.
+			// Might take O(n^2) to process all of the oneofs, but who cares.
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				}
+			}
+
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
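`setTag` above keeps small field numbers in a slice indexed directly by tag and spills large ones into a map, trading a little memory for O(1) dispatch in the common case. A simplified standalone model of that split (the `dispatch` type and its payload are hypothetical):

```go
package main

import "fmt"

// dispatch mirrors the dense/sparse split: small tags index a slice,
// large or sparse tags fall back to a map.
type dispatch struct {
	dense  []string
	sparse map[uint64]string
}

func (d *dispatch) set(tag int, name string) {
	if tag >= 0 && tag < 16 {
		for len(d.dense) <= tag {
			d.dense = append(d.dense, "")
		}
		d.dense[tag] = name
		return
	}
	if d.sparse == nil {
		d.sparse = map[uint64]string{}
	}
	d.sparse[uint64(tag)] = name
}

func (d *dispatch) get(tag uint64) string {
	if tag < uint64(len(d.dense)) {
		return d.dense[tag] // one bounds-checked load, no hashing
	}
	return d.sparse[tag]
}

func main() {
	var d dispatch
	d.set(1, "id")
	d.set(1000, "metadata")
	fmt.Println(d.get(1), d.get(1000)) // id metadata
}
```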
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return 
makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. + if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + 
case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. + +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := 
int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, 
io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, 
io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, 
io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. + // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. 
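+	// (Editor's note, illustrative only: this means a non-canonical two-byte
+	// encoding of "true" is accepted, e.g.
+	//	x, n := decodeVarint([]byte{0x80, 0x01}) // x == 128, n == 2
+	//	v := x != 0                              // true
+	// while a canonical encoder would emit the single byte 0x01.)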
+ v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, 
f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + 
// that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
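+// (Editor's sketch, not vendored code: given a length-delimited field body such
+// as b = []byte{0x03, 'a', 'b', 'c'} with wire == WireBytes, skipField reads
+// the length varint 3, verifies that three bytes remain, and returns the empty
+// remainder b[4:].)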
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
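+// (Editor's example, not vendored code: varints are little-endian base-128 with
+// the high bit of each byte as a continuation flag, so 300 = 0b1_0010_1100 is
+// encoded as []byte{0xAC, 0x02} and decodeVarint([]byte{0xAC, 0x02}) returns
+// (300, 2). The unrolled body below handles up to the ten bytes a 64-bit
+// varint can occupy.)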
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 00000000..00d6c7ad --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. 
Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f // gogo: changed from v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.New(sub.typ))
+		m := s.Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := reflect.New(sub.typ)
+		c := m.Interface().(custom)
+		if err := c.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		v := valToPointer(m)
+		f.appendRef(v, sub.typ)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		m := f.asPointerTo(sub.typ).Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&d))
+
slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 00000000..87416afe --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,930 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. 
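+// (Editor's note, illustrative: ind counts nesting levels and each level becomes
+// two leading spaces when a new line begins, so a nested message renders as
+//	inner: <
+//	  value: 1
+//	>
+// in multi-line mode, while compact mode folds the same output onto one line
+// with single spaces.)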
+type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
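+// (Editor's sketch under the assumption that the concrete type is registered:
+// with ExpandAny set, an Any whose type_url is "type.googleapis.com/pkg.M"
+// — pkg.M and the field below are hypothetical names — renders inline as
+//	[type.googleapis.com/pkg.M]: <
+//	  field: "value"
+//	>
+// and the (false, nil) return makes the caller fall back to printing the raw
+// type_url/value pair when the message type is not linked in.)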
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
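+			// (Editor's example, not vendored text: a map<string, int32> field m
+			// — a hypothetical name — holding {"k": 1} comes out in the
+			// repeated-entry form
+			//	m: <
+			//	  key: "k"
+			//	  value: 1
+			//	>
+			// which is what the key/value writes below emit.)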
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + +// writeAny writes an arbitrary field. 
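+// (Editor's note: "arbitrary" covers every value reachable from a generated
+// message. The body peels off special cases first — gogo custom types, cast
+// types, stdtime/stdduration fields, and IEEE inf/nan floats, so a +Inf float
+// field f, to pick a hypothetical name, renders as "f: inf" — before the kind
+// switch handles bytes, strings, and nested structs/groups.)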
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. 
+ v = v.Addr() + } + if v.Type().Implements(textMarshalerType) { + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 00000000..1d6c6aa0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
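For orientation, the four exported helpers above (MarshalText, MarshalTextString, CompactText, CompactTextString) are the public entry points of text.go. A minimal caller-side sketch; examplepb.Ping and its Label field are hypothetical stand-ins for any gogo-generated proto3 message, not part of this patch:

    package main

    import (
        "fmt"

        examplepb "example.com/gen/examplepb" // hypothetical generated package
        "github.com/gogo/protobuf/proto"
    )

    func main() {
        msg := &examplepb.Ping{Label: "hello"}
        fmt.Println(proto.MarshalTextString(msg)) // multi-line form, e.g. label: "hello"
        fmt.Println(proto.CompactTextString(msg)) // single-line form, e.g. label:"hello"
    }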
+ +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 00000000..f85c0cc8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,1018 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
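Before the parser internals, a sketch of the input syntax this file accepts (field names illustrative): '#' starts a comment that runs to end of line, adjacent quoted strings are concatenated into a single value, and escapes use the C-style octal/hex forms that text.go's writeString emits:

    # comments are skipped by the tokenizer
    name: "multi" "part"   # parsed as the single string "multipart"
    blob: "\001\377"       # octal escapes, three digits each
    count: 42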
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += 
len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(rune(i)), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
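To make the branches above concrete, readStruct accepts expanded Any fields and map entries shaped like this (type URL and field names illustrative):

    details: <
      [type.googleapis.com/example.Inner] <
        note: "bracketed type URL, then the message body"
      >
    >
    attrs: <
      key: "env"
      value: "prod"
    >
    attrs: { key: "region" value: "us-east" }

Each map entry arrives as its own attrs message with key and value fields, '<'/'>' and '{'/'}' are interchangeable terminators, and the colon after a bracketed type URL is optional.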
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := &timestamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. 
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 00000000..9324f654 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
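The seconds bounds in the const block above can be reproduced with the standard library; a quick illustrative check:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        fmt.Println(time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix())     // -62135596800, minValidSeconds
        fmt.Println(time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix()) // 253402300800, maxValidSeconds
    }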
+func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because it corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func timestampProto(t time.Time) (*timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go new file mode 100644 index 00000000..38439fa9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 00000000..b175d1b6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 00000000..c1cf7bf8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 00000000..3496dc99 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 00000000..a85bf198 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? 
+func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 00000000..18b2a331 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. 
+ MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + 
return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + 
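Every generated getter above tolerates a nil receiver and an unset (nil-pointer) proto2 field, which is what makes chained descriptor traversal safe without explicit nil checks. Below is a minimal sketch, not part of the vendored file, of how these accessors are typically used; it assumes the vendored import path github.com/golang/protobuf/protoc-gen-go/descriptor, and the "example.proto"/"Greeting" names are hypothetical:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Hand-built descriptor for a hypothetical example.proto.
	fd := &descriptor.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
		MessageType: []*descriptor.DescriptorProto{{
			Name: proto.String("Greeting"),
			Field: []*descriptor.FieldDescriptorProto{{
				Name:   proto.String("text"),
				Number: proto.Int32(1),
			}},
		}},
	}

	// The getters never panic: a nil receiver or unset field
	// simply yields the zero value.
	var missing *descriptor.FileDescriptorProto
	fmt.Printf("name of nil descriptor: %q\n", missing.GetName()) // prints ""

	// Chained traversal needs no nil checks along the way.
	for _, msg := range fd.GetMessageType() {
		fmt.Printf("%s.%s\n", fd.GetPackage(), msg.GetName())
		for _, f := range msg.GetField() {
			fmt.Printf("  field %s = %d\n", f.GetName(), f.GetNumber())
		}
	}
}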
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set to true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set to true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this option is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing an option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set.
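For illustration only (this helper is hypothetical and not part of the vendored file): the "exactly one of these should be set" contract above can be consumed by probing each optional value field in turn, relying on the getters generated below.

func uninterpretedValue(o *UninterpretedOption) interface{} {
	if o == nil {
		return nil
	}
	// Exactly one of the value fields is expected to be set; return the
	// first one that is present, in declaration order.
	switch {
	case o.IdentifierValue != nil:
		return o.GetIdentifierValue()
	case o.PositiveIntValue != nil:
		return o.GetPositiveIntValue()
	case o.NegativeIntValue != nil:
		return o.GetNegativeIntValue()
	case o.DoubleValue != nil:
		return o.GetDoubleValue()
	case o.StringValue != nil:
		return o.GetStringValue()
	case o.AggregateValue != nil:
		return o.GetAggregateValue()
	}
	return nil
}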
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
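A minimal sketch (illustrative only, not vendored code; assumes the stdlib strings package is imported) of how a NamePart slice renders back to the dotted form described above, e.g. "foo.(bar.baz).qux", using the getters defined just below:

func formatOptionName(parts []*UninterpretedOption_NamePart) string {
	segs := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.GetIsExtension() {
			// Extension segments are wrapped in parentheses: "(bar.baz)".
			segs = append(segs, "("+p.GetNamePart()+")")
		} else {
			segs = append(segs, p.GetNamePart())
		}
	}
	return strings.Join(segs, ".")
}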
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition occurs. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user.
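To make the path scheme above concrete, a sketch (illustrative only; the helper name is hypothetical) that walks the example path [ 4, 3, 2, 7, 1 ] using getters generated earlier in this file:

// The pair (4, msgIdx) selects a top-level message (message_type has field
// number 4), (2, fieldIdx) selects one of its fields (field has number 2),
// and the trailing 1 selects that field's name (name has number 1).
func fieldNameAtPath(file *FileDescriptorProto, msgIdx, fieldIdx int) string {
	return file.GetMessageType()[msgIdx].GetField()[fieldIdx].GetName()
}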
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
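Because end is one past the last relevant byte, the annotated span follows the usual half-open convention and can be sliced out directly; a sketch (illustrative only, not vendored code):

func annotatedText(generated []byte, a *GeneratedCodeInfo_Annotation) []byte {
	// length of the text = end - begin, so a plain [begin, end) slice suffices.
	return generated[a.GetBegin():a.GetEnd()]
}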
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 00000000..165b2110 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != 
nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 00000000..e0846a35 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
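The helper.go file added below derives protobuf wire-format keys: WireType maps each field type to its wire encoding, GetKeyUint64 computes fieldNumber<<3 | wireType, and GetKey varint-encodes the result. A minimal standalone sketch of that scheme follows; the field number and wire type are hypothetical, nothing here is taken from the vendored file:

```go
package main

import "fmt"

// encodeVarint applies protobuf's base-128 varint scheme: seven payload
// bits per byte, with the high bit set on every byte except the last.
func encodeVarint(x uint64) []byte {
	var buf []byte
	for x > 127 {
		buf = append(buf, 0x80|uint8(x&0x7F))
		x >>= 7
	}
	return append(buf, uint8(x))
}

func main() {
	// Hypothetical field: number 16, wire type 2 (length-delimited).
	const fieldNumber, wireType = 16, 2
	key := uint64(fieldNumber<<3 | wireType)
	fmt.Printf("key=%d encoded=%x\n", key, encodeVarint(key)) // key=130 encoded=8201
}
```

GetKey below produces exactly this byte sequence; GetKey3 differs only in that IsPacked3 treats repeated scalar fields as packed by default under proto3, which forces wire type 2.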
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go 
b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
new file mode 100644
index 00000000..ceadde6a
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
@@ -0,0 +1,101 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package sortkeys
+
+import (
+	"sort"
+)
+
+func Strings(l []string) {
+	sort.Strings(l)
+}
+
+func Float64s(l []float64) {
+	sort.Float64s(l)
+}
+
+func Float32s(l []float32) {
+	sort.Sort(Float32Slice(l))
+}
+
+func Int64s(l []int64) {
+	sort.Sort(Int64Slice(l))
+}
+
+func Int32s(l []int32) {
+	sort.Sort(Int32Slice(l))
+}
+
+func Uint64s(l []uint64) {
+	sort.Sort(Uint64Slice(l))
+}
+
+func Uint32s(l []uint32) {
+	sort.Sort(Uint32Slice(l))
+}
+
+func Bools(l []bool) {
+	sort.Sort(BoolSlice(l))
+}
+
+type BoolSlice []bool
+
+func (p BoolSlice) Len() int           { return len(p) }
+func (p BoolSlice) Less(i, j int) bool { return p[j] }
+func (p BoolSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Int64Slice []int64
+
+func (p Int64Slice) Len() int           { return len(p) }
+func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Int32Slice []int32
+
+func (p Int32Slice) Len() int           { return len(p) }
+func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int           { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Uint32Slice []uint32
+
+func (p Uint32Slice) Len() int           { return len(p) }
+func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Float32Slice []float32
+
+func (p Float32Slice) Len() int           { return len(p) }
+func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Float32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/gogo/protobuf/types/any.go b/vendor/github.com/gogo/protobuf/types/any.go
new file mode 100644
index
00000000..df4787de --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.go @@ -0,0 +1,140 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/gogo/protobuf/proto" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. 
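The helpers above mirror the ptypes API for google.protobuf.Any. A usage sketch, assuming this vendored package resolves as github.com/gogo/protobuf/types and also ships the Empty well-known type (true of upstream gogo/protobuf, though Empty sits outside this hunk):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Pack: serializes the message and records a type.googleapis.com/... URL.
	any, err := types.MarshalAny(&types.Empty{})
	if err != nil {
		panic(err)
	}
	fmt.Println(any.TypeUrl) // type.googleapis.com/google.protobuf.Empty

	// Unpack into a known type; errors if the type URL does not match.
	var out types.Empty
	if err := types.UnmarshalAny(any, &out); err != nil {
		panic(err)
	}

	// DynamicAny instead allocates the target type from the proto registry.
	var dyn types.DynamicAny
	if err := types.UnmarshalAny(any, &dyn); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", dyn.Message) // *types.Empty
}
```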
+func EmptyAny(any *Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = EmptyAny(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *Any, pb proto.Message) bool { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { + return false + } + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name +} diff --git a/vendor/github.com/gogo/protobuf/types/any.pb.go b/vendor/github.com/gogo/protobuf/types/any.pb.go new file mode 100644 index 00000000..e3d4d949 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.pb.go @@ -0,0 +1,694 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/any.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... 
+// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_b53526c13ae22eb4, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return m.Size() +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (*Any) XXX_MessageName() string { + return "google.protobuf.Any" +} +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } + +var fileDescriptor_b53526c13ae22eb4 = []byte{ + // 211 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf1, 0x50, 0x8e, + 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, + 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, + 0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4, + 0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28, + 0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94, + 0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94, + 0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x81, 0x82, 0xd3, 0xed, + 0x00, 0x00, 0x00, +} + +func (this *Any) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Any) Equal(that 
interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Any) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Any{") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAny(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Any) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintAny(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintAny(dAtA []byte, offset int, v uint64) int { + offset -= sovAny(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedAny(r randyAny, easy bool) *Any { + this := &Any{} + this.TypeUrl = string(randStringAny(r)) + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedAny(r, 3) + } + return this +} + +type randyAny interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneAny(r randyAny) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringAny(r randyAny) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneAny(r) + } + return string(tmps) +} +func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldAny(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + 
switch wire { + case 0: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateAny(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateAny(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Any) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovAny(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAny(x uint64) (n int) { + return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Any) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Any{`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringAny(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Any) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAny(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAny(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAny + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAny + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAny + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAny = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAny = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/api.pb.go b/vendor/github.com/gogo/protobuf/types/api.pb.go new file mode 100644 index 00000000..83e88692 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/api.pb.go @@ -0,0 +1,2134 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/api.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Api is a light-weight descriptor for an API Interface. 
+// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +type Api struct { + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The methods of this interface, in unspecified order. + Methods []*Method `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` + // Any metadata attached to the interface. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // Included interfaces. See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins,proto3" json:"mixins,omitempty"` + // The source syntax of the service. 
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Api) Reset() { *m = Api{} } +func (*Api) ProtoMessage() {} +func (*Api) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{0} +} +func (m *Api) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Api) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Api.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Api) XXX_Merge(src proto.Message) { + xxx_messageInfo_Api.Merge(m, src) +} +func (m *Api) XXX_Size() int { + return m.Size() +} +func (m *Api) XXX_DiscardUnknown() { + xxx_messageInfo_Api.DiscardUnknown(m) +} + +var xxx_messageInfo_Api proto.InternalMessageInfo + +func (m *Api) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Api) GetMethods() []*Method { + if m != nil { + return m.Methods + } + return nil +} + +func (m *Api) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Api) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Api) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Api) GetMixins() []*Mixin { + if m != nil { + return m.Mixins + } + return nil +} + +func (m *Api) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Api) XXX_MessageName() string { + return "google.protobuf.Api" +} + +// Method represents a method of an API interface. +type Method struct { + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl,proto3" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming,proto3" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl,proto3" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming,proto3" json:"response_streaming,omitempty"` + // Any metadata attached to the method. + Options []*Option `protobuf:"bytes,6,rep,name=options,proto3" json:"options,omitempty"` + // The source syntax of this method. 
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Method) Reset() { *m = Method{} } +func (*Method) ProtoMessage() {} +func (*Method) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{1} +} +func (m *Method) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Method) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Method.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Method) XXX_Merge(src proto.Message) { + xxx_messageInfo_Method.Merge(m, src) +} +func (m *Method) XXX_Size() int { + return m.Size() +} +func (m *Method) XXX_DiscardUnknown() { + xxx_messageInfo_Method.DiscardUnknown(m) +} + +var xxx_messageInfo_Method proto.InternalMessageInfo + +func (m *Method) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Method) GetRequestTypeUrl() string { + if m != nil { + return m.RequestTypeUrl + } + return "" +} + +func (m *Method) GetRequestStreaming() bool { + if m != nil { + return m.RequestStreaming + } + return false +} + +func (m *Method) GetResponseTypeUrl() string { + if m != nil { + return m.ResponseTypeUrl + } + return "" +} + +func (m *Method) GetResponseStreaming() bool { + if m != nil { + return m.ResponseStreaming + } + return false +} + +func (m *Method) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Method) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Method) XXX_MessageName() string { + return "google.protobuf.Method" +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. 
A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inheriting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + // The fully qualified name of the interface which is included. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mixin) Reset() { *m = Mixin{} } +func (*Mixin) ProtoMessage() {} +func (*Mixin) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{2} +} +func (m *Mixin) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mixin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mixin.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Mixin) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mixin.Merge(m, src) +} +func (m *Mixin) XXX_Size() int { + return m.Size() +} +func (m *Mixin) XXX_DiscardUnknown() { + xxx_messageInfo_Mixin.DiscardUnknown(m) +} + +var xxx_messageInfo_Mixin proto.InternalMessageInfo + +func (m *Mixin) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Mixin) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +func (*Mixin) XXX_MessageName() string { + return "google.protobuf.Mixin" +} +func init() { + proto.RegisterType((*Api)(nil), "google.protobuf.Api") + proto.RegisterType((*Method)(nil), "google.protobuf.Method") + proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin") +} + +func init() { proto.RegisterFile("google/protobuf/api.proto", fileDescriptor_a2ec32096296c143) } + +var fileDescriptor_a2ec32096296c143 = []byte{ + // 467 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x31, 0x6f, 0x13, 0x31, + 0x14, 0xc7, 0xeb, 0xbb, 0xe4, 0x52, 0x5c, 0x91, 0x82, 0x91, 0xc0, 0x64, 0xb0, 0x4e, 0x15, 0xc3, + 0x09, 0xc4, 0x45, 0x94, 0x4f, 0xd0, 0x20, 0xd4, 0x01, 0x21, 0xa2, 0x0b, 0x08, 0x89, 0x25, 0x4a, + 0x83, 0x09, 0x96, 0xee, 0x6c, 0x63, 0x3b, 0x90, 0x4c, 0xf0, 0x59, 0x98, 0x10, 0x23, 0xdf, 0x80, + 0xad, 0x23, 0x23, 0x23, 0xb9, 0x2e, 0x8c, 0x1d, 0x19, 0x91, 0x7d, 0xe7, 0xa6, 0x5c, 0x83, 0x04, + 0x9b, 0xdf, 0xfb, 0xff, 0xfc, 0xf7, 0x7b, 0x7f, 0xc3, 0x9b, 0x33, 0x21, 0x66, 0x39, 0xed, 0x4b, + 0x25, 0x8c, 0x38, 0x9a, 0xbf, 0xea, 0x4f, 0x24, 0x4b, 0x5d, 0x81, 0x76, 0x2b, 0x29,
0xf5, 0x52, + 0xef, 0x56, 0x93, 0xd5, 0x62, 0xae, 0xa6, 0x74, 0x3c, 0x15, 0xdc, 0xd0, 0x85, 0xa9, 0xc0, 0x5e, + 0xaf, 0x49, 0x99, 0xa5, 0xac, 0x4d, 0xf6, 0xbe, 0x06, 0x30, 0x3c, 0x90, 0x0c, 0x21, 0xd8, 0xe2, + 0x93, 0x82, 0x62, 0x10, 0x83, 0xe4, 0x52, 0xe6, 0xce, 0xe8, 0x1e, 0xec, 0x14, 0xd4, 0xbc, 0x16, + 0x2f, 0x35, 0x0e, 0xe2, 0x30, 0xd9, 0xd9, 0xbf, 0x91, 0x36, 0x06, 0x48, 0x1f, 0x3b, 0x3d, 0xf3, + 0x9c, 0xbd, 0x22, 0xa4, 0x61, 0x82, 0x6b, 0x1c, 0xfe, 0xe5, 0xca, 0x13, 0xa7, 0x67, 0x9e, 0x43, + 0x18, 0x76, 0xde, 0x52, 0xa5, 0x99, 0xe0, 0xb8, 0xe5, 0x1e, 0xf7, 0x25, 0x7a, 0x08, 0xbb, 0x7f, + 0xee, 0x83, 0xdb, 0x31, 0x48, 0x76, 0xf6, 0xc9, 0x05, 0xcf, 0x91, 0xc3, 0x1e, 0x54, 0x54, 0x76, + 0x59, 0x9f, 0x2f, 0x51, 0x0a, 0xa3, 0x82, 0x2d, 0x18, 0xd7, 0x38, 0x72, 0x23, 0x5d, 0xbf, 0xb8, + 0x85, 0x95, 0xb3, 0x9a, 0x42, 0x7d, 0x18, 0xe9, 0x25, 0x37, 0x93, 0x05, 0xee, 0xc4, 0x20, 0xe9, + 0x6e, 0x58, 0x61, 0xe4, 0xe4, 0xac, 0xc6, 0xf6, 0xbe, 0x04, 0x30, 0xaa, 0x82, 0xd8, 0x18, 0x63, + 0x02, 0xaf, 0x28, 0xfa, 0x66, 0x4e, 0xb5, 0x19, 0xdb, 0xe0, 0xc7, 0x73, 0x95, 0xe3, 0xc0, 0xe9, + 0xdd, 0xba, 0xff, 0x74, 0x29, 0xe9, 0x33, 0x95, 0xa3, 0x3b, 0xf0, 0xaa, 0x27, 0xb5, 0x51, 0x74, + 0x52, 0x30, 0x3e, 0xc3, 0x61, 0x0c, 0x92, 0xed, 0xcc, 0x5b, 0x8c, 0x7c, 0x1f, 0xdd, 0xb6, 0xb0, + 0x96, 0x82, 0x6b, 0xba, 0xf6, 0xad, 0x12, 0xdc, 0xf5, 0x82, 0x37, 0xbe, 0x0b, 0xd1, 0x19, 0xbb, + 0x76, 0x6e, 0x3b, 0xe7, 0x33, 0x97, 0xb5, 0xf5, 0xb9, 0x5f, 0x8c, 0xfe, 0xf1, 0x17, 0xff, 0x3b, + 0xb4, 0x3e, 0x6c, 0xbb, 0xd8, 0x37, 0x46, 0x86, 0x60, 0x4b, 0x09, 0x61, 0xea, 0x98, 0xdc, 0x79, + 0xf0, 0xfe, 0xfb, 0x8a, 0x6c, 0x9d, 0xae, 0x08, 0xf8, 0xb5, 0x22, 0xe0, 0x43, 0x49, 0xc0, 0xa7, + 0x92, 0x80, 0xe3, 0x92, 0x80, 0x6f, 0x25, 0x01, 0x3f, 0x4a, 0x02, 0x7e, 0x96, 0x64, 0xeb, 0xd4, + 0xf6, 0x4f, 0x08, 0x38, 0x3e, 0x21, 0x00, 0x5e, 0x9b, 0x8a, 0xa2, 0x39, 0xc6, 0x60, 0xfb, 0x40, + 0xb2, 0xa1, 0x2d, 0x86, 0xe0, 0x45, 0xdb, 0xe6, 0xa6, 0x3f, 0x06, 0xe1, 0xe1, 0x70, 0xf0, 0x39, + 0x20, 0x87, 0x15, 0x3a, 0xf4, 0x13, 0x3f, 0xa7, 0x79, 0xfe, 0x88, 0x8b, 0x77, 0xdc, 0xc6, 0xa8, + 0x8f, 0x22, 0xe7, 0x71, 0xff, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x64, 0x40, 0x40, 0xa1, + 0x03, 0x00, 0x00, +} + +func (this *Api) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Methods) != len(that1.Methods) { + if len(this.Methods) < len(that1.Methods) { + return -1 + } + return 1 + } + for i := range this.Methods { + if c := this.Methods[i].Compare(that1.Methods[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Version != that1.Version { + if this.Version < that1.Version { + return -1 + } + return 1 + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if len(this.Mixins) != len(that1.Mixins) { + if len(this.Mixins) < len(that1.Mixins) { + return -1 + } + return 1 + } + for i := range this.Mixins { + if c := this.Mixins[i].Compare(that1.Mixins[i]); c != 0 { + return c + } + } + if this.Syntax != 
that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Method) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + if this.RequestTypeUrl < that1.RequestTypeUrl { + return -1 + } + return 1 + } + if this.RequestStreaming != that1.RequestStreaming { + if !this.RequestStreaming { + return -1 + } + return 1 + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + if this.ResponseTypeUrl < that1.ResponseTypeUrl { + return -1 + } + return 1 + } + if this.ResponseStreaming != that1.ResponseStreaming { + if !this.ResponseStreaming { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Mixin) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Root != that1.Root { + if this.Root < that1.Root { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Api) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Methods) != len(that1.Methods) { + return false + } + for i := range this.Methods { + if !this.Methods[i].Equal(that1.Methods[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Version != that1.Version { + return false + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if len(this.Mixins) != len(that1.Mixins) { + return false + } + for i := range this.Mixins { + if !this.Mixins[i].Equal(that1.Mixins[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Method) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if 
ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + return false + } + if this.RequestStreaming != that1.RequestStreaming { + return false + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + return false + } + if this.ResponseStreaming != that1.ResponseStreaming { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Mixin) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Root != that1.Root { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Api) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Api{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Methods != nil { + s = append(s, "Methods: "+fmt.Sprintf("%#v", this.Methods)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + if this.Mixins != nil { + s = append(s, "Mixins: "+fmt.Sprintf("%#v", this.Mixins)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Method) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Method{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "RequestTypeUrl: "+fmt.Sprintf("%#v", this.RequestTypeUrl)+",\n") + s = append(s, "RequestStreaming: "+fmt.Sprintf("%#v", this.RequestStreaming)+",\n") + s = append(s, "ResponseTypeUrl: "+fmt.Sprintf("%#v", this.ResponseTypeUrl)+",\n") + s = append(s, "ResponseStreaming: "+fmt.Sprintf("%#v", this.ResponseStreaming)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Mixin) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Mixin{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Root: "+fmt.Sprintf("%#v", this.Root)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return 
strings.Join(s, "") +} +func valueToGoStringApi(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Api) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Api) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Api) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x38 + } + if len(m.Mixins) > 0 { + for iNdEx := len(m.Mixins) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mixins[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintApi(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Methods) > 0 { + for iNdEx := len(m.Methods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Methods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Method) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Method) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Method) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x38 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.ResponseStreaming { + i-- + if m.ResponseStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ResponseTypeUrl) > 0 { + i -= len(m.ResponseTypeUrl) + copy(dAtA[i:], m.ResponseTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResponseTypeUrl))) + i-- + 
dAtA[i] = 0x22 + } + if m.RequestStreaming { + i-- + if m.RequestStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.RequestTypeUrl) > 0 { + i -= len(m.RequestTypeUrl) + copy(dAtA[i:], m.RequestTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.RequestTypeUrl))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Mixin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mixin) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Mixin) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Root) > 0 { + i -= len(m.Root) + copy(dAtA[i:], m.Root) + i = encodeVarintApi(dAtA, i, uint64(len(m.Root))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + offset -= sovApi(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedApi(r randyApi, easy bool) *Api { + this := &Api{} + this.Name = string(randStringApi(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Methods = make([]*Method, v1) + for i := 0; i < v1; i++ { + this.Methods[i] = NewPopulatedMethod(r, easy) + } + } + if r.Intn(5) != 0 { + v2 := r.Intn(5) + this.Options = make([]*Option, v2) + for i := 0; i < v2; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Version = string(randStringApi(r)) + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Mixins = make([]*Mixin, v3) + for i := 0; i < v3; i++ { + this.Mixins[i] = NewPopulatedMixin(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMethod(r randyApi, easy bool) *Method { + this := &Method{} + this.Name = string(randStringApi(r)) + this.RequestTypeUrl = string(randStringApi(r)) + this.RequestStreaming = bool(bool(r.Intn(2) == 0)) + this.ResponseTypeUrl = string(randStringApi(r)) + this.ResponseStreaming = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMixin(r randyApi, easy bool) *Mixin { + this := &Mixin{} + this.Name = string(randStringApi(r)) + this.Root = string(randStringApi(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 3) + } + return this +} + +type randyApi interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneApi(r 
randyApi) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringApi(r randyApi) string { + v5 := r.Intn(100) + tmps := make([]rune, v5) + for i := 0; i < v5; i++ { + tmps[i] = randUTF8RuneApi(r) + } + return string(tmps) +} +func randUnrecognizedApi(r randyApi, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldApi(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldApi(dAtA []byte, r randyApi, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + v6 := r.Int63() + if r.Intn(2) == 0 { + v6 *= -1 + } + dAtA = encodeVarintPopulateApi(dAtA, uint64(v6)) + case 1: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateApi(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateApi(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Api) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Methods) > 0 { + for _, e := range m.Methods { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Mixins) > 0 { + for _, e := range m.Mixins { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Method) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RequestTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.RequestStreaming { + n += 2 + } + l = len(m.ResponseTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ResponseStreaming { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Mixin) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovApi(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Api) String() string { + if this == nil { + return "nil" + } + repeatedStringForMethods := "[]*Method{" + for _, f := range this.Methods { + repeatedStringForMethods += strings.Replace(f.String(), "Method", "Method", 1) + "," + } + repeatedStringForMethods += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + repeatedStringForMixins := "[]*Mixin{" + for _, f := range this.Mixins { + repeatedStringForMixins += strings.Replace(f.String(), "Mixin", "Mixin", 1) + "," + } + repeatedStringForMixins += "}" + s := strings.Join([]string{`&Api{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Methods:` + repeatedStringForMethods + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Mixins:` + repeatedStringForMixins + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Method) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Method{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RequestTypeUrl:` + fmt.Sprintf("%v", this.RequestTypeUrl) + `,`, + `RequestStreaming:` + fmt.Sprintf("%v", this.RequestStreaming) + `,`, + `ResponseTypeUrl:` + fmt.Sprintf("%v", this.ResponseTypeUrl) + `,`, + `ResponseStreaming:` + fmt.Sprintf("%v", this.ResponseStreaming) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Mixin) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mixin{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Root:` + fmt.Sprintf("%v", this.Root) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Api) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Api: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Api: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
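+ // Editor's note: a standard base-128 varint decode — each byte contributes its low 7 bits, least-significant group first, and a clear high bit (b < 0x80) marks the final byte; e.g. the bytes 0x96 0x01 decode to 0x16 | (0x01 << 7) = 150. The shift >= 64 guard below rejects malformed varints longer than ten bytes.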
+ if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Methods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Methods = append(m.Methods, &Method{}) + if err := m.Methods[len(m.Methods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mixins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mixins = append(m.Mixins, &Mixin{}) + if err := m.Mixins[len(m.Mixins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Method) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Method: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Method: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + 
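+ // Editor's note: the input ran out in the middle of a varint, so this surfaces as io.ErrUnexpectedEOF (truncated message) rather than a proto-specific decode error.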
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequestStreaming = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResponseTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ResponseStreaming = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
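+ // Editor's note: unknown fields are preserved rather than dropped — the bytes just appended span the whole field, tag included, because iNdEx was rewound to preIndex before skipApi measured it; this keeps fields from newer schema versions intact across a decode/re-encode round trip.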
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mixin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mixin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mixin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Root = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthApi + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupApi + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/doc.go b/vendor/github.com/gogo/protobuf/types/doc.go new file mode 100644 index 00000000..ff2810af --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package types contains code for interacting with well-known types. +*/ +package types diff --git a/vendor/github.com/gogo/protobuf/types/duration.go b/vendor/github.com/gogo/protobuf/types/duration.go new file mode 100644 index 00000000..979b8e78 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290 years).
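+// +// Editor's note — a minimal round-trip sketch of the two helpers defined below (hedged: it assumes this vendored package is imported under its directory name, types): +// +// d := types.DurationProto(1500 * time.Millisecond) // -> &types.Duration{Seconds: 1, Nanos: 500000000} +// back, err := types.DurationFromProto(d) // -> 1.5s, err == nil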
+func validateDuration(d *Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func DurationFromProto(p *Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) * time.Nanosecond + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func DurationProto(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/types/duration.pb.go b/vendor/github.com/gogo/protobuf/types/duration.pb.go new file mode 100644 index 00000000..4deafcb1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.pb.go @@ -0,0 +1,517 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_23597b2ebd7ac6c5, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Duration) XXX_MessageName() string { + return "google.protobuf.Duration" +} +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } + +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe3, 0xa1, 0x1c, + 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, + 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, + 0xb1, 0x1c, 0xe3, 0x89, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b, + 0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00, + 0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89, + 0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x1c, 0x64, 0x4e, 0xf6, 0x00, 0x00, + 0x00, +} + +func (this *Duration) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + if c := 
bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Duration) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Duration) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Duration{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDuration(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintDuration(dAtA []byte, offset int, v uint64) int { + offset -= sovDuration(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovDuration(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovDuration(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovDuration(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDuration(x uint64) (n int) { + return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDuration(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDuration(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDuration + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDuration + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDuration + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDuration = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/duration_gogo.go b/vendor/github.com/gogo/protobuf/types/duration_gogo.go new file mode 100644 index 00000000..90e7670e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration_gogo.go @@ -0,0 +1,100 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "fmt" + "time" +) + +func NewPopulatedDuration(r interface { + Int63() int64 +}, easy bool) *Duration { + this := &Duration{} + maxSecs := time.Hour.Nanoseconds() / 1e9 + max := 2 * maxSecs + s := int64(r.Int63()) % max + s -= maxSecs + neg := int64(1) + if s < 0 { + neg = -1 + } + this.Seconds = s + this.Nanos = int32(neg * (r.Int63() % 1e9)) + return this +} + +func (d *Duration) String() string { + td, err := DurationFromProto(d) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return td.String() +} + +func NewPopulatedStdDuration(r interface { + Int63() int64 +}, easy bool) *time.Duration { + dur := NewPopulatedDuration(r, easy) + d, err := DurationFromProto(dur) + if err != nil { + return nil + } + return &d +} + +func SizeOfStdDuration(d time.Duration) int { + dur := DurationProto(d) + return dur.Size() +} + +func StdDurationMarshal(d time.Duration) ([]byte, error) { + size := SizeOfStdDuration(d) + buf := make([]byte, size) + _, err := StdDurationMarshalTo(d, buf) + return buf, err +} + +func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) { + dur := DurationProto(d) + return dur.MarshalTo(data) +} + +func StdDurationUnmarshal(d *time.Duration, data []byte) error { + dur := &Duration{} + if err := dur.Unmarshal(data); err != nil { + return err + } + dd, err := DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/empty.pb.go b/vendor/github.com/gogo/protobuf/types/empty.pb.go new file mode 100644 index 00000000..9e94748b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/empty.pb.go @@ -0,0 +1,462 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
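+
+// Illustrative sketch, not part of the generated file: the duration helpers
+// vendored above (duration.go and duration_gogo.go) round-trip with
+// time.Duration; the import alias `types` is assumed.
+//
+//	p := types.DurationProto(90*time.Second + time.Millisecond)
+//	// p == &types.Duration{Seconds: 90, Nanos: 1000000}
+//	d, err := types.DurationFromProto(p)
+//	// d == 90.001s and err == nil; DurationFromProto rejects nil, mixed-sign
+//	// Seconds/Nanos, and |Nanos| >= 1e9, so a nil error implies a usable d.
+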
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_900544acb223d5b8, []int{0} +} +func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return m.Size() +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func (*Empty) XXX_MessageName() string { + return "google.protobuf.Empty" +} +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) } + +var fileDescriptor_900544acb223d5b8 = []byte{ + // 176 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, + 0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, + 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, + 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05, + 0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, + 0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd, + 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, + 0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0xbe, 0xb6, 0x31, 0xc6, 0x00, 0x00, 0x00, +} + +func (this *Empty) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Empty) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := 
that.(Empty) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Empty) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&types.Empty{") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEmpty(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Empty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int { + offset -= sovEmpty(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty { + this := &Empty{} + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedEmpty(r, 1) + } + return this +} + +type randyEmpty interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneEmpty(r randyEmpty) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringEmpty(r randyEmpty) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneEmpty(r) + } + return string(tmps) +} +func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), 
byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Empty) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEmpty(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEmpty(x uint64) (n int) { + return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Empty) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Empty{`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringEmpty(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Empty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEmpty + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipEmpty(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEmpty(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEmpty + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEmpty + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEmpty + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEmpty = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go new file mode 100644 index 00000000..6ae346d9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go @@ -0,0 +1,738 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. 
For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. 
In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +type FieldMask struct { + // The set of field mask paths. 
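+	// For illustration (a sketch, not upstream text): the profile mask from
+	// the example above would be built as
+	//
+	//	mask := &types.FieldMask{Paths: []string{"user.display_name", "photo"}}
+	//
+	// whose canonical JSON encoding, per the mapping described above, is the
+	// single string "user.displayName,photo".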
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_5158202634f0da48, []int{0} +} +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(m, src) +} +func (m *FieldMask) XXX_Size() int { + return m.Size() +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func (*FieldMask) XXX_MessageName() string { + return "google.protobuf.FieldMask" +} +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) } + +var fileDescriptor_5158202634f0da48 = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x1d, 0x8c, + 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, + 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, + 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7, + 0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50, + 0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x0f, 0x46, 0xc6, 0x45, 0x4c, 0xcc, 0xee, + 0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x7a, 0x02, 0xa0, 0x7a, 0xf4, 0xc2, 0x53, 0x73, 0x72, + 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x2a, 0x93, 0xd8, 0xc0, 0x86, 0x19, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x43, 0xa0, 0x83, 0xd0, 0xe9, 0x00, 0x00, 0x00, +} + +func (this *FieldMask) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Paths) != len(that1.Paths) { + if len(this.Paths) < len(that1.Paths) { + return -1 + } + return 1 + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + if this.Paths[i] < that1.Paths[i] { + return -1 + } + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this 
*FieldMask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Paths) != len(that1.Paths) { + return false + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *FieldMask) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FieldMask{") + s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFieldMask(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *FieldMask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldMask) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintFieldMask(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int { + offset -= sovFieldMask(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask { + this := &FieldMask{} + v1 := r.Intn(10) + this.Paths = make([]string, v1) + for i := 0; i < v1; i++ { + this.Paths[i] = string(randStringFieldMask(r)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedFieldMask(r, 2) + } + return this +} + +type randyFieldMask interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneFieldMask(r randyFieldMask) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringFieldMask(r randyFieldMask) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneFieldMask(r) + } + return string(tmps) +} +func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte { + key := 
uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *FieldMask) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovFieldMask(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovFieldMask(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFieldMask(x uint64) (n int) { + return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FieldMask) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldMask{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringFieldMask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FieldMask) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldMask: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFieldMask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFieldMask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFieldMask(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 
{ + return ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFieldMask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFieldMask + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupFieldMask + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthFieldMask + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupFieldMask = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/protosize.go b/vendor/github.com/gogo/protobuf/types/protosize.go new file mode 100644 index 00000000..3a2d1b7e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/protosize.go @@ -0,0 +1,34 @@ +package types + +func (m *Any) ProtoSize() (n int) { return m.Size() } +func (m *Api) ProtoSize() (n int) { return m.Size() } +func (m *Method) ProtoSize() (n int) { return m.Size() } +func (m *Mixin) ProtoSize() (n int) { return m.Size() } +func (m *Duration) ProtoSize() (n int) { return m.Size() } +func (m *Empty) ProtoSize() (n int) { return m.Size() } +func (m *FieldMask) ProtoSize() (n int) { return m.Size() } +func (m *SourceContext) ProtoSize() (n int) { return m.Size() } +func (m *Struct) ProtoSize() (n int) { return m.Size() } +func (m *Value) ProtoSize() (n int) { return m.Size() } +func (m *Value_NullValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_NumberValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StringValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StructValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_ListValue) ProtoSize() (n int) { return m.Size() } +func (m *ListValue) ProtoSize() (n int) { return m.Size() } +func (m *Timestamp) ProtoSize() (n int) { return m.Size() } +func (m *Type) ProtoSize() (n int) { return m.Size() } +func (m *Field) ProtoSize() (n int) { return m.Size() } +func (m *Enum) ProtoSize() (n int) { return m.Size() } +func (m *EnumValue) ProtoSize() (n int) { return m.Size() } +func (m *Option) ProtoSize() (n int) { 
return m.Size() } +func (m *DoubleValue) ProtoSize() (n int) { return m.Size() } +func (m *FloatValue) ProtoSize() (n int) { return m.Size() } +func (m *Int64Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt64Value) ProtoSize() (n int) { return m.Size() } +func (m *Int32Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt32Value) ProtoSize() (n int) { return m.Size() } +func (m *BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *StringValue) ProtoSize() (n int) { return m.Size() } +func (m *BytesValue) ProtoSize() (n int) { return m.Size() } diff --git a/vendor/github.com/gogo/protobuf/types/source_context.pb.go b/vendor/github.com/gogo/protobuf/types/source_context.pb.go new file mode 100644 index 00000000..8e6ce71b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/source_context.pb.go @@ -0,0 +1,524 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. 
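+	// A minimal sketch (not upstream text) of constructing and reading one:
+	//
+	//	sc := &types.SourceContext{FileName: "google/protobuf/duration.proto"}
+	//	_ = sc.GetFileName() // nil-safe accessor; returns "" on a nil receiver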
+ FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceContext) Reset() { *m = SourceContext{} } +func (*SourceContext) ProtoMessage() {} +func (*SourceContext) Descriptor() ([]byte, []int) { + return fileDescriptor_b686cdb126d509db, []int{0} +} +func (m *SourceContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SourceContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceContext.Merge(m, src) +} +func (m *SourceContext) XXX_Size() int { + return m.Size() +} +func (m *SourceContext) XXX_DiscardUnknown() { + xxx_messageInfo_SourceContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceContext proto.InternalMessageInfo + +func (m *SourceContext) GetFileName() string { + if m != nil { + return m.FileName + } + return "" +} + +func (*SourceContext) XXX_MessageName() string { + return "google.protobuf.SourceContext" +} +func init() { + proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext") +} + +func init() { + proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_b686cdb126d509db) +} + +var fileDescriptor_b686cdb126d509db = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d, + 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43, + 0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49, + 0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63, + 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, + 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, + 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39, + 0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac, + 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, + 0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, + 0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1, + 0xf9, 0x00, 0x00, 0x00, +} + +func (this *SourceContext) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.FileName != that1.FileName { + if this.FileName < that1.FileName { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + 
return 0 +} +func (this *SourceContext) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FileName != that1.FileName { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *SourceContext) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.SourceContext{") + s = append(s, "FileName: "+fmt.Sprintf("%#v", this.FileName)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSourceContext(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SourceContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.FileName) > 0 { + i -= len(m.FileName) + copy(dAtA[i:], m.FileName) + i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int { + offset -= sovSourceContext(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext { + this := &SourceContext{} + this.FileName = string(randStringSourceContext(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedSourceContext(r, 2) + } + return this +} + +type randySourceContext interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneSourceContext(r randySourceContext) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringSourceContext(r randySourceContext) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneSourceContext(r) + } + return string(tmps) +} +func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = 
encodeVarintPopulateSourceContext(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateSourceContext(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *SourceContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FileName) + if l > 0 { + n += 1 + l + sovSourceContext(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSourceContext(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSourceContext(x uint64) (n int) { + return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SourceContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceContext{`, + `FileName:` + fmt.Sprintf("%v", this.FileName) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringSourceContext(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SourceContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSourceContext + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSourceContext + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FileName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSourceContext(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthSourceContext + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSourceContext(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSourceContext + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSourceContext + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSourceContext + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSourceContext = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/struct.pb.go b/vendor/github.com/gogo/protobuf/types/struct.pb.go new file mode 100644 index 00000000..c0457312 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/struct.pb.go @@ -0,0 +1,2271 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. 
+ NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} + +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} +func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) +} +func (m *Struct) XXX_Size() int { + return m.Size() +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +func (*Struct) XXX_MessageName() string { + return "google.protobuf.Struct" +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return m.Size() +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int + Compare(interface{}) int +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof" json:"null_value,omitempty"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof" json:"number_value,omitempty"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof" json:"struct_value,omitempty"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof" json:"list_value,omitempty"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return 
x.ListValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func (*Value) XXX_MessageName() string { + return "google.protobuf.Value" +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return m.Size() +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func (*ListValue) XXX_MessageName() string { + return "google.protobuf.ListValue" +} +func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } + +var fileDescriptor_df322afd6c9fb402 = []byte{ + // 443 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xb1, 0x6f, 0xd3, 0x40, + 0x14, 0xc6, 0xfd, 0x9c, 0xc6, 0x22, 0xcf, 0xa8, 0x54, 0x87, 0x04, 0x51, 0x41, 0x47, 0x94, 0x2e, + 0x11, 0x42, 0xae, 0x14, 0x16, 0x44, 0x58, 0x88, 0x54, 0x5a, 0x89, 0xa8, 0x32, 0x86, 0x16, 0x89, + 0x25, 0xc2, 0xae, 0x1b, 0x59, 0xbd, 0xde, 0x55, 0xf6, 0x1d, 0x28, 0x1b, 0x0b, 0xff, 0x03, 0x33, + 0x13, 0x62, 0xe4, 0xaf, 0xe8, 0xc8, 0xc8, 0x48, 0xdc, 0x85, 0xb1, 0x63, 0x47, 0x74, 0x77, 0xb6, + 0x41, 0x8d, 0xb2, 0xf9, 0x7d, 0xf7, 0x7b, 0xdf, 0x7b, 0xdf, 0x33, 0xde, 0x9f, 0x09, 0x31, 0x63, + 0xe9, 0xf6, 0x59, 0x2e, 0xa4, 0x88, 0xd5, 0xf1, 0x76, 0x21, 0x73, 0x95, 0xc8, 0xc0, 0xd4, 0xe4, + 0x96, 0x7d, 0x0d, 0xea, 0xd7, 0xfe, 0x17, 0x40, 0xef, 0xb5, 0x21, 0xc8, 0x08, 0xbd, 0xe3, 0x2c, + 0x65, 0x47, 0x45, 0x17, 0x7a, 0xad, 0x81, 0x3f, 0xdc, 0x0a, 0xae, 0xc1, 0x81, 0x05, 0x83, 0x17, + 0x86, 0xda, 0xe1, 0x32, 0x9f, 0x47, 0x55, 0xcb, 0xe6, 0x2b, 0xf4, 0xff, 0x93, 0xc9, 0x06, 0xb6, + 0x4e, 0xd2, 
0x79, 0x17, 0x7a, 0x30, 0xe8, 0x44, 0xfa, 0x93, 0x3c, 0xc2, 0xf6, 0x87, 0xf7, 0x4c, + 0xa5, 0x5d, 0xb7, 0x07, 0x03, 0x7f, 0x78, 0x67, 0xc9, 0xfc, 0x50, 0xbf, 0x46, 0x16, 0x7a, 0xea, + 0x3e, 0x81, 0xfe, 0x0f, 0x17, 0xdb, 0x46, 0x24, 0x23, 0x44, 0xae, 0x18, 0x9b, 0x5a, 0x03, 0x6d, + 0xba, 0x3e, 0xdc, 0x5c, 0x32, 0xd8, 0x57, 0x8c, 0x19, 0x7e, 0xcf, 0x89, 0x3a, 0xbc, 0x2e, 0xc8, + 0x16, 0xde, 0xe4, 0xea, 0x34, 0x4e, 0xf3, 0xe9, 0xbf, 0xf9, 0xb0, 0xe7, 0x44, 0xbe, 0x55, 0x1b, + 0xa8, 0x90, 0x79, 0xc6, 0x67, 0x15, 0xd4, 0xd2, 0x8b, 0x6b, 0xc8, 0xaa, 0x16, 0x7a, 0x80, 0x18, + 0x0b, 0x51, 0xaf, 0xb1, 0xd6, 0x83, 0xc1, 0x0d, 0x3d, 0x4a, 0x6b, 0x16, 0x78, 0x66, 0x5c, 0x54, + 0x22, 0x2b, 0xa4, 0x6d, 0xa2, 0xde, 0x5d, 0x71, 0xc7, 0xca, 0x5e, 0x25, 0xb2, 0x49, 0xc9, 0xb2, + 0xa2, 0xee, 0xf5, 0x4c, 0xef, 0x72, 0xca, 0x49, 0x56, 0xc8, 0x26, 0x25, 0xab, 0x8b, 0xb1, 0x87, + 0x6b, 0x27, 0x19, 0x3f, 0xea, 0x8f, 0xb0, 0xd3, 0x10, 0x24, 0x40, 0xcf, 0x98, 0xd5, 0x7f, 0x74, + 0xd5, 0xd1, 0x2b, 0xea, 0xe1, 0x3d, 0xec, 0x34, 0x47, 0x24, 0xeb, 0x88, 0xfb, 0x07, 0x93, 0xc9, + 0xf4, 0xf0, 0xf9, 0xe4, 0x60, 0x67, 0xc3, 0x19, 0x7f, 0x86, 0x5f, 0x0b, 0xea, 0x5c, 0x2e, 0x28, + 0x5c, 0x2d, 0x28, 0x7c, 0x2a, 0x29, 0x7c, 0x2b, 0x29, 0x9c, 0x97, 0x14, 0x7e, 0x96, 0x14, 0x7e, + 0x97, 0x14, 0xfe, 0x94, 0xd4, 0xb9, 0xd4, 0xfa, 0x05, 0x85, 0xf3, 0x0b, 0x0a, 0x78, 0x3b, 0x11, + 0xa7, 0xd7, 0x47, 0x8e, 0x7d, 0x9b, 0x3e, 0xd4, 0x75, 0x08, 0xef, 0xda, 0x72, 0x7e, 0x96, 0x16, + 0x57, 0x00, 0x5f, 0xdd, 0xd6, 0x6e, 0x38, 0xfe, 0xee, 0xd2, 0x5d, 0xdb, 0x10, 0xd6, 0x3b, 0xbe, + 0x4d, 0x19, 0x7b, 0xc9, 0xc5, 0x47, 0xfe, 0x46, 0x93, 0xb1, 0x67, 0x9c, 0x1e, 0xff, 0x0d, 0x00, + 0x00, 0xff, 0xff, 0x26, 0x30, 0xdb, 0xbe, 0xe9, 0x02, 0x00, 0x00, +} + +func (this *Struct) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if that1.Kind == nil { + if this.Kind != nil { + return 1 + } + } else if this.Kind == nil { + return -1 + } else { + thisType := -1 + switch this.Kind.(type) { + case *Value_NullValue: + thisType = 0 + case *Value_NumberValue: + thisType = 1 + case *Value_StringValue: + thisType = 2 + case *Value_BoolValue: + thisType = 3 + case *Value_StructValue: + thisType = 4 + case *Value_ListValue: + thisType = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", this.Kind)) + } + that1Type := -1 + switch that1.Kind.(type) { + case *Value_NullValue: + that1Type = 0 + case *Value_NumberValue: + that1Type = 1 + case *Value_StringValue: + that1Type = 2 + case *Value_BoolValue: + that1Type = 3 + case *Value_StructValue: + that1Type = 4 + case *Value_ListValue: 
+ that1Type = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", that1.Kind)) + } + if thisType == that1Type { + if c := this.Kind.Compare(that1.Kind); c != 0 { + return c + } + } else if thisType < that1Type { + return -1 + } else if thisType > that1Type { + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value_NullValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NullValue != that1.NullValue { + if this.NullValue < that1.NullValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_NumberValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NumberValue != that1.NumberValue { + if this.NumberValue < that1.NumberValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.StringValue != that1.StringValue { + if this.StringValue < that1.StringValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.BoolValue != that1.BoolValue { + if !this.BoolValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_StructValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.StructValue.Compare(that1.StructValue); c != 0 { + return c + } + return 0 +} +func (this *Value_ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.ListValue.Compare(that1.ListValue); c != 0 { + return c + } + return 0 +} +func (this *ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok 
:= that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Values) != len(that1.Values) { + if len(this.Values) < len(that1.Values) { + return -1 + } + return 1 + } + for i := range this.Values { + if c := this.Values[i].Compare(that1.Values[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x NullValue) String() string { + s, ok := NullValue_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Struct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Kind == nil { + if this.Kind != nil { + return false + } + } else if this.Kind == nil { + return false + } else if !this.Kind.Equal(that1.Kind) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Value_NullValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NullValue != that1.NullValue { + return false + } + return true +} +func (this *Value_NumberValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NumberValue != that1.NumberValue { + return false + } + return true +} +func (this *Value_StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StringValue != that1.StringValue { + return false + } + return true +} +func (this *Value_BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BoolValue != that1.BoolValue { + return false + } + 
return true +} +func (this *Value_StructValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StructValue.Equal(that1.StructValue) { + return false + } + return true +} +func (this *Value_ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ListValue.Equal(that1.ListValue) { + return false + } + return true +} +func (this *ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if !this.Values[i].Equal(that1.Values[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Struct) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Struct{") + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%#v: %#v,", k, this.Fields[k]) + } + mapStringForFields += "}" + if this.Fields != nil { + s = append(s, "Fields: "+mapStringForFields+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Value{") + if this.Kind != nil { + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_NullValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NullValue{` + + `NullValue:` + fmt.Sprintf("%#v", this.NullValue) + `}`}, ", ") + return s +} +func (this *Value_NumberValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NumberValue{` + + `NumberValue:` + fmt.Sprintf("%#v", this.NumberValue) + `}`}, ", ") + return s +} +func (this *Value_StringValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StringValue{` + + `StringValue:` + fmt.Sprintf("%#v", this.StringValue) + `}`}, ", ") + return s +} +func (this *Value_BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_BoolValue{` + + `BoolValue:` + fmt.Sprintf("%#v", this.BoolValue) + `}`}, ", ") + 
return s +} +func (this *Value_StructValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StructValue{` + + `StructValue:` + fmt.Sprintf("%#v", this.StructValue) + `}`}, ", ") + return s +} +func (this *Value_ListValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_ListValue{` + + `ListValue:` + fmt.Sprintf("%#v", this.ListValue) + `}`}, ", ") + return s +} +func (this *ListValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.ListValue{") + if this.Values != nil { + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStruct(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Struct) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintStruct(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintStruct(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Kind != nil { + { + size := m.Kind.Size() + i -= size + if _, err := m.Kind.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Value_NullValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_NullValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintStruct(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_NumberValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + 
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = encodeVarintStruct(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_StructValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + { + size, err := m.StructValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + { + size, err := m.ListValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintStruct(dAtA []byte, offset int, v uint64) int { + offset -= sovStruct(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedStruct(r randyStruct, easy bool) *Struct { + this := &Struct{} + if r.Intn(5) == 0 { + v1 := r.Intn(10) + this.Fields = make(map[string]*Value) + for i := 0; i < v1; i++ { + this.Fields[randStringStruct(r)] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + } + return this +} + +func NewPopulatedValue(r randyStruct, easy bool) *Value { + this := &Value{} + oneofNumber_Kind := []int32{1, 2, 3, 4, 5, 6}[r.Intn(6)] + switch oneofNumber_Kind { + case 1: + this.Kind = 
NewPopulatedValue_NullValue(r, easy) + case 2: + this.Kind = NewPopulatedValue_NumberValue(r, easy) + case 3: + this.Kind = NewPopulatedValue_StringValue(r, easy) + case 4: + this.Kind = NewPopulatedValue_BoolValue(r, easy) + case 5: + this.Kind = NewPopulatedValue_StructValue(r, easy) + case 6: + this.Kind = NewPopulatedValue_ListValue(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 7) + } + return this +} + +func NewPopulatedValue_NullValue(r randyStruct, easy bool) *Value_NullValue { + this := &Value_NullValue{} + this.NullValue = NullValue([]int32{0}[r.Intn(1)]) + return this +} +func NewPopulatedValue_NumberValue(r randyStruct, easy bool) *Value_NumberValue { + this := &Value_NumberValue{} + this.NumberValue = float64(r.Float64()) + if r.Intn(2) == 0 { + this.NumberValue *= -1 + } + return this +} +func NewPopulatedValue_StringValue(r randyStruct, easy bool) *Value_StringValue { + this := &Value_StringValue{} + this.StringValue = string(randStringStruct(r)) + return this +} +func NewPopulatedValue_BoolValue(r randyStruct, easy bool) *Value_BoolValue { + this := &Value_BoolValue{} + this.BoolValue = bool(bool(r.Intn(2) == 0)) + return this +} +func NewPopulatedValue_StructValue(r randyStruct, easy bool) *Value_StructValue { + this := &Value_StructValue{} + this.StructValue = NewPopulatedStruct(r, easy) + return this +} +func NewPopulatedValue_ListValue(r randyStruct, easy bool) *Value_ListValue { + this := &Value_ListValue{} + this.ListValue = NewPopulatedListValue(r, easy) + return this +} +func NewPopulatedListValue(r randyStruct, easy bool) *ListValue { + this := &ListValue{} + if r.Intn(5) == 0 { + v2 := r.Intn(5) + this.Values = make([]*Value, v2) + for i := 0; i < v2; i++ { + this.Values[i] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + } + return this +} + +type randyStruct interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneStruct(r randyStruct) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringStruct(r randyStruct) string { + v3 := r.Intn(100) + tmps := make([]rune, v3) + for i := 0; i < v3; i++ { + tmps[i] = randUTF8RuneStruct(r) + } + return string(tmps) +} +func randUnrecognizedStruct(r randyStruct, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldStruct(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldStruct(dAtA []byte, r randyStruct, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + v4 := r.Int63() + if r.Intn(2) == 0 { + v4 *= -1 + } + dAtA = encodeVarintPopulateStruct(dAtA, uint64(v4)) + case 1: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateStruct(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = 
encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateStruct(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Struct) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovStruct(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovStruct(uint64(len(k))) + l + n += mapEntrySize + 1 + sovStruct(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != nil { + n += m.Kind.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value_NullValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovStruct(uint64(m.NullValue)) + return n +} +func (m *Value_NumberValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sovStruct(uint64(l)) + return n +} +func (m *Value_BoolValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Value_StructValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StructValue != nil { + l = m.StructValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *Value_ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListValue != nil { + l = m.ListValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovStruct(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovStruct(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStruct(x uint64) (n int) { + return sovStruct(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Struct) String() string { + if this == nil { + return "nil" + } + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%v: %v,", k, this.Fields[k]) + } + mapStringForFields += "}" + s := strings.Join([]string{`&Struct{`, + `Fields:` + mapStringForFields + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NullValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NullValue{`, + `NullValue:` + fmt.Sprintf("%v", this.NullValue) + `,`, + `}`, 
+ }, "") + return s +} +func (this *Value_NumberValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NumberValue{`, + `NumberValue:` + fmt.Sprintf("%v", this.NumberValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StringValue{`, + `StringValue:` + fmt.Sprintf("%v", this.StringValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_BoolValue{`, + `BoolValue:` + fmt.Sprintf("%v", this.BoolValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StructValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StructValue{`, + `StructValue:` + strings.Replace(fmt.Sprintf("%v", this.StructValue), "Struct", "Struct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value_ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_ListValue{`, + `ListValue:` + strings.Replace(fmt.Sprintf("%v", this.ListValue), "ListValue", "ListValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForValues := "[]*Value{" + for _, f := range this.Values { + repeatedStringForValues += strings.Replace(f.String(), "Value", "Value", 1) + "," + } + repeatedStringForValues += "}" + s := strings.Join([]string{`&ListValue{`, + `Values:` + repeatedStringForValues + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringStruct(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Struct) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*Value) + } + var mapkey string + var mapvalue *Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStruct + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthStruct + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthStruct + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Value{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &Value_NullValue{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = &Value_NumberValue{float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &Value_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &Value_BoolValue{b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Struct{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_StructValue{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ListValue{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_ListValue{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &Value{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStruct(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStruct + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupStruct + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthStruct + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupStruct = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.go b/vendor/github.com/gogo/protobuf/types/timestamp.go new file mode 100644 index 00000000..232ada57 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.go @@ -0,0 +1,130 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func TimestampFromProto(ts *Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*Timestamp, error) { + ts := &Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. 
For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *Timestamp) string { + t, err := TimestampFromProto(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go new file mode 100644 index 00000000..45db7b3b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go @@ -0,0 +1,539 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_292007bbfe81227e, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Timestamp) XXX_MessageName() string { + return "google.protobuf.Timestamp" +} +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } + +var fileDescriptor_292007bbfe81227e = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x8d, + 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, + 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, + 0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, + 0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90, + 0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, + 0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, + 0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x23, 0x83, 0xdd, + 0xfa, 0x00, 0x00, 0x00, +} + +func (this *Timestamp) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { 
+ return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Timestamp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Timestamp) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Timestamp{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTimestamp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int { + offset -= sovTimestamp(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovTimestamp(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovTimestamp(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTimestamp(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTimestamp(x uint64) (n int) { + return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTimestamp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTimestamp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTimestamp + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTimestamp + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTimestamp + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTimestamp = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go new file mode 100644 index 00000000..e03fa131 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go @@ -0,0 +1,94 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "time" +) + +func NewPopulatedTimestamp(r interface { + Int63() int64 +}, easy bool) *Timestamp { + this := &Timestamp{} + ns := int64(r.Int63()) + this.Seconds = ns / 1e9 + this.Nanos = int32(ns % 1e9) + return this +} + +func (ts *Timestamp) String() string { + return TimestampString(ts) +} + +func NewPopulatedStdTime(r interface { + Int63() int64 +}, easy bool) *time.Time { + timestamp := NewPopulatedTimestamp(r, easy) + t, err := TimestampFromProto(timestamp) + if err != nil { + return nil + } + return &t +} + +func SizeOfStdTime(t time.Time) int { + ts, err := TimestampProto(t) + if err != nil { + return 0 + } + return ts.Size() +} + +func StdTimeMarshal(t time.Time) ([]byte, error) { + size := SizeOfStdTime(t) + buf := make([]byte, size) + _, err := StdTimeMarshalTo(t, buf) + return buf, err +} + +func StdTimeMarshalTo(t time.Time, data []byte) (int, error) { + ts, err := TimestampProto(t) + if err != nil { + return 0, err + } + return ts.MarshalTo(data) +} + +func StdTimeUnmarshal(t *time.Time, data []byte) error { + ts := &Timestamp{} + if err := ts.Unmarshal(data); err != nil { + return err + } + tt, err := TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/type.pb.go b/vendor/github.com/gogo/protobuf/types/type.pb.go new file mode 100644 index 00000000..791427bb --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/type.pb.go @@ -0,0 +1,3355 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/type.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + Syntax_SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. 
+ Syntax_SYNTAX_PROTO3 Syntax = 1 +) + +var Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", +} + +var Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, +} + +func (Syntax) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + Field_TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + Field_TYPE_DOUBLE Field_Kind = 1 + // Field type float. + Field_TYPE_FLOAT Field_Kind = 2 + // Field type int64. + Field_TYPE_INT64 Field_Kind = 3 + // Field type uint64. + Field_TYPE_UINT64 Field_Kind = 4 + // Field type int32. + Field_TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + Field_TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + Field_TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + Field_TYPE_BOOL Field_Kind = 8 + // Field type string. + Field_TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + Field_TYPE_GROUP Field_Kind = 10 + // Field type message. + Field_TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + Field_TYPE_BYTES Field_Kind = 12 + // Field type uint32. + Field_TYPE_UINT32 Field_Kind = 13 + // Field type enum. + Field_TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + Field_TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + Field_TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + Field_TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + Field_TYPE_SINT64 Field_Kind = 18 +) + +var Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (Field_Kind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 0} +} + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + Field_CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + Field_CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + Field_CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. + Field_CARDINALITY_REPEATED Field_Cardinality = 3 +) + +var Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", +} + +var Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, +} + +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 1} +} + +// A protocol buffer message type. +type Type struct { + // The fully qualified message name. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs,proto3" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,6,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Type) Reset() { *m = Type{} } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} +func (m *Type) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Type) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Type.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Type) XXX_Merge(src proto.Message) { + xxx_messageInfo_Type.Merge(m, src) +} +func (m *Type) XXX_Size() int { + return m.Size() +} +func (m *Type) XXX_DiscardUnknown() { + xxx_messageInfo_Type.DiscardUnknown(m) +} + +var xxx_messageInfo_Type proto.InternalMessageInfo + +func (m *Type) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Type) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *Type) GetOneofs() []string { + if m != nil { + return m.Oneofs + } + return nil +} + +func (m *Type) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Type) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Type) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Type) XXX_MessageName() string { + return "google.protobuf.Type" +} + +// A single field of a message type. +type Field struct { + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. + Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // The field name. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex,proto3" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. 
+ Packed bool `protobuf:"varint,8,opt,name=packed,proto3" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName,proto3" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. + DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1} +} +func (m *Field) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Field.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Field) XXX_Merge(src proto.Message) { + xxx_messageInfo_Field.Merge(m, src) +} +func (m *Field) XXX_Size() int { + return m.Size() +} +func (m *Field) XXX_DiscardUnknown() { + xxx_messageInfo_Field.DiscardUnknown(m) +} + +var xxx_messageInfo_Field proto.InternalMessageInfo + +func (m *Field) GetKind() Field_Kind { + if m != nil { + return m.Kind + } + return Field_TYPE_UNKNOWN +} + +func (m *Field) GetCardinality() Field_Cardinality { + if m != nil { + return m.Cardinality + } + return Field_CARDINALITY_UNKNOWN +} + +func (m *Field) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Field) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Field) GetOneofIndex() int32 { + if m != nil { + return m.OneofIndex + } + return 0 +} + +func (m *Field) GetPacked() bool { + if m != nil { + return m.Packed + } + return false +} + +func (m *Field) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Field) GetJsonName() string { + if m != nil { + return m.JsonName + } + return "" +} + +func (m *Field) GetDefaultValue() string { + if m != nil { + return m.DefaultValue + } + return "" +} + +func (*Field) XXX_MessageName() string { + return "google.protobuf.Field" +} + +// Enum type definition. +type Enum struct { + // Enum type name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue,proto3" json:"enumvalue,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. 
+ Syntax Syntax `protobuf:"varint,5,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Enum) Reset() { *m = Enum{} } +func (*Enum) ProtoMessage() {} +func (*Enum) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{2} +} +func (m *Enum) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Enum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Enum.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Enum) XXX_Merge(src proto.Message) { + xxx_messageInfo_Enum.Merge(m, src) +} +func (m *Enum) XXX_Size() int { + return m.Size() +} +func (m *Enum) XXX_DiscardUnknown() { + xxx_messageInfo_Enum.DiscardUnknown(m) +} + +var xxx_messageInfo_Enum proto.InternalMessageInfo + +func (m *Enum) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Enum) GetEnumvalue() []*EnumValue { + if m != nil { + return m.Enumvalue + } + return nil +} + +func (m *Enum) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Enum) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Enum) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Enum) XXX_MessageName() string { + return "google.protobuf.Enum" +} + +// Enum value definition. +type EnumValue struct { + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValue) Reset() { *m = EnumValue{} } +func (*EnumValue) ProtoMessage() {} +func (*EnumValue) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{3} +} +func (m *EnumValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnumValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnumValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnumValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValue.Merge(m, src) +} +func (m *EnumValue) XXX_Size() int { + return m.Size() +} +func (m *EnumValue) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValue.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValue proto.InternalMessageInfo + +func (m *EnumValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnumValue) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *EnumValue) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (*EnumValue) XXX_MessageName() string { + return "google.protobuf.EnumValue" +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + // The option's name. 
For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. + Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Option) Reset() { *m = Option{} } +func (*Option) ProtoMessage() {} +func (*Option) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{4} +} +func (m *Option) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Option) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Option.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Option) XXX_Merge(src proto.Message) { + xxx_messageInfo_Option.Merge(m, src) +} +func (m *Option) XXX_Size() int { + return m.Size() +} +func (m *Option) XXX_DiscardUnknown() { + xxx_messageInfo_Option.DiscardUnknown(m) +} + +var xxx_messageInfo_Option proto.InternalMessageInfo + +func (m *Option) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Option) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +func (*Option) XXX_MessageName() string { + return "google.protobuf.Option" +} +func init() { + proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value) + proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value) + proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value) + proto.RegisterType((*Type)(nil), "google.protobuf.Type") + proto.RegisterType((*Field)(nil), "google.protobuf.Field") + proto.RegisterType((*Enum)(nil), "google.protobuf.Enum") + proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue") + proto.RegisterType((*Option)(nil), "google.protobuf.Option") +} + +func init() { proto.RegisterFile("google/protobuf/type.proto", fileDescriptor_dd271cc1e348c538) } + +var fileDescriptor_dd271cc1e348c538 = []byte{ + // 840 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x73, 0xda, 0x46, + 0x14, 0xf6, 0x0a, 0x21, 0xa3, 0x87, 0xc1, 0x9b, 0x4d, 0x26, 0x51, 0x9c, 0x19, 0x95, 0xa1, 0x3d, + 0x30, 0x39, 0xe0, 0x29, 0x78, 0x3c, 0xbd, 0x82, 0x91, 0x29, 0x63, 0x22, 0xa9, 0x8b, 0x68, 0xe2, + 0x5e, 0x18, 0x0c, 0x72, 0x86, 0x44, 0xac, 0x18, 0x24, 0x5a, 0x73, 0xeb, 0x4c, 0xcf, 0xfd, 0x27, + 0x7a, 0xea, 0xf4, 0xdc, 0x3f, 0xc2, 0xc7, 0x1e, 0x7b, 0xac, 0xc9, 0xa5, 0xc7, 0x1c, 0x73, 0x6b, + 0x67, 0x57, 0x20, 0x8b, 0x1f, 0x9d, 0x49, 0xdb, 0x1b, 0xef, 0xfb, 0xbe, 0xf7, 0x73, 0x9f, 0x1e, + 0x70, 0xf4, 0xda, 0xf7, 0x5f, 0x7b, 0xee, 0xf1, 0x64, 0xea, 0x87, 0xfe, 0xd5, 0xec, 0xfa, 0x38, + 0x9c, 0x4f, 0xdc, 0xb2, 0xb0, 0xc8, 0x61, 0xc4, 0x95, 0x57, 0xdc, 0xd1, 0xd3, 0x4d, 0x71, 0x9f, + 0xcd, 0x23, 0xf6, 0xe8, 0xb3, 0x4d, 0x2a, 0xf0, 0x67, 0xd3, 0x81, 0xdb, 0x1b, 
0xf8, 0x2c, 0x74, + 0x6f, 0xc2, 0x48, 0x55, 0xfc, 0x51, 0x02, 0xd9, 0x99, 0x4f, 0x5c, 0x42, 0x40, 0x66, 0xfd, 0xb1, + 0xab, 0xa1, 0x02, 0x2a, 0xa9, 0x54, 0xfc, 0x26, 0x65, 0x50, 0xae, 0x47, 0xae, 0x37, 0x0c, 0x34, + 0xa9, 0x90, 0x2a, 0x65, 0x2b, 0x8f, 0xcb, 0x1b, 0xf9, 0xcb, 0xe7, 0x9c, 0xa6, 0x4b, 0x15, 0x79, + 0x0c, 0x8a, 0xcf, 0x5c, 0xff, 0x3a, 0xd0, 0x52, 0x85, 0x54, 0x49, 0xa5, 0x4b, 0x8b, 0x7c, 0x0e, + 0xfb, 0xfe, 0x24, 0x1c, 0xf9, 0x2c, 0xd0, 0x64, 0x11, 0xe8, 0xc9, 0x56, 0x20, 0x4b, 0xf0, 0x74, + 0xa5, 0x23, 0x06, 0xe4, 0xd7, 0xeb, 0xd5, 0xd2, 0x05, 0x54, 0xca, 0x56, 0xf4, 0x2d, 0xcf, 0x8e, + 0x90, 0x9d, 0x45, 0x2a, 0x9a, 0x0b, 0x92, 0x26, 0x39, 0x06, 0x25, 0x98, 0xb3, 0xb0, 0x7f, 0xa3, + 0x29, 0x05, 0x54, 0xca, 0xef, 0x48, 0xdc, 0x11, 0x34, 0x5d, 0xca, 0x8a, 0xbf, 0x2a, 0x90, 0x16, + 0x4d, 0x91, 0x63, 0x90, 0xdf, 0x8e, 0xd8, 0x50, 0x0c, 0x24, 0x5f, 0x79, 0xb6, 0xbb, 0xf5, 0xf2, + 0xc5, 0x88, 0x0d, 0xa9, 0x10, 0x92, 0x06, 0x64, 0x07, 0xfd, 0xe9, 0x70, 0xc4, 0xfa, 0xde, 0x28, + 0x9c, 0x6b, 0x92, 0xf0, 0x2b, 0xfe, 0x83, 0xdf, 0xd9, 0xbd, 0x92, 0x26, 0xdd, 0xf8, 0x0c, 0xd9, + 0x6c, 0x7c, 0xe5, 0x4e, 0xb5, 0x54, 0x01, 0x95, 0xd2, 0x74, 0x69, 0xc5, 0xef, 0x23, 0x27, 0xde, + 0xe7, 0x29, 0x64, 0xf8, 0x72, 0xf4, 0x66, 0x53, 0x4f, 0xf4, 0xa7, 0xd2, 0x7d, 0x6e, 0x77, 0xa7, + 0x1e, 0xf9, 0x04, 0xb2, 0x62, 0xf8, 0xbd, 0x11, 0x1b, 0xba, 0x37, 0xda, 0xbe, 0x88, 0x05, 0x02, + 0x6a, 0x71, 0x84, 0xe7, 0x99, 0xf4, 0x07, 0x6f, 0xdd, 0xa1, 0x96, 0x29, 0xa0, 0x52, 0x86, 0x2e, + 0xad, 0xe4, 0x5b, 0xa9, 0x1f, 0xf9, 0x56, 0xcf, 0x40, 0x7d, 0x13, 0xf8, 0xac, 0x27, 0xea, 0x03, + 0x51, 0x47, 0x86, 0x03, 0x26, 0xaf, 0xf1, 0x53, 0xc8, 0x0d, 0xdd, 0xeb, 0xfe, 0xcc, 0x0b, 0x7b, + 0xdf, 0xf6, 0xbd, 0x99, 0xab, 0x65, 0x85, 0xe0, 0x60, 0x09, 0x7e, 0xcd, 0xb1, 0xe2, 0xad, 0x04, + 0x32, 0x9f, 0x24, 0xc1, 0x70, 0xe0, 0x5c, 0xda, 0x46, 0xaf, 0x6b, 0x5e, 0x98, 0xd6, 0x4b, 0x13, + 0xef, 0x91, 0x43, 0xc8, 0x0a, 0xa4, 0x61, 0x75, 0xeb, 0x6d, 0x03, 0x23, 0x92, 0x07, 0x10, 0xc0, + 0x79, 0xdb, 0xaa, 0x39, 0x58, 0x8a, 0xed, 0x96, 0xe9, 0x9c, 0x9e, 0xe0, 0x54, 0xec, 0xd0, 0x8d, + 0x00, 0x39, 0x29, 0xa8, 0x56, 0x70, 0x3a, 0xce, 0x71, 0xde, 0x7a, 0x65, 0x34, 0x4e, 0x4f, 0xb0, + 0xb2, 0x8e, 0x54, 0x2b, 0x78, 0x9f, 0xe4, 0x40, 0x15, 0x48, 0xdd, 0xb2, 0xda, 0x38, 0x13, 0xc7, + 0xec, 0x38, 0xb4, 0x65, 0x36, 0xb1, 0x1a, 0xc7, 0x6c, 0x52, 0xab, 0x6b, 0x63, 0x88, 0x23, 0xbc, + 0x30, 0x3a, 0x9d, 0x5a, 0xd3, 0xc0, 0xd9, 0x58, 0x51, 0xbf, 0x74, 0x8c, 0x0e, 0x3e, 0x58, 0x2b, + 0xab, 0x5a, 0xc1, 0xb9, 0x38, 0x85, 0x61, 0x76, 0x5f, 0xe0, 0x3c, 0x79, 0x00, 0xb9, 0x28, 0xc5, + 0xaa, 0x88, 0xc3, 0x0d, 0xe8, 0xf4, 0x04, 0xe3, 0xfb, 0x42, 0xa2, 0x28, 0x0f, 0xd6, 0x80, 0xd3, + 0x13, 0x4c, 0x8a, 0x21, 0x64, 0x13, 0xbb, 0x45, 0x9e, 0xc0, 0xc3, 0xb3, 0x1a, 0x6d, 0xb4, 0xcc, + 0x5a, 0xbb, 0xe5, 0x5c, 0x26, 0xe6, 0xaa, 0xc1, 0xa3, 0x24, 0x61, 0xd9, 0x4e, 0xcb, 0x32, 0x6b, + 0x6d, 0x8c, 0x36, 0x19, 0x6a, 0x7c, 0xd5, 0x6d, 0x51, 0xa3, 0x81, 0xa5, 0x6d, 0xc6, 0x36, 0x6a, + 0x8e, 0xd1, 0xc0, 0xa9, 0xe2, 0x5f, 0x08, 0x64, 0x83, 0xcd, 0xc6, 0x3b, 0xcf, 0xc8, 0x17, 0xa0, + 0xba, 0x6c, 0x36, 0x8e, 0x9e, 0x3f, 0xba, 0x24, 0x47, 0x5b, 0x4b, 0xc5, 0xbd, 0xc5, 0x32, 0xd0, + 0x7b, 0x71, 0x72, 0x19, 0x53, 0xff, 0xf9, 0x70, 0xc8, 0xff, 0xef, 0x70, 0xa4, 0x3f, 0xee, 0x70, + 0xbc, 0x01, 0x35, 0x6e, 0x61, 0xe7, 0x14, 0xee, 0x3f, 0x6c, 0x69, 0xed, 0xc3, 0xfe, 0xf7, 0x3d, + 0x16, 0xbf, 0x04, 0x25, 0x82, 0x76, 0x26, 0x7a, 0x0e, 0xe9, 0xd5, 0xa8, 0x79, 0xe3, 0x8f, 0xb6, + 0xc2, 0xd5, 0xd8, 0x9c, 0x46, 0x92, 0xe7, 0x65, 0x50, 0xa2, 0x3e, 0xf8, 0xb2, 0x75, 0x2e, 0x4d, + 0xa7, 
0xf6, 0xaa, 0x67, 0x53, 0xcb, 0xb1, 0x2a, 0x78, 0x6f, 0x13, 0xaa, 0x62, 0x54, 0xff, 0x01, + 0xfd, 0x7e, 0xa7, 0xef, 0xbd, 0xbf, 0xd3, 0xd1, 0x87, 0x3b, 0x1d, 0x7d, 0xbf, 0xd0, 0xd1, 0xcf, + 0x0b, 0x1d, 0xdd, 0x2e, 0x74, 0xf4, 0xdb, 0x42, 0x47, 0x7f, 0x2c, 0x74, 0xf4, 0xe7, 0x42, 0xdf, + 0x7b, 0xcf, 0xf1, 0x77, 0x3a, 0xba, 0x7d, 0xa7, 0x23, 0x78, 0x38, 0xf0, 0xc7, 0x9b, 0x25, 0xd4, + 0x55, 0xfe, 0x9f, 0x63, 0x73, 0xcb, 0x46, 0xdf, 0xa4, 0xf9, 0xd1, 0x0a, 0x3e, 0x20, 0xf4, 0x93, + 0x94, 0x6a, 0xda, 0xf5, 0x5f, 0x24, 0xbd, 0x19, 0xc9, 0xed, 0x55, 0xc5, 0x2f, 0x5d, 0xcf, 0xbb, + 0x60, 0xfe, 0x77, 0x8c, 0xbb, 0x05, 0x57, 0x8a, 0x88, 0x53, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, + 0xbc, 0x2a, 0x5e, 0x82, 0x2b, 0x07, 0x00, 0x00, +} + +func (this *Type) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + if len(this.Oneofs) < len(that1.Oneofs) { + return -1 + } + return 1 + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + if this.Oneofs[i] < that1.Oneofs[i] { + return -1 + } + return 1 + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Field) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Kind != that1.Kind { + if this.Kind < that1.Kind { + return -1 + } + return 1 + } + if this.Cardinality != that1.Cardinality { + if this.Cardinality < that1.Cardinality { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if this.OneofIndex != that1.OneofIndex { + if this.OneofIndex < that1.OneofIndex { + return -1 + } + return 1 + } + if this.Packed != that1.Packed { + if !this.Packed { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.JsonName != that1.JsonName { + if 
this.JsonName < that1.JsonName { + return -1 + } + return 1 + } + if this.DefaultValue != that1.DefaultValue { + if this.DefaultValue < that1.DefaultValue { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Enum) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + if len(this.Enumvalue) < len(that1.Enumvalue) { + return -1 + } + return 1 + } + for i := range this.Enumvalue { + if c := this.Enumvalue[i].Compare(that1.Enumvalue[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *EnumValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Option) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if c := this.Value.Compare(that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x Syntax) String() string { + s, ok := Syntax_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Kind) String() string { + s, ok := Field_Kind_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Cardinality) String() string { + s, ok := Field_Cardinality_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Type) Equal(that interface{}) bool { + if that == nil { + return this == nil + } 
+ + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + return false + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Field) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Kind != that1.Kind { + return false + } + if this.Cardinality != that1.Cardinality { + return false + } + if this.Number != that1.Number { + return false + } + if this.Name != that1.Name { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if this.OneofIndex != that1.OneofIndex { + return false + } + if this.Packed != that1.Packed { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.JsonName != that1.JsonName { + return false + } + if this.DefaultValue != that1.DefaultValue { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Enum) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + return false + } + for i := range this.Enumvalue { + if !this.Enumvalue[i].Equal(that1.Enumvalue[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *EnumValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Number != that1.Number { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if 
!this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Option) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.Value.Equal(that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Type) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Type{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Fields != nil { + s = append(s, "Fields: "+fmt.Sprintf("%#v", this.Fields)+",\n") + } + s = append(s, "Oneofs: "+fmt.Sprintf("%#v", this.Oneofs)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Field) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&types.Field{") + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "OneofIndex: "+fmt.Sprintf("%#v", this.OneofIndex)+",\n") + s = append(s, "Packed: "+fmt.Sprintf("%#v", this.Packed)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "JsonName: "+fmt.Sprintf("%#v", this.JsonName)+",\n") + s = append(s, "DefaultValue: "+fmt.Sprintf("%#v", this.DefaultValue)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Enum) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&types.Enum{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Enumvalue != nil { + s = append(s, "Enumvalue: "+fmt.Sprintf("%#v", this.Enumvalue)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&types.EnumValue{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Number: 
"+fmt.Sprintf("%#v", this.Number)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Option) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Option{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringType(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Type) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Type) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Type) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x30 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Oneofs) > 0 { + for iNdEx := len(m.Oneofs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Oneofs[iNdEx]) + copy(dAtA[i:], m.Oneofs[iNdEx]) + i = encodeVarintType(dAtA, i, uint64(len(m.Oneofs[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Field) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Field) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Field) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = 
encodeVarintType(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x5a + } + if len(m.JsonName) > 0 { + i -= len(m.JsonName) + copy(dAtA[i:], m.JsonName) + i = encodeVarintType(dAtA, i, uint64(len(m.JsonName))) + i-- + dAtA[i] = 0x52 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.Packed { + i-- + if m.Packed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.OneofIndex != 0 { + i = encodeVarintType(dAtA, i, uint64(m.OneofIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = encodeVarintType(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x32 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x18 + } + if m.Cardinality != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Cardinality)) + i-- + dAtA[i] = 0x10 + } + if m.Kind != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Enum) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Enum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Enum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x28 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Enumvalue) > 0 { + for iNdEx := len(m.Enumvalue) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enumvalue[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnumValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnumValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnumValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Option) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Option) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Option) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintType(dAtA []byte, offset int, v uint64) int { + offset -= sovType(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedType(r randyType, easy bool) *Type { + this := &Type{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Fields = make([]*Field, v1) + for i := 0; i < v1; i++ { + this.Fields[i] = NewPopulatedField(r, easy) + } + } + v2 := r.Intn(10) + this.Oneofs = make([]string, v2) + for i := 0; i < v2; i++ { + this.Oneofs[i] = string(randStringType(r)) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Options = make([]*Option, v3) + for i := 0; i < v3; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 7) + } + return this +} + +func NewPopulatedField(r randyType, easy bool) *Field { + this := &Field{} + this.Kind = Field_Kind([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}[r.Intn(19)]) + this.Cardinality = Field_Cardinality([]int32{0, 1, 2, 3}[r.Intn(4)]) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + this.Name = string(randStringType(r)) + this.TypeUrl = string(randStringType(r)) + this.OneofIndex = int32(r.Int31()) + if r.Intn(2) == 0 { + this.OneofIndex *= -1 + } + this.Packed = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.JsonName = string(randStringType(r)) + this.DefaultValue = string(randStringType(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 12) + } + return this +} + +func NewPopulatedEnum(r randyType, easy bool) *Enum { + this := &Enum{} + 
this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v5 := r.Intn(5) + this.Enumvalue = make([]*EnumValue, v5) + for i := 0; i < v5; i++ { + this.Enumvalue[i] = NewPopulatedEnumValue(r, easy) + } + } + if r.Intn(5) != 0 { + v6 := r.Intn(5) + this.Options = make([]*Option, v6) + for i := 0; i < v6; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 6) + } + return this +} + +func NewPopulatedEnumValue(r randyType, easy bool) *EnumValue { + this := &EnumValue{} + this.Name = string(randStringType(r)) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + if r.Intn(5) != 0 { + v7 := r.Intn(5) + this.Options = make([]*Option, v7) + for i := 0; i < v7; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 4) + } + return this +} + +func NewPopulatedOption(r randyType, easy bool) *Option { + this := &Option{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + this.Value = NewPopulatedAny(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 3) + } + return this +} + +type randyType interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneType(r randyType) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringType(r randyType) string { + v8 := r.Intn(100) + tmps := make([]rune, v8) + for i := 0; i < v8; i++ { + tmps[i] = randUTF8RuneType(r) + } + return string(tmps) +} +func randUnrecognizedType(r randyType, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldType(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldType(dAtA []byte, r randyType, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + v9 := r.Int63() + if r.Intn(2) == 0 { + v9 *= -1 + } + dAtA = encodeVarintPopulateType(dAtA, uint64(v9)) + case 1: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateType(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateType(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Type) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + 
sovType(uint64(l)) + } + } + if len(m.Oneofs) > 0 { + for _, s := range m.Oneofs { + l = len(s) + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Field) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != 0 { + n += 1 + sovType(uint64(m.Kind)) + } + if m.Cardinality != 0 { + n += 1 + sovType(uint64(m.Cardinality)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.OneofIndex != 0 { + n += 1 + sovType(uint64(m.OneofIndex)) + } + if m.Packed { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + l = len(m.JsonName) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Enum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Enumvalue) > 0 { + for _, e := range m.Enumvalue { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnumValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Option) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovType(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozType(x uint64) (n int) { + return sovType(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Type) String() string { + if this == nil { + return "nil" + } + repeatedStringForFields := "[]*Field{" + for _, f := range this.Fields { + repeatedStringForFields += strings.Replace(f.String(), "Field", "Field", 1) + "," + } + repeatedStringForFields += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Type{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Fields:` + repeatedStringForFields + `,`, + `Oneofs:` + fmt.Sprintf("%v", 
this.Oneofs) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Field) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Field{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `OneofIndex:` + fmt.Sprintf("%v", this.OneofIndex) + `,`, + `Packed:` + fmt.Sprintf("%v", this.Packed) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `JsonName:` + fmt.Sprintf("%v", this.JsonName) + `,`, + `DefaultValue:` + fmt.Sprintf("%v", this.DefaultValue) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Enum) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnumvalue := "[]*EnumValue{" + for _, f := range this.Enumvalue { + repeatedStringForEnumvalue += strings.Replace(f.String(), "EnumValue", "EnumValue", 1) + "," + } + repeatedStringForEnumvalue += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Enum{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Enumvalue:` + repeatedStringForEnumvalue + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *EnumValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&EnumValue{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Option) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Option{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Any", "Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringType(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Type) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Type: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Type: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Oneofs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Oneofs = append(m.Oneofs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Field) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Field: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= Field_Kind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType) + } + m.Cardinality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cardinality |= Field_Cardinality(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OneofIndex", wireType) + } + m.OneofIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OneofIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Packed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Packed = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JsonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JsonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Enum) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Enum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Enum: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enumvalue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enumvalue = append(m.Enumvalue, &EnumValue{}) + if err := m.Enumvalue[len(m.Enumvalue)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnumValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnumValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnumValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Option) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Option: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Option: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &Any{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipType(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthType + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupType + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthType + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthType = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowType = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupType = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go new file mode 100644 index 00000000..8d415420 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go @@ -0,0 +1,2703 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) +} +func (m *DoubleValue) XXX_Size() int { + return m.Size() +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*DoubleValue) XXX_MessageName() string { + return "google.protobuf.DoubleValue" +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) +} +func (m *FloatValue) XXX_Size() int { + return m.Size() +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*FloatValue) XXX_MessageName() string { + return "google.protobuf.FloatValue" +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) +} +func (m *Int64Value) XXX_Size() int { + return m.Size() +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*Int64Value) XXX_MessageName() string { + return "google.protobuf.Int64Value" +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) +} +func (m *UInt64Value) XXX_Size() int { + return m.Size() +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*UInt64Value) XXX_MessageName() string { + return "google.protobuf.UInt64Value" +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(m, src) +} +func (m *Int32Value) XXX_Size() int { + return m.Size() +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*Int32Value) XXX_MessageName() string { + return "google.protobuf.Int32Value" +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(m, src) +} +func (m *UInt32Value) XXX_Size() int { + return m.Size() +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*UInt32Value) XXX_MessageName() string { + return "google.protobuf.UInt32Value" +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(m, src) +} +func (m *BoolValue) XXX_Size() int { + return m.Size() +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +func (*BoolValue) XXX_MessageName() string { + return "google.protobuf.BoolValue" +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(m, src) +} +func (m *StringValue) XXX_Size() int { + return m.Size() +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (*StringValue) XXX_MessageName() string { + return "google.protobuf.StringValue" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(m, src) +} +func (m *BytesValue) XXX_Size() int { + return m.Size() +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (*BytesValue) XXX_MessageName() string { + return "google.protobuf.BytesValue" +} +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } + +var fileDescriptor_5377b62bda767935 = []byte{ + // 285 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x3b, + 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48, + 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, + 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, + 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x45, 0x87, 0x13, 
0x6f, 0x38, 0x34, 0xbe, 0x02, + 0x40, 0x22, 0x01, 0x8c, 0x51, 0xac, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x3f, 0x18, 0x19, 0x17, 0x31, + 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x68, 0x09, 0x80, 0x6a, 0xd1, 0x0b, 0x4f, + 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4c, 0x62, 0x03, 0x9b, 0x65, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0x31, 0x55, 0x64, 0x90, 0x0a, 0x02, 0x00, 0x00, +} + +func (this *DoubleValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *FloatValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Int64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *UInt64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Int32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *UInt32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } 
+ if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if !this.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BytesValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *DoubleValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *FloatValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Int64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *UInt64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + 
+ that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Int32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *UInt32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BytesValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *DoubleValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.DoubleValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FloatValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FloatValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, 
"}") + return strings.Join(s, "") +} +func (this *Int64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BoolValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StringValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.StringValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BytesValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BytesValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWrappers(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DoubleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= 
len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FloatValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FloatValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func (m *Int64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Int32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UInt32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoolValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StringValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BytesValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintWrappers(dAtA []byte, offset int, v uint64) int { + offset -= sovWrappers(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedDoubleValue(r randyWrappers, easy bool) *DoubleValue { + this := &DoubleValue{} + this.Value = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedFloatValue(r randyWrappers, easy bool) *FloatValue { + this := &FloatValue{} + this.Value = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedInt64Value(r randyWrappers, easy bool) *Int64Value { + this := &Int64Value{} + this.Value = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = 
randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedUInt64Value(r randyWrappers, easy bool) *UInt64Value { + this := &UInt64Value{} + this.Value = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedInt32Value(r randyWrappers, easy bool) *Int32Value { + this := &Int32Value{} + this.Value = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedUInt32Value(r randyWrappers, easy bool) *UInt32Value { + this := &UInt32Value{} + this.Value = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedBoolValue(r randyWrappers, easy bool) *BoolValue { + this := &BoolValue{} + this.Value = bool(bool(r.Intn(2) == 0)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedStringValue(r randyWrappers, easy bool) *StringValue { + this := &StringValue{} + this.Value = string(randStringWrappers(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedBytesValue(r randyWrappers, easy bool) *BytesValue { + this := &BytesValue{} + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +type randyWrappers interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneWrappers(r randyWrappers) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringWrappers(r randyWrappers) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneWrappers(r) + } + return string(tmps) +} +func randUnrecognizedWrappers(r randyWrappers, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldWrappers(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldWrappers(dAtA []byte, r randyWrappers, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateWrappers(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = 
append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *DoubleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FloatValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 5 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Int64Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UInt64Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Int32Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UInt32Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BoolValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BytesValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovWrappers(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWrappers(x uint64) (n int) { + return sovWrappers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DoubleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DoubleValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *FloatValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FloatValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Int64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UInt64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Int32Value) String() string { + if this == nil { + return "nil" + 
} + s := strings.Join([]string{`&Int32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UInt32Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BoolValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StringValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BytesValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringWrappers(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DoubleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWrappers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthWrappers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWrappers(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWrappers + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupWrappers + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthWrappers + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWrappers = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupWrappers = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go new file mode 100644 index 00000000..d905df36 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go @@ -0,0 +1,300 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +func NewPopulatedStdDouble(r randyWrappers, easy bool) *float64 { + v := NewPopulatedDoubleValue(r, easy) + return &v.Value +} + +func SizeOfStdDouble(v float64) int { + pv := &DoubleValue{Value: v} + return pv.Size() +} + +func StdDoubleMarshal(v float64) ([]byte, error) { + size := SizeOfStdDouble(v) + buf := make([]byte, size) + _, err := StdDoubleMarshalTo(v, buf) + return buf, err +} + +func StdDoubleMarshalTo(v float64, data []byte) (int, error) { + pv := &DoubleValue{Value: v} + return pv.MarshalTo(data) +} + +func StdDoubleUnmarshal(v *float64, data []byte) error { + pv := &DoubleValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdFloat(r randyWrappers, easy bool) *float32 { + v := NewPopulatedFloatValue(r, easy) + return &v.Value +} + +func SizeOfStdFloat(v float32) int { + pv := &FloatValue{Value: v} + return pv.Size() +} + +func StdFloatMarshal(v float32) ([]byte, error) { + size := SizeOfStdFloat(v) + buf := make([]byte, size) + _, err := StdFloatMarshalTo(v, buf) + return buf, err +} + +func StdFloatMarshalTo(v float32, data []byte) (int, error) { + pv := &FloatValue{Value: v} + return pv.MarshalTo(data) +} + +func StdFloatUnmarshal(v *float32, data []byte) error { + pv := &FloatValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt64(r randyWrappers, easy bool) *int64 { + v := NewPopulatedInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdInt64(v int64) int { + pv := &Int64Value{Value: v} + return pv.Size() +} + +func StdInt64Marshal(v int64) ([]byte, error) { + size := SizeOfStdInt64(v) + buf := make([]byte, size) + _, err := StdInt64MarshalTo(v, buf) + return buf, err +} + +func StdInt64MarshalTo(v int64, data []byte) (int, error) { + pv := &Int64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt64Unmarshal(v *int64, data []byte) error { + pv := &Int64Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt64(r randyWrappers, easy bool) *uint64 { + v := NewPopulatedUInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt64(v uint64) int { + pv := &UInt64Value{Value: v} + return pv.Size() +} + +func StdUInt64Marshal(v uint64) ([]byte, error) { + size := SizeOfStdUInt64(v) + buf := make([]byte, size) + _, err := StdUInt64MarshalTo(v, buf) + return buf, err +} + +func StdUInt64MarshalTo(v uint64, data []byte) (int, error) { + pv := &UInt64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt64Unmarshal(v *uint64, data []byte) error { + pv := &UInt64Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt32(r randyWrappers, easy bool) *int32 { + v := NewPopulatedInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdInt32(v int32) int { + pv := &Int32Value{Value: v} + return pv.Size() +} + +func 
StdInt32Marshal(v int32) ([]byte, error) { + size := SizeOfStdInt32(v) + buf := make([]byte, size) + _, err := StdInt32MarshalTo(v, buf) + return buf, err +} + +func StdInt32MarshalTo(v int32, data []byte) (int, error) { + pv := &Int32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt32Unmarshal(v *int32, data []byte) error { + pv := &Int32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt32(r randyWrappers, easy bool) *uint32 { + v := NewPopulatedUInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt32(v uint32) int { + pv := &UInt32Value{Value: v} + return pv.Size() +} + +func StdUInt32Marshal(v uint32) ([]byte, error) { + size := SizeOfStdUInt32(v) + buf := make([]byte, size) + _, err := StdUInt32MarshalTo(v, buf) + return buf, err +} + +func StdUInt32MarshalTo(v uint32, data []byte) (int, error) { + pv := &UInt32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt32Unmarshal(v *uint32, data []byte) error { + pv := &UInt32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBool(r randyWrappers, easy bool) *bool { + v := NewPopulatedBoolValue(r, easy) + return &v.Value +} + +func SizeOfStdBool(v bool) int { + pv := &BoolValue{Value: v} + return pv.Size() +} + +func StdBoolMarshal(v bool) ([]byte, error) { + size := SizeOfStdBool(v) + buf := make([]byte, size) + _, err := StdBoolMarshalTo(v, buf) + return buf, err +} + +func StdBoolMarshalTo(v bool, data []byte) (int, error) { + pv := &BoolValue{Value: v} + return pv.MarshalTo(data) +} + +func StdBoolUnmarshal(v *bool, data []byte) error { + pv := &BoolValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdString(r randyWrappers, easy bool) *string { + v := NewPopulatedStringValue(r, easy) + return &v.Value +} + +func SizeOfStdString(v string) int { + pv := &StringValue{Value: v} + return pv.Size() +} + +func StdStringMarshal(v string) ([]byte, error) { + size := SizeOfStdString(v) + buf := make([]byte, size) + _, err := StdStringMarshalTo(v, buf) + return buf, err +} + +func StdStringMarshalTo(v string, data []byte) (int, error) { + pv := &StringValue{Value: v} + return pv.MarshalTo(data) +} + +func StdStringUnmarshal(v *string, data []byte) error { + pv := &StringValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBytes(r randyWrappers, easy bool) *[]byte { + v := NewPopulatedBytesValue(r, easy) + return &v.Value +} + +func SizeOfStdBytes(v []byte) int { + pv := &BytesValue{Value: v} + return pv.Size() +} + +func StdBytesMarshal(v []byte) ([]byte, error) { + size := SizeOfStdBytes(v) + buf := make([]byte, size) + _, err := StdBytesMarshalTo(v, buf) + return buf, err +} + +func StdBytesMarshalTo(v []byte, data []byte) (int, error) { + pv := &BytesValue{Value: v} + return pv.MarshalTo(data) +} + +func StdBytesUnmarshal(v *[]byte, data []byte) error { + pv := &BytesValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE deleted file mode 100644 index 37ec93a1..00000000 --- a/vendor/github.com/golang/groupcache/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 
DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
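The next deleted file is groupcache's lru package, an in-memory LRU cache keyed by comparable values, which this module no longer pulls in transitively. As a reference while reviewing the deletion, a minimal sketch of how that Cache API was typically consumed (illustrative only, not part of this diff):

package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	c := lru.New(2) // evict once the cache holds more than 2 entries
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Println("evicted:", key) // callback runs on every purge
	}
	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // evicts "a", the least recently used entry
	if _, ok := c.Get("a"); !ok {
		fmt.Println("a is gone")
	}
}

Note the deleted source's own caveat: Cache is not safe for concurrent access, so callers needed their own locking.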
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go deleted file mode 100644 index eac1c766..00000000 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package lru implements an LRU cache. -package lru - -import "container/list" - -// Cache is an LRU cache. It is not safe for concurrent access. -type Cache struct { - // MaxEntries is the maximum number of cache entries before - // an item is evicted. Zero means no limit. - MaxEntries int - - // OnEvicted optionally specifies a callback function to be - // executed when an entry is purged from the cache. - OnEvicted func(key Key, value interface{}) - - ll *list.List - cache map[interface{}]*list.Element -} - -// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators -type Key interface{} - -type entry struct { - key Key - value interface{} -} - -// New creates a new Cache. -// If maxEntries is zero, the cache has no limit and it's assumed -// that eviction is done by the caller. -func New(maxEntries int) *Cache { - return &Cache{ - MaxEntries: maxEntries, - ll: list.New(), - cache: make(map[interface{}]*list.Element), - } -} - -// Add adds a value to the cache. -func (c *Cache) Add(key Key, value interface{}) { - if c.cache == nil { - c.cache = make(map[interface{}]*list.Element) - c.ll = list.New() - } - if ee, ok := c.cache[key]; ok { - c.ll.MoveToFront(ee) - ee.Value.(*entry).value = value - return - } - ele := c.ll.PushFront(&entry{key, value}) - c.cache[key] = ele - if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { - c.RemoveOldest() - } -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key Key) (value interface{}, ok bool) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.ll.MoveToFront(ele) - return ele.Value.(*entry).value, true - } - return -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key Key) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.removeElement(ele) - } -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - if c.cache == nil { - return - } - ele := c.ll.Back() - if ele != nil { - c.removeElement(ele) - } -} - -func (c *Cache) removeElement(e *list.Element) { - c.ll.Remove(e) - kv := e.Value.(*entry) - delete(c.cache, kv.key) - if c.OnEvicted != nil { - c.OnEvicted(kv.key, kv.value) - } -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - if c.cache == nil { - return 0 - } - return c.ll.Len() -} - -// Clear purges all stored items from the cache. 
-func (c *Cache) Clear() { - if c.OnEvicted != nil { - for _, e := range c.cache { - kv := e.Value.(*entry) - c.OnEvicted(kv.key, kv.value) - } - } - c.ll = nil - c.cache = nil -} diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 00000000..0f646931 --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 00000000..e810e6fe --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. 
+func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. +// It returns (0, 0) if there is a parse error. +func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexicographical order. This is an implementation detail and +// subject to change. +func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. +func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. +func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. +func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. +func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout.
This is only intended for debugging. +func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. +func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. +func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. +func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy of the raw bytes +// rather than a sub-slice of the buffer.
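The Encode*/Decode* methods above (together with DecodeRawBytes, defined just below) form a symmetric low-level wire API; decoding starts at the unread index, so a freshly written buffer can be read back directly. A minimal round-trip sketch using only this package:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Zig-zag maps signed values onto unsigned ones so small negatives
	// stay short on the wire: 0 to 0, -1 to 1, 1 to 2, -2 to 3, and so on.
	b := proto.NewBuffer(nil)
	_ = b.EncodeVarint(300)                 // plain unsigned varint
	_ = b.EncodeZigzag64(uint64(int64(-1))) // -1 is written as varint 1
	_ = b.EncodeRawBytes([]byte("hi"))      // length-prefixed bytes

	v, _ := b.DecodeVarint()       // 300
	z, _ := b.DecodeZigzag64()     // bit pattern of int64(-1)
	s, _ := b.DecodeRawBytes(true) // alloc=true copies out of the buffer
	fmt.Println(v, int64(z), string(s)) // 300 -1 hi
}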
+func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) + } + return v, nil +} + +// DecodeStringBytes consumes length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. +func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including) the end group marker. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and +// the total length of the message (including the end group marker). +func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go new file mode 100644 index 00000000..d399bf06 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/defaults.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// SetDefaults sets unpopulated scalar fields to their default values. +// Fields within a oneof are not set even if they have a default value. +// SetDefaults is recursively called upon any populated message fields.
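A sketch of the SetDefaults behavior defined just below, assuming a hypothetical generated proto2 message pb.Profile whose Name field declares [default = "anon"] (the pb.Profile type is an assumption for illustration, not part of this diff):

m := &pb.Profile{}       // Name is unset
proto.SetDefaults(m)     // populates Name with "anon"; oneof fields are skipped
fmt.Println(m.GetName()) // "anon"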
+func SetDefaults(m Message) { + if m != nil { + setDefaults(MessageReflect(m)) + } +} + +func setDefaults(m protoreflect.Message) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if !m.Has(fd) { + if fd.HasDefault() && fd.ContainingOneof() == nil { + v := fd.Default() + if fd.Kind() == protoreflect.BytesKind { + v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes + } + m.Set(fd, v) + } + continue + } + } + + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + setDefaults(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + setDefaults(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + setDefaults(v.Message()) + return true + }) + } + } + return true + }) +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 00000000..e8db57e0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,113 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + + protoV2 "google.golang.org/protobuf/proto" +) + +var ( + // Deprecated: No longer returned. + ErrNil = errors.New("proto: Marshal called with nil") + + // Deprecated: No longer returned. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") + + // Deprecated: No longer returned. + ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") +) + +// Deprecated: Do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: Do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: Do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func RegisterMessageSetType(Message, int32, string) {} + +// Deprecated: Do not use. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Deprecated: Do not use. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. 
+ var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// Deprecated: Do not use; this type existed for internal-use only. +type InternalMessageInfo struct{} + +// Deprecated: Do not use; this method existed for internal-use only. +func (*InternalMessageInfo) DiscardUnknown(m Message) { + DiscardUnknown(m) +} + +// Deprecated: Do not use; this method existed for internal-use only. +func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { + return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for internal-use only. +func (*InternalMessageInfo) Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Deprecated: Do not use; this method existed for internal-use only. +func (*InternalMessageInfo) Size(m Message) int { + return protoV2.Size(MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for internal-use only. +func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { + return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 00000000..2187e877 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,58 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +func DiscardUnknown(m Message) { + if m != nil { + discardUnknown(MessageReflect(m)) + } +} + +func discardUnknown(m protoreflect.Message) { + m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + discardUnknown(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + discardUnknown(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + discardUnknown(v.Message()) + return true + }) + } + } + return true + }) + + // Discard unknown fields. + if len(m.GetUnknown()) > 0 { + m.SetUnknown(nil) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 00000000..42fc120c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,356 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
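DiscardUnknown above matters for round-tripping: unrecognized fields survive Unmarshal and are re-emitted by Marshal unless dropped. A sketch, where pb.V1 and pb.V2 are hypothetical messages and V1 carries a field V2 lacks:

data, _ := proto.Marshal(v1Msg)  // v1Msg has a field pb.V2 does not know about
v2Msg := &pb.V2{}
_ = proto.Unmarshal(data, v2Msg) // the extra field is kept as unknown bytes
proto.DiscardUnknown(v2Msg)      // drop it
out, _ := proto.Marshal(v2Msg)   // out no longer carries the stale field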
+ +package proto + +import ( + "errors" + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +type ( + // ExtensionDesc represents an extension descriptor and + // is used to interact with an extension field in a message. + // + // Variables of this type are generated in code by protoc-gen-go. + ExtensionDesc = protoimpl.ExtensionInfo + + // ExtensionRange represents a range of message extensions. + // Used in code generated by protoc-gen-go. + ExtensionRange = protoiface.ExtensionRangeV1 + + // Deprecated: Do not use; this is an internal type. + Extension = protoimpl.ExtensionFieldV1 + + // Deprecated: Do not use; this is an internal type. + XXX_InternalExtensions = protoimpl.ExtensionFields +) + +// ErrMissingExtension reports whether the extension was not present. +var ErrMissingExtension = errors.New("proto: missing extension") + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +// HasExtension reports whether the extension field is present in m +// either as an explicitly populated field or as an unknown field. +func HasExtension(m Message, xt *ExtensionDesc) (has bool) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return false + } + + // Check whether any populated known field matches the field number. + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + has = mr.Has(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + has = int32(fd.Number()) == xt.Field + return !has + }) + } + + // Check whether any unknown field matches the field number. + for b := mr.GetUnknown(); !has && len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] + } + return has +} + +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. +func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } + return true + }) + } + clearUnknown(mr, fieldNum(xt.Field)) +} + +// ClearAllExtensions clears all extensions from m. +// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) +} + +// GetExtension retrieves a proto2 extended field from m. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. 
+// +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. +func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) + } + bi = bi[n:] + } + + // For type incomplete descriptors, only retrieve the unknown fields. + if xt.ExtensionType == nil { + return []byte(bo), nil + } + + // If the extension field only exists as unknown fields, unmarshal it. + // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) + } + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) + } + } + + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: + return nil, ErrMissingExtension + } + + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + return v, nil +} + +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. +type extensionResolver struct{ xt protoreflect.ExtensionType } + +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil + } + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil + } + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. +func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable + } + + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) + if err != nil { + if err == ErrMissingExtension { + continue + } + return vs, err + } + vs[i] = v + } + return vs, nil +} + +// SetExtension sets an extension field in m to the provided value. 
+func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable + } + + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) + } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) + } + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() + } + } + + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) + } + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) + return nil +} + +// SetRawExtension inserts b into the unknown fields of m. +// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. +func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + // Verify that the raw field is valid. + for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) + } + b0 = b0[n:] + } + + ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} + +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the latter case, a type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd + } + } + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] + } + + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) + } + return xts, nil +} + +// isValidExtension reports whether xtd is a valid extension descriptor for md. +func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) +} + +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
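The *T-versus-T distinction described above shows up directly in this file's API: scalar extensions are set and returned as pointers in the v1 world. A sketch with a hypothetical proto2 extension pb.E_Nickname of type *string on an extendable pb.Person (both names are assumptions for illustration):

p := &pb.Person{}
if err := proto.SetExtension(p, pb.E_Nickname, proto.String("kim")); err != nil {
	log.Fatal(err)
}
if proto.HasExtension(p, pb.E_Nickname) {
	v, _ := proto.GetExtension(p, pb.E_Nickname)
	fmt.Println(*v.(*string)) // "kim"; scalars come back as *T in the v1 API
}
proto.ClearExtension(p, pb.E_Nickname)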
+func isScalarKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + return true + default: + return false + } +} + +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) + } + bi = bi[n:] + } + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) + } +} + +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 00000000..dcdc2202 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,306 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. +// +// Deprecated: Do not use. +type StructProperties struct { + // Prop are the properties for each field. + // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. + // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the protobuf field name. + OneofTypes map[string]*OneofProperties +} + +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. +type Properties struct { + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. + Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. + WireType int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. + Required bool + // Optional reports whether this is an optional field. + Optional bool + // Repeated reports whether this is a repeated field. + Repeated bool + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax.
+ Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. + HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} + +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. +type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. + // This is nil for messages that are not in the open-struct API. + Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s += "," + strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != "" { + s += ",json=" + p.JSONName + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { + s += ",proto3" + } + if p.Oneof { + s += ",oneof" + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" + for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": + p.Optional = true + case s == "req": + p.Required = true + case s == "rep": + p.Repeated = true + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": + p.Packed = true + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. + p.HasDefault = true + p.Default, i = tag[len("def="):], len(tag) + } + tag = strings.TrimPrefix(tag[i:], ",") + } +} + +// Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. 
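Parse and String above round-trip the generated struct tags, and Init below wires them up from reflection. A small sketch using only this file's exported API:

var p proto.Properties
p.Parse("bytes,1,opt,name=user_name,json=userName,proto3")
fmt.Println(p.Tag, p.OrigName, p.JSONName) // 1 user_name userName
fmt.Println(p.String()) // bytes,1,opt,name=user_name,json=userName,proto3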
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } +} + +var propertiesCache sync.Map // map[reflect.Type]*StructProperties + +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. +func GetProperties(t reflect.Type) *StructProperties { + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) + } + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) +} + +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + + var hasOneof bool + prop := new(StructProperties) + + // Construct a list of properties for each field in the struct. + for i := 0; i < t.NumField(); i++ { + p := new(Properties) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) + + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof + } + + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. + if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field + } + + prop.Prop = append(prop.Prop, p) + } + + // Construct a mapping of oneof field names to properties. + if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } + + prop.OneofTypes = make(map[string]*OneofProperties) + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T + Prop: new(Properties), + } + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. 
+ var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true + } + } + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p + } + } + + return prop +} + +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 00000000..5aee89c3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. +// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. +// This type exists for documentation purposes. +type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programmatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message.
+ // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. +// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. +type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for the duration of the method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. + Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src are appended to the corresponding +// list fields in dst. The entries of every map field in src are copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown field values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal.
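A sketch of the Merge/Clone/Equal semantics documented above, with pb.Item as a hypothetical generated message carrying a repeated Tags field:

dst := &pb.Item{Tags: []string{"a"}}
src := &pb.Item{Tags: []string{"b"}}
proto.Merge(dst, src)              // list fields append: Tags is now ["a", "b"]
cp := proto.Clone(dst).(*pb.Item)  // deep copy; shares no mutable memory with dst
fmt.Println(proto.Equal(dst, cp))  // true
fmt.Println(proto.Equal(dst, src)) // false: different populated values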
+func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 00000000..066b4323 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,317 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. +type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. +// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) + } + + // Locally cache the raw descriptor form for the file. + if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. +type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. 
+type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. +func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. + var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. + numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of a protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. +func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use.
+func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. + if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. + if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. +// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. 
If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. + xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. + } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 00000000..47eb3e44 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. +type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. 
A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. 
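+ // (e.g. "type.googleapis.com/google.protobuf.Duration").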
+ if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. 
+ var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that use + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that use + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. 
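+// For example, the inputs "foo: 1; bar: 2" and "foo: 1, bar: 2" parse identically.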
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. 
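+ // (e.g. an input with no backslash escapes and no quote characters is returned unchanged).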
+ simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(rune(i)), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go new file mode 100644 index 00000000..a31134ee --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -0,0 +1,560 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextMarshalV2 = false + +// TextMarshaler is a configurable text format marshaler. 
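+// +// A minimal usage sketch (msg stands in for any v1 message value; it is not +// defined in this package): +// +// tm := &proto.TextMarshaler{Compact: true} +// s := tm.Text(msg) // single-line text form of msg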
+type TextMarshaler struct { + Compact bool // use compact text format (one line) + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes the proto text format of m to w. +func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { + b, err := tm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// Text returns a proto text formatted string of m. +func (tm *TextMarshaler) Text(m Message) string { + b, _ := tm.marshal(m) + return string(b) +} + +func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return []byte(""), nil + } + + if wrapTextMarshalV2 { + if m, ok := m.(encoding.TextMarshaler); ok { + return m.MarshalText() + } + + opts := prototext.MarshalOptions{ + AllowPartial: true, + EmitUnknown: true, + } + if !tm.Compact { + opts.Indent = " " + } + if !tm.ExpandAny { + opts.Resolver = (*protoregistry.Types)(nil) + } + return opts.Marshal(mr.Interface()) + } else { + w := &textWriter{ + compact: tm.Compact, + expandAny: tm.ExpandAny, + complete: true, + } + + if m, ok := m.(encoding.TextMarshaler); ok { + b, err := m.MarshalText() + if err != nil { + return nil, err + } + w.Write(b) + return w.buf, nil + } + + err := w.writeMessage(mr) + return w.buf, err + } +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// MarshalText writes the proto text format of m to w. +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } + +// MarshalTextString returns a proto text formatted string of m. +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } + +// CompactText writes the compact proto text format of m to w. +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } + +// CompactTextString returns a compact proto text formatted string of m. +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } + +var ( + newline = []byte("\n") + endBraceNewline = []byte("}\n") + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + compact bool // same as TextMarshaler.Compact + expandAny bool // same as TextMarshaler.ExpandAny + complete bool // whether the current position is a complete line + indent int // indentation level; never negative + buf []byte +} + +func (w *textWriter) Write(p []byte) (n int, _ error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, p...) + w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) 
+ n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When a type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if the value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when the value was written in expanded format or an error +// was encountered. +func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind,
protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) 
+ } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. + name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 00000000..d7c28da5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. 
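+// For a message m that marshals successfully, Size(m) equals len(b), +// where b, _ = Marshal(m).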
+func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. +func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 00000000..398e3485 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. 
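+// +// These wrapper helpers are commonly used to populate optional scalar fields, +// e.g. (msg.Name being a hypothetical *string field): +// +// msg.Name = proto.String("example")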
+func String(v string) *string { return &v } diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go index 548f31da..f47c77a2 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -297,6 +297,8 @@ const ( ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 ValidatePeerCertificateChainReq_VerificationMode = 4 // Internal use only. ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 ValidatePeerCertificateChainReq_VerificationMode = 5 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6 ValidatePeerCertificateChainReq_VerificationMode = 6 ) // Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. @@ -308,6 +310,7 @@ var ( 3: "RESERVED_CUSTOM_VERIFICATION_MODE_3", 4: "RESERVED_CUSTOM_VERIFICATION_MODE_4", 5: "RESERVED_CUSTOM_VERIFICATION_MODE_5", + 6: "RESERVED_CUSTOM_VERIFICATION_MODE_6", } ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ "UNSPECIFIED": 0, @@ -316,6 +319,7 @@ var ( "RESERVED_CUSTOM_VERIFICATION_MODE_3": 3, "RESERVED_CUSTOM_VERIFICATION_MODE_4": 4, "RESERVED_CUSTOM_VERIFICATION_MODE_5": 5, + "RESERVED_CUSTOM_VERIFICATION_MODE_6": 6, } ) @@ -1978,8 +1982,8 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf4, - 0x05, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9d, + 0x06, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, @@ -2013,7 +2017,7 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x69, 0x63, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, @@ -2025,141 +2029,143 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x34, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 
0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, - 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, - 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, - 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, - 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, - 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, - 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, - 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, - 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, - 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 
0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, - 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, - 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, - 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, - 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, + 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, + 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x36, 0x10, 0x06, + 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, + 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 
0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, + 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, + 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, + 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, + 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, + 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 
0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, + 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, + 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, + 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 
0x12, 0x7d, 0x0a, 0x23, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, - 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, + 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, - 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, - 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, - 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, - 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, - 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, - 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 
0x66, - 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, - 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, - 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, - 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, - 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, - 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, - 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, - 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, - 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, - 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, + 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, + 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, + 0x5f, 0x53, 0x49, 
0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, + 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, + 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, + 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, - 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, - 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, - 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, - 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, - 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, - 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, - 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, - 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, - 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, - 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, - 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, - 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, - 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, - 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, - 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, + 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, + 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, + 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, + 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, + 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, + 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, + 0x53, 0x32, 0x41, 0x5f, 0x53, 
0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, + 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, + 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, + 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, + 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index a6402ee4..0cc78547 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -64,13 +64,13 @@ type s2av2TransportCreds struct { localIdentities []*commonpb.Identity verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode fallbackClientHandshake fallback.ClientHandshake - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + getS2AStream stream.GetS2AStream serverAuthorizationPolicy []byte } // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to establish a secure connection with a server. -func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream stream.GetS2AStream, serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -101,7 +101,7 @@ func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCre // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. 
-func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { +func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream stream.GetS2AStream) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -306,8 +306,9 @@ func NewClientTLSConfig( tokenManager tokenmanager.AccessTokenManager, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverName string, - serverAuthorizationPolicy []byte) (*tls.Config, error) { - s2AStream, err := createStream(ctx, s2av2Address, transportCreds, nil) + serverAuthorizationPolicy []byte, + getStream stream.GetS2AStream) (*tls.Config, error) { + s2AStream, err := createStream(ctx, s2av2Address, transportCreds, getStream) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) return nil, err @@ -350,7 +351,7 @@ func (x s2AGrpcStream) CloseSend() error { return x.stream.CloseSend() } -func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { +func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream stream.GetS2AStream) (stream.S2AStream, error) { if getS2AStream != nil { return getS2AStream(ctx, s2av2Address) } diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go index fa0002e3..6ca75f56 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go @@ -75,7 +75,7 @@ func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStr return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) } - // Extract TLS configiguration from SessionResp. + // Extract TLS configuration from SessionResp. 
tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration() var cert tls.Certificate diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go index cc79bd09..c52fccdd 100644 --- a/vendor/github.com/google/s2a-go/s2a.go +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -35,6 +35,7 @@ import ( "github.com/google/s2a-go/internal/tokenmanager" "github.com/google/s2a-go/internal/v2" "github.com/google/s2a-go/retry" + "github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/protobuf/proto" @@ -330,6 +331,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err tokenManager: nil, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + getStream: opts.getS2AStream, }, nil } return &s2aTLSClientConfigFactory{ @@ -338,6 +340,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err tokenManager: tokenManager, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + getStream: opts.getS2AStream, }, nil } @@ -347,6 +350,7 @@ type s2aTLSClientConfigFactory struct { tokenManager tokenmanager.AccessTokenManager verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode serverAuthorizationPolicy []byte + getStream stream.GetS2AStream } func (f *s2aTLSClientConfigFactory) Build( @@ -355,7 +359,7 @@ func (f *s2aTLSClientConfigFactory) Build( if opts != nil && opts.ServerName != "" { serverName = opts.ServerName } - return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) + return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy, f.getStream) } func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { @@ -370,6 +374,8 @@ func getVerificationMode(verificationMode VerificationModeType) s2av2pb.Validate return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 case ReservedCustomVerificationMode5: return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 + case ReservedCustomVerificationMode6: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6 default: return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED } diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go index 5bbf31bf..b7a277f9 100644 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ b/vendor/github.com/google/s2a-go/s2a_options.go @@ -19,7 +19,6 @@ package s2a import ( - "context" "crypto/tls" "errors" "sync" @@ -28,7 +27,7 @@ import ( "github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" - s2apbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + s2av1pb "github.com/google/s2a-go/internal/proto/common_go_proto" s2apb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) @@ -36,6 +35,17 @@ import ( type Identity interface { // Name returns the name of the identity. 
Name() string + Attributes() map[string]string +} + +type UnspecifiedID struct { + Attr map[string]string +} + +func (u *UnspecifiedID) Name() string { return "" } + +func (u *UnspecifiedID) Attributes() map[string]string { + return u.Attr } type spiffeID struct { @@ -44,10 +54,10 @@ type spiffeID struct { func (s *spiffeID) Name() string { return s.spiffeID } +func (spiffeID) Attributes() map[string]string { return nil } + // NewSpiffeID creates a SPIFFE ID from id. -func NewSpiffeID(id string) Identity { - return &spiffeID{spiffeID: id} -} +func NewSpiffeID(id string) Identity { return &spiffeID{spiffeID: id} } type hostname struct { hostname string @@ -55,10 +65,10 @@ type hostname struct { func (h *hostname) Name() string { return h.hostname } +func (hostname) Attributes() map[string]string { return nil } + // NewHostname creates a hostname from name. -func NewHostname(name string) Identity { - return &hostname{hostname: name} -} +func NewHostname(name string) Identity { return &hostname{hostname: name} } type uid struct { uid string @@ -66,10 +76,10 @@ type uid struct { func (h *uid) Name() string { return h.uid } +func (uid) Attributes() map[string]string { return nil } + // NewUID creates a UID from name. -func NewUID(name string) Identity { - return &uid{uid: name} -} +func NewUID(name string) Identity { return &uid{uid: name} } // VerificationModeType specifies the mode that S2A must use to verify the peer // certificate chain. @@ -83,6 +93,7 @@ const ( ReservedCustomVerificationMode3 ReservedCustomVerificationMode4 ReservedCustomVerificationMode5 + ReservedCustomVerificationMode6 ) // ClientOptions contains the client-side options used to establish a secure @@ -137,7 +148,7 @@ type ClientOptions struct { FallbackOpts *FallbackOptions // Generates an S2AStream interface for talking to the S2A server. - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + getS2AStream stream.GetS2AStream // Serialized user specified policy for server authorization. serverAuthorizationPolicy []byte @@ -191,7 +202,7 @@ type ServerOptions struct { VerificationMode VerificationModeType // Generates an S2AStream interface for talking to the S2A server. - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + getS2AStream stream.GetS2AStream } // DefaultServerOptions returns the default server options. 
@@ -202,17 +213,30 @@ func DefaultServerOptions(s2aAddress string) *ServerOptions { } } -func toProtoIdentity(identity Identity) (*s2apbv1.Identity, error) { +func toProtoIdentity(identity Identity) (*s2av1pb.Identity, error) { if identity == nil { return nil, nil } switch id := identity.(type) { case *spiffeID: - return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + return &s2av1pb.Identity{ + IdentityOneof: &s2av1pb.Identity_SpiffeId{SpiffeId: id.Name()}, + Attributes: id.Attributes(), + }, nil case *hostname: - return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Hostname{Hostname: id.Name()}}, nil + return &s2av1pb.Identity{ + IdentityOneof: &s2av1pb.Identity_Hostname{Hostname: id.Name()}, + Attributes: id.Attributes(), + }, nil case *uid: - return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Uid{Uid: id.Name()}}, nil + return &s2av1pb.Identity{ + IdentityOneof: &s2av1pb.Identity_Uid{Uid: id.Name()}, + Attributes: id.Attributes(), + }, nil + case *UnspecifiedID: + return &s2av1pb.Identity{ + Attributes: id.Attributes(), + }, nil default: return nil, errors.New("unrecognized identity type") } @@ -224,11 +248,24 @@ func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) { } switch id := identity.(type) { case *spiffeID: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + return &s2apb.Identity{ + IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}, + Attributes: id.Attributes(), + }, nil case *hostname: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil + return &s2apb.Identity{ + IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}, + Attributes: id.Attributes(), + }, nil case *uid: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil + return &s2apb.Identity{ + IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}, + Attributes: id.Attributes(), + }, nil + case *UnspecifiedID: + return &s2apb.Identity{ + Attributes: id.Attributes(), + }, nil default: return nil, errors.New("unrecognized identity type") } diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go index 584bf32b..ae2d5eb4 100644 --- a/vendor/github.com/google/s2a-go/stream/s2a_stream.go +++ b/vendor/github.com/google/s2a-go/stream/s2a_stream.go @@ -20,6 +20,8 @@ package stream import ( + "context" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -32,3 +34,6 @@ type S2AStream interface { // Closes the channel to the S2A server. CloseSend() error } + +// GetS2AStream type is for generating an S2AStream interface for talking to the S2A server. 
+type GetS2AStream func(ctx context.Context, s2av2Address string, opts ...string) (S2AStream, error) diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 44d4d002..2fcff6e2 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.13.0" + "v2": "2.15.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index d63421b7..fec6b1da 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,38 @@ # Changelog +## [2.15.0](https://github.com/googleapis/gax-go/compare/v2.14.2...v2.15.0) (2025-07-09) + + +### Features + +* **apierror:** improve gRPC status code mapping for HTTP errors ([#431](https://github.com/googleapis/gax-go/issues/431)) ([c207f2a](https://github.com/googleapis/gax-go/commit/c207f2a19ab91d3baee458b57d4aa992519025c7)) + +## [2.14.2](https://github.com/googleapis/gax-go/compare/v2.14.1...v2.14.2) (2025-05-12) + + +### Documentation + +* **v2:** Fix Backoff doc to accurately explain Multiplier ([#423](https://github.com/googleapis/gax-go/issues/423)) ([16d1791](https://github.com/googleapis/gax-go/commit/16d17917121ea9f5d84ba52b5c7c7f2ec0f9e784)), refs [#422](https://github.com/googleapis/gax-go/issues/422) + +## [2.14.1](https://github.com/googleapis/gax-go/compare/v2.14.0...v2.14.1) (2024-12-19) + + +### Bug Fixes + +* update golang.org/x/net to v0.33.0 ([#391](https://github.com/googleapis/gax-go/issues/391)) ([547a5b4](https://github.com/googleapis/gax-go/commit/547a5b43aa6f376f71242da9f18e65fbdfb342f6)) + + +### Documentation + +* fix godoc to refer to the proper envvar ([#387](https://github.com/googleapis/gax-go/issues/387)) ([dc6baf7](https://github.com/googleapis/gax-go/commit/dc6baf75c1a737233739630b5af6c9759f08abcd)) + +## [2.14.0](https://github.com/googleapis/gax-go/compare/v2.13.0...v2.14.0) (2024-11-13) + + +### Features + +* **internallog:** add a logging support package ([#380](https://github.com/googleapis/gax-go/issues/380)) ([c877470](https://github.com/googleapis/gax-go/commit/c87747098135631a3de5865ed03aaf2c79fd9319)) + ## [2.13.0](https://github.com/googleapis/gax-go/compare/v2.12.5...v2.13.0) (2024-07-22) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index 7de60773..90a40d29 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -38,6 +38,7 @@ package apierror import ( "errors" "fmt" + "net/http" "strings" jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto" @@ -49,6 +50,39 @@ import ( "google.golang.org/protobuf/proto" ) +// canonicalMap maps HTTP codes to gRPC status code equivalents. 
+var canonicalMap = map[int]codes.Code{ + http.StatusOK: codes.OK, + http.StatusBadRequest: codes.InvalidArgument, + http.StatusForbidden: codes.PermissionDenied, + http.StatusNotFound: codes.NotFound, + http.StatusConflict: codes.Aborted, + http.StatusRequestedRangeNotSatisfiable: codes.OutOfRange, + http.StatusTooManyRequests: codes.ResourceExhausted, + http.StatusGatewayTimeout: codes.DeadlineExceeded, + http.StatusNotImplemented: codes.Unimplemented, + http.StatusServiceUnavailable: codes.Unavailable, + http.StatusUnauthorized: codes.Unauthenticated, +} + +// toCode maps an http code to the most correct equivalent. +func toCode(httpCode int) codes.Code { + if sCode, ok := canonicalMap[httpCode]; ok { + return sCode + } + switch { + case httpCode >= 200 && httpCode < 300: + return codes.OK + + case httpCode >= 400 && httpCode < 500: + return codes.FailedPrecondition + + case httpCode >= 500 && httpCode < 600: + return codes.Internal + } + return codes.Unknown +} + // ErrDetails holds the google/rpc/error_details.proto messages. type ErrDetails struct { ErrorInfo *errdetails.ErrorInfo @@ -217,6 +251,11 @@ func (a *APIError) Error() string { // GRPCStatus extracts the underlying gRPC Status error. // This method is necessary to fulfill the interface // described in https://pkg.go.dev/google.golang.org/grpc/status#FromError. +// +// For errors that originated as an HTTP-based googleapi.Error, GRPCStatus() +// returns a status that attempts to map from the original HTTP code to an +// equivalent gRPC status code. For use cases where you want to avoid this +// behavior, error unwrapping can be used. func (a *APIError) GRPCStatus() *status.Status { return a.status } @@ -243,9 +282,9 @@ func (a *APIError) Metadata() map[string]string { // setDetailsFromError parses a Status error or a googleapi.Error // and sets status and details or httpErr and details, respectively. // It returns false if neither Status nor googleapi.Error can be parsed. -// When err is a googleapi.Error, the status of the returned error will -// be set to an Unknown error, rather than nil, since a nil code is -// interpreted as OK in the gRPC status package. +// +// When err is a googleapi.Error, the status of the returned error will be +// mapped to the closest equivalent gGRPC status code. func (a *APIError) setDetailsFromError(err error) bool { st, isStatus := status.FromError(err) var herr *googleapi.Error @@ -258,7 +297,7 @@ func (a *APIError) setDetailsFromError(err error) bool { case isHTTPErr: a.httpErr = herr a.details = parseHTTPDetails(herr) - a.status = status.New(codes.Unknown, herr.Message) + a.status = status.New(toCode(a.httpErr.Code), herr.Message) default: return false } diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go index c52e03f6..ac1f2b11 100644 --- a/vendor/github.com/googleapis/gax-go/v2/call_option.go +++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -156,10 +156,13 @@ func (r *httpRetryer) Retry(err error) (time.Duration, bool) { return 0, false } -// Backoff implements exponential backoff. The wait time between retries is a -// random value between 0 and the "retry period" - the time between retries. The -// retry period starts at Initial and increases by the factor of Multiplier -// every retry, but is capped at Max. +// Backoff implements backoff logic for retries. The configuration for retries +// is described in https://google.aip.dev/client-libraries/4221. 
The current +// retry limit starts at Initial and increases by a factor of Multiplier every +// retry, but is capped at Max. The actual wait time between retries is a +// random value between 1ns and the current retry limit. The purpose of this +// random jitter is explained in +// https://www.awsarchitectureblog.com/2015/03/backoff.html. // // Note: MaxNumRetries / RPCDeadline is specifically not provided. These should // be built on top of Backoff. diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index e12421cf..0ab1bce5 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.13.0" +const Version = "2.15.0" diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/grpclog/grpclog.go b/vendor/github.com/googleapis/gax-go/v2/internallog/grpclog/grpclog.go new file mode 100644 index 00000000..bf1d864b --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/internallog/grpclog/grpclog.go @@ -0,0 +1,88 @@ +// Copyright 2024, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package grpclog in intended for internal use by generated clients only. +package grpclog + +import ( + "context" + "encoding/json" + "log/slog" + "strings" + + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +// ProtoMessageRequest returns a lazily evaluated [slog.LogValuer] for +// the provided message. The context is used to extract outgoing headers. +func ProtoMessageRequest(ctx context.Context, msg proto.Message) slog.LogValuer { + return &protoMessage{ctx: ctx, msg: msg} +} + +// ProtoMessageResponse returns a lazily evaluated [slog.LogValuer] for +// the provided message. 
+func ProtoMessageResponse(msg proto.Message) slog.LogValuer { + return &protoMessage{msg: msg} +} + +type protoMessage struct { + ctx context.Context + msg proto.Message +} + +func (m *protoMessage) LogValue() slog.Value { + if m == nil || m.msg == nil { + return slog.Value{} + } + + var groupValueAttrs []slog.Attr + + if m.ctx != nil { + var headerAttr []slog.Attr + if m, ok := metadata.FromOutgoingContext(m.ctx); ok { + for k, v := range m { + headerAttr = append(headerAttr, slog.String(k, strings.Join(v, ","))) + } + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + } + mo := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + if b, err := mo.Marshal(m.msg); err == nil { + var m map[string]any + if err := json.Unmarshal(b, &m); err == nil { + groupValueAttrs = append(groupValueAttrs, slog.Any("payload", m)) + } + } + + return slog.GroupValue(groupValueAttrs...) +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go b/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go new file mode 100644 index 00000000..19f4be35 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go @@ -0,0 +1,134 @@ +// Copyright 2024, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package internal provides some common logic and types to other logging +// sub-packages. +package internal + +import ( + "context" + "io" + "log/slog" + "os" + "strings" + "time" +) + +const ( + // LoggingLevelEnvVar is the environment variable used to enable logging + // at a particular level. + LoggingLevelEnvVar = "GOOGLE_SDK_GO_LOGGING_LEVEL" + + googLvlKey = "severity" + googMsgKey = "message" + googSourceKey = "sourceLocation" + googTimeKey = "timestamp" +) + +// NewLoggerWithWriter is exposed for testing. 
+func NewLoggerWithWriter(w io.Writer) *slog.Logger { + lvl, loggingEnabled := checkLoggingLevel() + if !loggingEnabled { + return slog.New(noOpHandler{}) + } + return slog.New(newGCPSlogHandler(lvl, w)) +} + +// checkLoggingLevel returned the configured logging level and whether or not +// logging is enabled. +func checkLoggingLevel() (slog.Leveler, bool) { + sLevel := strings.ToLower(os.Getenv(LoggingLevelEnvVar)) + var level slog.Level + switch sLevel { + case "debug": + level = slog.LevelDebug + case "info": + level = slog.LevelInfo + case "warn": + level = slog.LevelWarn + case "error": + level = slog.LevelError + default: + return nil, false + } + return level, true +} + +// newGCPSlogHandler returns a Handler that is configured to output in a JSON +// format with well-known keys. For more information on this format see +// https://cloud.google.com/logging/docs/agent/logging/configuration#special-fields. +func newGCPSlogHandler(lvl slog.Leveler, w io.Writer) slog.Handler { + return slog.NewJSONHandler(w, &slog.HandlerOptions{ + Level: lvl, + ReplaceAttr: replaceAttr, + }) +} + +// replaceAttr remaps default Go logging keys to match what is expected in +// cloud logging. +func replaceAttr(groups []string, a slog.Attr) slog.Attr { + if groups == nil { + if a.Key == slog.LevelKey { + a.Key = googLvlKey + return a + } else if a.Key == slog.MessageKey { + a.Key = googMsgKey + return a + } else if a.Key == slog.SourceKey { + a.Key = googSourceKey + return a + } else if a.Key == slog.TimeKey { + a.Key = googTimeKey + if a.Value.Kind() == slog.KindTime { + a.Value = slog.StringValue(a.Value.Time().Format(time.RFC3339)) + } + return a + } + } + return a +} + +// The handler returned if logging is not enabled. +type noOpHandler struct{} + +func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool { + return false +} + +func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error { + return nil +} + +func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler { + return h +} + +func (h noOpHandler) WithGroup(_ string) slog.Handler { + return h +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go new file mode 100644 index 00000000..e47ab32a --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go @@ -0,0 +1,154 @@ +// Copyright 2024, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package internallog in intended for internal use by generated clients only. +package internallog + +import ( + "bytes" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "os" + "strings" + + "github.com/googleapis/gax-go/v2/internallog/internal" +) + +// New returns a new [slog.Logger] default logger, or the provided logger if +// non-nil. The returned logger will be a no-op logger unless the environment +// variable GOOGLE_SDK_GO_LOGGING_LEVEL is set. +func New(l *slog.Logger) *slog.Logger { + if l != nil { + return l + } + return internal.NewLoggerWithWriter(os.Stderr) +} + +// HTTPRequest returns a lazily evaluated [slog.LogValuer] for a +// [http.Request] and the associated body. +func HTTPRequest(req *http.Request, body []byte) slog.LogValuer { + return &request{ + req: req, + payload: body, + } +} + +type request struct { + req *http.Request + payload []byte +} + +func (r *request) LogValue() slog.Value { + if r == nil || r.req == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method)) + groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String())) + + var headerAttr []slog.Attr + for k, val := range r.req.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) +} + +// HTTPResponse returns a lazily evaluated [slog.LogValuer] for a +// [http.Response] and the associated body. +func HTTPResponse(resp *http.Response, body []byte) slog.LogValuer { + return &response{ + resp: resp, + payload: body, + } +} + +type response struct { + resp *http.Response + payload []byte +} + +func (r *response) LogValue() slog.Value { + if r == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode))) + + var headerAttr []slog.Attr + for k, val := range r.resp.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) 
+} + +func processPayload(payload []byte) (slog.Attr, bool) { + peekChar := payload[0] + if peekChar == '{' { + // JSON object + var m map[string]any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else if peekChar == '[' { + // JSON array + var m []any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else { + // Everything else + buf := &bytes.Buffer{} + if err := json.Compact(buf, payload); err != nil { + // Write raw payload incase of error + buf.Write(payload) + } + return slog.String("payload", buf.String()), true + } + return slog.Attr{}, false +} diff --git a/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go b/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go new file mode 100644 index 00000000..d4d6019f --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go @@ -0,0 +1,63 @@ +// Copyright 2024, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//go:build go1.23 + +// Package iterator contains helper for working with iterators. It is meant for +// internal use only by the Go Client Libraries. +package iterator + +import ( + "iter" + + otherit "google.golang.org/api/iterator" +) + +// RangeAdapter transforms client iterator type into a [iter.Seq2] that can +// be used with Go's range expressions. +// +// This is for internal use only. +func RangeAdapter[T any](next func() (T, error)) iter.Seq2[T, error] { + var err error + return func(yield func(T, error) bool) { + for { + if err != nil { + return + } + var resp T + resp, err = next() + if err == otherit.Done { + return + } + if !yield(resp, err) { + return + } + } + } +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE deleted file mode 100644 index e87a115e..00000000 --- a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. 
Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. 
under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go deleted file mode 100644 index d59ecbb2..00000000 --- a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go +++ /dev/null @@ -1,65 +0,0 @@ -package parseutil - -import ( - "errors" - "fmt" - "io/ioutil" - "net/url" - "os" - "strings" -) - -var ( - ErrNotAUrl = errors.New("not a url") - ErrNotParsed = errors.New("not a parsed value") -) - -// ParsePath parses a URL with schemes file://, env://, or any other. Depending -// on the scheme it will return specific types of data: -// -// * file:// will return a string with the file's contents -// -// * env:// will return a string with the env var's contents -// -// * Anything else will return the string as it was. Functionally this means -// anything for which Go's `url.Parse` function does not throw an error. If you -// want to ensure that this function errors if a known scheme is not found, use -// MustParsePath. -// -// On error, we return the original string along with the error. The caller can -// switch on errors.Is(err, ErrNotAUrl) to understand whether it was the parsing -// step that errored or something else (such as a file not found). This is -// useful to attempt to read a non-URL string from some resource, but where the -// original input may simply be a valid string of that type. -func ParsePath(path string) (string, error) { - return parsePath(path, false) -} - -// MustParsePath behaves like ParsePath but will return ErrNotAUrl if the value -// is not a URL with a scheme that can be parsed by this function. 
-func MustParsePath(path string) (string, error) { - return parsePath(path, true) -} - -func parsePath(path string, mustParse bool) (string, error) { - path = strings.TrimSpace(path) - parsed, err := url.Parse(path) - if err != nil { - return path, fmt.Errorf("error parsing url (%q): %w", err.Error(), ErrNotAUrl) - } - switch parsed.Scheme { - case "file": - contents, err := ioutil.ReadFile(strings.TrimPrefix(path, "file://")) - if err != nil { - return path, fmt.Errorf("error reading file at %s: %w", path, err) - } - return strings.TrimSpace(string(contents)), nil - case "env": - return strings.TrimSpace(os.Getenv(strings.TrimPrefix(path, "env://"))), nil - default: - if mustParse { - return "", ErrNotParsed - } - return path, nil - } -} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go deleted file mode 100644 index e469499b..00000000 --- a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go +++ /dev/null @@ -1,502 +0,0 @@ -package parseutil - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "regexp" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-secure-stdlib/strutil" - sockaddr "github.com/hashicorp/go-sockaddr" - "github.com/mitchellh/mapstructure" -) - -var validCapacityString = regexp.MustCompile("^[\t ]*([0-9]+)[\t ]?([kmgtKMGT][iI]?[bB])?[\t ]*$") - -// ParseCapacityString parses a capacity string and returns the number of bytes it represents. -// Capacity strings are things like 5gib or 10MB. Supported prefixes are kb, kib, mb, mib, gb, -// gib, tb, tib, which are not case sensitive. If no prefix is present, the number is assumed -// to be in bytes already. -func ParseCapacityString(in interface{}) (uint64, error) { - var cap uint64 - - jsonIn, ok := in.(json.Number) - if ok { - in = jsonIn.String() - } - - switch inp := in.(type) { - case nil: - // return default of zero - case string: - if inp == "" { - return cap, nil - } - - matches := validCapacityString.FindStringSubmatch(inp) - - // no sub-groups means we couldn't parse it - if len(matches) <= 1 { - return cap, errors.New("could not parse capacity from input") - } - - var multiplier uint64 = 1 - switch strings.ToLower(matches[2]) { - case "kb": - multiplier = 1000 - case "kib": - multiplier = 1024 - case "mb": - multiplier = 1000 * 1000 - case "mib": - multiplier = 1024 * 1024 - case "gb": - multiplier = 1000 * 1000 * 1000 - case "gib": - multiplier = 1024 * 1024 * 1024 - case "tb": - multiplier = 1000 * 1000 * 1000 * 1000 - case "tib": - multiplier = 1024 * 1024 * 1024 * 1024 - } - - size, err := strconv.ParseUint(matches[1], 10, 64) - if err != nil { - return cap, err - } - - cap = size * multiplier - case int: - cap = uint64(inp) - case int32: - cap = uint64(inp) - case int64: - cap = uint64(inp) - case uint: - cap = uint64(inp) - case uint32: - cap = uint64(inp) - case uint64: - cap = uint64(inp) - case float32: - cap = uint64(inp) - case float64: - cap = uint64(inp) - default: - return cap, errors.New("could not parse capacity from input") - } - - return cap, nil -} - -// Parse a duration from an arbitrary value (a string or numeric value) into -// a time.Duration; when units are missing (such as when a numeric type is -// provided), the duration is assumed to be in seconds. 
-func ParseDurationSecond(in interface{}) (time.Duration, error) { - var dur time.Duration - jsonIn, ok := in.(json.Number) - if ok { - in = jsonIn.String() - } - switch inp := in.(type) { - case nil: - // return default of zero - case string: - if inp == "" { - return dur, nil - } - - if v, err := strconv.ParseInt(inp, 10, 64); err == nil { - return time.Duration(v) * time.Second, nil - } - - if strings.HasSuffix(inp, "d") { - v, err := strconv.ParseInt(inp[:len(inp)-1], 10, 64) - if err != nil { - return dur, err - } - return time.Duration(v) * 24 * time.Hour, nil - } - - var err error - if dur, err = time.ParseDuration(inp); err != nil { - return dur, err - } - case int: - dur = time.Duration(inp) * time.Second - case int32: - dur = time.Duration(inp) * time.Second - case int64: - dur = time.Duration(inp) * time.Second - case uint: - dur = time.Duration(inp) * time.Second - case uint32: - dur = time.Duration(inp) * time.Second - case uint64: - dur = time.Duration(inp) * time.Second - case float32: - dur = time.Duration(inp) * time.Second - case float64: - dur = time.Duration(inp) * time.Second - case time.Duration: - dur = inp - default: - return 0, errors.New("could not parse duration from input") - } - - return dur, nil -} - -// Parse an absolute timestamp from the provided arbitrary value (string or -// numeric value). When an untyped numeric value is provided, it is assumed -// to be seconds from the Unix Epoch. -func ParseAbsoluteTime(in interface{}) (time.Time, error) { - var t time.Time - switch inp := in.(type) { - case nil: - // return default of zero - return t, nil - case string: - // Allow RFC3339 with nanoseconds, or without, - // or an epoch time as an integer. - var err error - t, err = time.Parse(time.RFC3339Nano, inp) - if err == nil { - break - } - t, err = time.Parse(time.RFC3339, inp) - if err == nil { - break - } - epochTime, err := strconv.ParseInt(inp, 10, 64) - if err == nil { - t = time.Unix(epochTime, 0) - break - } - return t, errors.New("could not parse string as date and time") - case json.Number: - epochTime, err := inp.Int64() - if err != nil { - return t, err - } - t = time.Unix(epochTime, 0) - case int: - t = time.Unix(int64(inp), 0) - case int32: - t = time.Unix(int64(inp), 0) - case int64: - t = time.Unix(inp, 0) - case uint: - t = time.Unix(int64(inp), 0) - case uint32: - t = time.Unix(int64(inp), 0) - case uint64: - t = time.Unix(int64(inp), 0) - default: - return t, errors.New("could not parse time from input type") - } - return t, nil -} - -// ParseInt takes an arbitrary value (either a string or numeric type) and -// parses it as an int64 value. This value is assumed to be larger than the -// provided type, but cannot safely be cast. -// -// When the end value is bounded (such as an int value), it is recommended -// to instead call SafeParseInt or SafeParseIntRange to safely cast to a -// more restrictive type. 
-func ParseInt(in interface{}) (int64, error) { - var ret int64 - jsonIn, ok := in.(json.Number) - if ok { - in = jsonIn.String() - } - switch in.(type) { - case string: - inp := in.(string) - if inp == "" { - return 0, nil - } - var err error - left, err := strconv.ParseInt(inp, 10, 64) - if err != nil { - return ret, err - } - ret = left - case int: - ret = int64(in.(int)) - case int32: - ret = int64(in.(int32)) - case int64: - ret = in.(int64) - case uint: - ret = int64(in.(uint)) - case uint32: - ret = int64(in.(uint32)) - case uint64: - ret = int64(in.(uint64)) - default: - return 0, errors.New("could not parse value from input") - } - - return ret, nil -} - -// ParseDirectIntSlice behaves similarly to ParseInt, but accepts typed -// slices, returning a slice of int64s. -// -// If the starting value may not be in slice form (e.g.. a bare numeric value -// could be provided), it is suggested to call ParseIntSlice instead. -func ParseDirectIntSlice(in interface{}) ([]int64, error) { - var ret []int64 - - switch in.(type) { - case []int: - for _, v := range in.([]int) { - ret = append(ret, int64(v)) - } - case []int32: - for _, v := range in.([]int32) { - ret = append(ret, int64(v)) - } - case []int64: - // For consistency to ensure callers can always modify ret without - // impacting in. - for _, v := range in.([]int64) { - ret = append(ret, v) - } - case []uint: - for _, v := range in.([]uint) { - ret = append(ret, int64(v)) - } - case []uint32: - for _, v := range in.([]uint32) { - ret = append(ret, int64(v)) - } - case []uint64: - for _, v := range in.([]uint64) { - ret = append(ret, int64(v)) - } - case []json.Number: - for _, v := range in.([]json.Number) { - element, err := ParseInt(v) - if err != nil { - return nil, err - } - ret = append(ret, element) - } - case []string: - for _, v := range in.([]string) { - element, err := ParseInt(v) - if err != nil { - return nil, err - } - ret = append(ret, element) - } - default: - return nil, errors.New("could not parse value from input") - } - - return ret, nil -} - -// ParseIntSlice is a helper function for handling upgrades of optional -// slices; that is, if the API accepts a type similar to <int|[]int|string>, -// nicely handle the common cases of providing only an int-ish, providing -// an actual slice of int-ishes, or providing a comma-separated list of -// numbers. -// -// When []int64 is not the desired final type (or the values should be -// range-bound), it is suggested to call SafeParseIntSlice or -// SafeParseIntSliceRange instead. -func ParseIntSlice(in interface{}) ([]int64, error) { - if ret, err := ParseInt(in); err == nil { - return []int64{ret}, nil - } - - if ret, err := ParseDirectIntSlice(in); err == nil { - return ret, nil - } - - if strings, err := ParseCommaStringSlice(in); err == nil { - var ret []int64 - for _, v := range strings { - if v == "" { - // Ignore empty fields - continue - } - - element, err := ParseInt(v) - if err != nil { - return nil, err - } - ret = append(ret, element) - } - - return ret, nil - } - - return nil, errors.New("could not parse value from input") -} - -// Parses the provided arbitrary value as a boolean-like value. -func ParseBool(in interface{}) (bool, error) { - var result bool - if err := mapstructure.WeakDecode(in, &result); err != nil { - return false, err - } - return result, nil -} - -// Parses the provided arbitrary value as a string.
-func ParseString(in interface{}) (string, error) { - var result string - if err := mapstructure.WeakDecode(in, &result); err != nil { - return "", err - } - return result, nil -} - -// Parses the provided string-like value as a comma-separated list of values. -func ParseCommaStringSlice(in interface{}) ([]string, error) { - jsonIn, ok := in.(json.Number) - if ok { - in = jsonIn.String() - } - - rawString, ok := in.(string) - if ok && rawString == "" { - return []string{}, nil - } - var result []string - config := &mapstructure.DecoderConfig{ - Result: &result, - WeaklyTypedInput: true, - DecodeHook: mapstructure.StringToSliceHookFunc(","), - } - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return nil, err - } - if err := decoder.Decode(in); err != nil { - return nil, err - } - return strutil.TrimStrings(result), nil -} - -// Parses the specified value as one or more addresses, separated by commas. -func ParseAddrs(addrs interface{}) ([]*sockaddr.SockAddrMarshaler, error) { - out := make([]*sockaddr.SockAddrMarshaler, 0) - stringAddrs := make([]string, 0) - - switch addrs.(type) { - case string: - stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",") - if len(stringAddrs) == 0 { - return nil, fmt.Errorf("unable to parse addresses from %v", addrs) - } - - case []string: - stringAddrs = addrs.([]string) - - case []interface{}: - for _, v := range addrs.([]interface{}) { - stringAddr, ok := v.(string) - if !ok { - return nil, fmt.Errorf("error parsing %v as string", v) - } - stringAddrs = append(stringAddrs, stringAddr) - } - - default: - return nil, fmt.Errorf("unknown address input type %T", addrs) - } - - for _, addr := range stringAddrs { - sa, err := sockaddr.NewSockAddr(addr) - if err != nil { - return nil, fmt.Errorf("error parsing address %q: %w", addr, err) - } - out = append(out, &sockaddr.SockAddrMarshaler{ - SockAddr: sa, - }) - } - - return out, nil -} - -// Parses the provided arbitrary value (see ParseInt), ensuring it is within -// the specified range (inclusive of bounds). If this range corresponds to a -// smaller type, the returned value can then be safely cast without risking -// overflow. -func SafeParseIntRange(in interface{}, min int64, max int64) (int64, error) { - raw, err := ParseInt(in) - if err != nil { - return 0, err - } - - if raw < min || raw > max { - return 0, fmt.Errorf("error parsing int value; out of range [%v to %v]: %v", min, max, raw) - } - - return raw, nil -} - -// Parses the specified arbitrary value (see ParseInt), ensuring that the -// resulting value is within the range for an int value. If no error occurred, -// the caller knows no overflow occurred. -func SafeParseInt(in interface{}) (int, error) { - raw, err := SafeParseIntRange(in, math.MinInt, math.MaxInt) - return int(raw), err -} - -// Parses the provided arbitrary value (see ParseIntSlice) into a slice of -// int64 values, ensuring each is within the specified range (inclusive of -// bounds). If this range corresponds to a smaller type, the returned value -// can then be safely cast without risking overflow. -// -// If elements is positive, it is used to ensure the resulting slice is -// bounded above by that many number of elements (inclusive). 
-func SafeParseIntSliceRange(in interface{}, minValue int64, maxValue int64, elements int) ([]int64, error) { - raw, err := ParseIntSlice(in) - if err != nil { - return nil, err - } - - if elements > 0 && len(raw) > elements { - return nil, fmt.Errorf("error parsing value from input: got %v but expected at most %v elements", len(raw), elements) - } - - for index, value := range raw { - if value < minValue || value > maxValue { - return nil, fmt.Errorf("error parsing value from input: element %v was outside of range [%v to %v]: %v", index, minValue, maxValue, value) - } - } - - return raw, nil -} - -// Parses the provided arbitrary value (see ParseIntSlice) into a slice of -// int values, ensuring the each resulting value in the slice is within the -// range for an int value. If no error occurred, the caller knows no overflow -// occurred. -// -// If elements is positive, it is used to ensure the resulting slice is -// bounded above by that many number of elements (inclusive). -func SafeParseIntSlice(in interface{}, elements int) ([]int, error) { - raw, err := SafeParseIntSliceRange(in, math.MinInt, math.MaxInt, elements) - if err != nil || raw == nil { - return nil, err - } - - var result = make([]int, 0, len(raw)) - for _, element := range raw { - result = append(result, int(element)) - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE deleted file mode 100644 index e87a115e..00000000 --- a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
"Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go deleted file mode 100644 index 102462dc..00000000 --- a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go +++ /dev/null @@ -1,510 +0,0 @@ -package strutil - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "sort" - "strings" - "unicode" - - glob "github.com/ryanuber/go-glob" -) - -// StrListContainsGlob looks for a string in a list of strings and allows -// globs. -func StrListContainsGlob(haystack []string, needle string) bool { - for _, item := range haystack { - if glob.Glob(item, needle) { - return true - } - } - return false -} - -// StrListContains looks for a string in a list of strings. -func StrListContains(haystack []string, needle string) bool { - for _, item := range haystack { - if item == needle { - return true - } - } - return false -} - -// StrListContainsCaseInsensitive looks for a string in a list of strings. -func StrListContainsCaseInsensitive(haystack []string, needle string) bool { - for _, item := range haystack { - if strings.EqualFold(item, needle) { - return true - } - } - return false -} - -// StrListSubset checks if a given list is a subset -// of another set -func StrListSubset(super, sub []string) bool { - for _, item := range sub { - if !StrListContains(super, item) { - return false - } - } - return true -} - -// ParseDedupAndSortStrings parses a comma separated list of strings -// into a slice of strings. The return slice will be sorted and will -// not contain duplicate or empty items. -func ParseDedupAndSortStrings(input string, sep string) []string { - input = strings.TrimSpace(input) - parsed := []string{} - if input == "" { - // Don't return nil - return parsed - } - return RemoveDuplicates(strings.Split(input, sep), false) -} - -// ParseDedupLowercaseAndSortStrings parses a comma separated list of -// strings into a slice of strings. The return slice will be sorted and -// will not contain duplicate or empty items. The values will be converted -// to lower case. -func ParseDedupLowercaseAndSortStrings(input string, sep string) []string { - input = strings.TrimSpace(input) - parsed := []string{} - if input == "" { - // Don't return nil - return parsed - } - return RemoveDuplicates(strings.Split(input, sep), true) -} - -// ParseKeyValues parses a comma separated list of `<key>=<value>` tuples -// into a map[string]string. -func ParseKeyValues(input string, out map[string]string, sep string) error { - if out == nil { - return fmt.Errorf("'out is nil") - } - - keyValues := ParseDedupLowercaseAndSortStrings(input, sep) - if len(keyValues) == 0 { - return nil - } - - for _, keyValue := range keyValues { - shards := strings.Split(keyValue, "=") - if len(shards) != 2 { - return fmt.Errorf("invalid format") - } - - key := strings.TrimSpace(shards[0]) - value := strings.TrimSpace(shards[1]) - if key == "" || value == "" { - return fmt.Errorf("invalid pair: key: %q value: %q", key, value) - } - out[key] = value - } - return nil -} - -// ParseArbitraryKeyValues parses arbitrary <key,value> tuples.
The input -// can be one of the following: -// * JSON string -// * Base64 encoded JSON string -// * Comma separated list of `<key>=<value>` pairs -// * Base64 encoded string containing comma separated list of -// `<key>=<value>` pairs -// -// Input will be parsed into the output parameter, which should -// be a non-nil map[string]string. -func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error { - input = strings.TrimSpace(input) - if input == "" { - return nil - } - if out == nil { - return fmt.Errorf("'out' is nil") - } - - // Try to base64 decode the input. If successful, consider the decoded - // value as input. - inputBytes, err := base64.StdEncoding.DecodeString(input) - if err == nil { - input = string(inputBytes) - } - - // Try to JSON unmarshal the input. If successful, consider that the - // metadata was supplied as JSON input. - err = json.Unmarshal([]byte(input), &out) - if err != nil { - // If JSON unmarshaling fails, consider that the input was - // supplied as a comma separated string of 'key=value' pairs. - if err = ParseKeyValues(input, out, sep); err != nil { - return fmt.Errorf("failed to parse the input: %w", err) - } - } - - // Validate the parsed input - for key, value := range out { - if key != "" && value == "" { - return fmt.Errorf("invalid value for key %q", key) - } - } - - return nil -} - -// ParseStringSlice parses a `sep`-separated list of strings into a -// []string with surrounding whitespace removed. -// -// The output will always be a valid slice but may be of length zero. -func ParseStringSlice(input string, sep string) []string { - input = strings.TrimSpace(input) - if input == "" { - return []string{} - } - - splitStr := strings.Split(input, sep) - ret := make([]string, len(splitStr)) - for i, val := range splitStr { - ret[i] = strings.TrimSpace(val) - } - - return ret -} - -// ParseArbitraryStringSlice parses arbitrary string slice. The input -// can be one of the following: -// * JSON string -// * Base64 encoded JSON string -// * `sep` separated list of values -// * Base64-encoded string containing a `sep` separated list of values -// -// Note that the separator is ignored if the input is found to already be in a -// structured format (e.g., JSON) -// -// The output will always be a valid slice but may be of length zero. -func ParseArbitraryStringSlice(input string, sep string) []string { - input = strings.TrimSpace(input) - if input == "" { - return []string{} - } - - // Try to base64 decode the input. If successful, consider the decoded - // value as input. - inputBytes, err := base64.StdEncoding.DecodeString(input) - if err == nil { - input = string(inputBytes) - } - - ret := []string{} - - // Try to JSON unmarshal the input. If successful, consider that the - // metadata was supplied as JSON input. - err = json.Unmarshal([]byte(input), &ret) - if err != nil { - // If JSON unmarshaling fails, consider that the input was - // supplied as a separated string of values. - return ParseStringSlice(input, sep) - } - - if ret == nil { - return []string{} - } - - return ret -} - -// TrimStrings takes a slice of strings and returns a slice of strings -// with trimmed spaces -func TrimStrings(items []string) []string { - ret := make([]string, len(items)) - for i, item := range items { - ret[i] = strings.TrimSpace(item) - } - return ret -} - -// RemoveDuplicates removes duplicate and empty elements from a slice of -// strings. This also may convert the items in the slice to lower case and -// returns a sorted slice.
-func RemoveDuplicates(items []string, lowercase bool) []string { - itemsMap := make(map[string]struct{}, len(items)) - for _, item := range items { - item = strings.TrimSpace(item) - if item == "" { - continue - } - if lowercase { - item = strings.ToLower(item) - } - itemsMap[item] = struct{}{} - } - items = make([]string, 0, len(itemsMap)) - for item := range itemsMap { - items = append(items, item) - } - sort.Strings(items) - return items -} - -// RemoveDuplicatesStable removes duplicate and empty elements from a slice of -// strings, preserving order (and case) of the original slice. -// In all cases, strings are compared after trimming whitespace -// If caseInsensitive, strings will be compared after ToLower() -func RemoveDuplicatesStable(items []string, caseInsensitive bool) []string { - itemsMap := make(map[string]struct{}, len(items)) - deduplicated := make([]string, 0, len(items)) - - for _, item := range items { - key := strings.TrimSpace(item) - if _, ok := itemsMap[key]; ok || key == "" { - continue - } - if caseInsensitive { - key = strings.ToLower(key) - } - if _, ok := itemsMap[key]; ok { - continue - } - itemsMap[key] = struct{}{} - deduplicated = append(deduplicated, item) - } - return deduplicated -} - -// RemoveEmpty removes empty elements from a slice of -// strings -func RemoveEmpty(items []string) []string { - if len(items) == 0 { - return items - } - itemsSlice := make([]string, 0, len(items)) - for _, item := range items { - if item == "" { - continue - } - itemsSlice = append(itemsSlice, item) - } - return itemsSlice -} - -// EquivalentSlices checks whether the given string sets are equivalent, as in, -// they contain the same values. -func EquivalentSlices(a, b []string) bool { - if a == nil && b == nil { - return true - } - - if a == nil || b == nil { - return false - } - - // First we'll build maps to ensure unique values - mapA := make(map[string]struct{}, len(a)) - mapB := make(map[string]struct{}, len(b)) - for _, keyA := range a { - mapA[keyA] = struct{}{} - } - for _, keyB := range b { - mapB[keyB] = struct{}{} - } - - // Now we'll build our checking slices - sortedA := make([]string, 0, len(mapA)) - sortedB := make([]string, 0, len(mapB)) - for keyA := range mapA { - sortedA = append(sortedA, keyA) - } - for keyB := range mapB { - sortedB = append(sortedB, keyB) - } - sort.Strings(sortedA) - sort.Strings(sortedB) - - // Finally, compare - if len(sortedA) != len(sortedB) { - return false - } - - for i := range sortedA { - if sortedA[i] != sortedB[i] { - return false - } - } - - return true -} - -// EqualStringMaps tests whether two map[string]string objects are equal. -// Equal means both maps have the same sets of keys and values. This function -// is 6-10x faster than a call to reflect.DeepEqual(). -func EqualStringMaps(a, b map[string]string) bool { - if len(a) != len(b) { - return false - } - - for k := range a { - v, ok := b[k] - if !ok || a[k] != v { - return false - } - } - - return true -} - -// StrListDelete removes the first occurrence of the given item from the slice -// of strings if the item exists. -func StrListDelete(s []string, d string) []string { - if s == nil { - return s - } - - for index, element := range s { - if element == d { - return append(s[:index], s[index+1:]...) - } - } - - return s -} - -// GlobbedStringsMatch compares item to val with support for a leading and/or -// trailing wildcard '*' in item. 
-func GlobbedStringsMatch(item, val string) bool { - if len(item) < 2 { - return val == item - } - - hasPrefix := strings.HasPrefix(item, "*") - hasSuffix := strings.HasSuffix(item, "*") - - if hasPrefix && hasSuffix { - return strings.Contains(val, item[1:len(item)-1]) - } else if hasPrefix { - return strings.HasSuffix(val, item[1:]) - } else if hasSuffix { - return strings.HasPrefix(val, item[:len(item)-1]) - } - - return val == item -} - -// AppendIfMissing adds a string to a slice if the given string is not present -func AppendIfMissing(slice []string, i string) []string { - if StrListContains(slice, i) { - return slice - } - return append(slice, i) -} - -// MergeSlices adds an arbitrary number of slices together, uniquely -func MergeSlices(args ...[]string) []string { - all := map[string]struct{}{} - for _, slice := range args { - for _, v := range slice { - all[v] = struct{}{} - } - } - - result := make([]string, 0, len(all)) - for k := range all { - result = append(result, k) - } - sort.Strings(result) - return result -} - -// Difference returns the set difference (A - B) of the two given slices. The -// result will also remove any duplicated values in set A regardless of whether -// that matches any values in set B. -func Difference(a, b []string, lowercase bool) []string { - if len(a) == 0 { - return a - } - if len(b) == 0 { - if !lowercase { - return a - } - newA := make([]string, len(a)) - for i, v := range a { - newA[i] = strings.ToLower(v) - } - return newA - } - - a = RemoveDuplicates(a, lowercase) - b = RemoveDuplicates(b, lowercase) - - itemsMap := map[string]struct{}{} - for _, aVal := range a { - itemsMap[aVal] = struct{}{} - } - - // Perform difference calculation - for _, bVal := range b { - if _, ok := itemsMap[bVal]; ok { - delete(itemsMap, bVal) - } - } - - items := []string{} - for item := range itemsMap { - items = append(items, item) - } - sort.Strings(items) - return items -} - -// GetString attempts to retrieve a value from the provided map and assert that it is a string. If the key does not -// exist in the map, this will return an empty string. If the key exists, but the value is not a string type, this will -// return an error. 
If no map or key is provied, this will return an error -func GetString(m map[string]interface{}, key string) (string, error) { - if m == nil { - return "", fmt.Errorf("missing map") - } - if key == "" { - return "", fmt.Errorf("missing key") - } - - rawVal, ok := m[key] - if !ok { - return "", nil - } - - str, ok := rawVal.(string) - if !ok { - return "", fmt.Errorf("invalid value at %s: is a %T", key, rawVal) - } - return str, nil -} - -// Printable returns true if all characters in the string are printable -// according to Unicode -func Printable(s string) bool { - return strings.IndexFunc(s, func(c rune) bool { - return !unicode.IsPrint(c) - }) == -1 -} - -// StringListToInterfaceList simply takes a []string and turns it into a -// []interface{} to satisfy the input requirements for other library functions -func StringListToInterfaceList(in []string) []interface{} { - ret := make([]interface{}, len(in)) - for i, v := range in { - ret[i] = v - } - return ret -} - -// Reverse reverses the input string -func Reverse(in string) string { - l := len(in) - out := make([]byte, l) - for i := 0; i <= l/2; i++ { - out[i], out[l-1-i] = in[l-1-i], in[i] - } - return string(out) -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/.gitignore b/vendor/github.com/hashicorp/go-sockaddr/.gitignore deleted file mode 100644 index 41720b86..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -.cover.out* -coverage.html diff --git a/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile b/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile deleted file mode 100644 index 0f3ae166..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile +++ /dev/null @@ -1,65 +0,0 @@ -TOOLS= golang.org/x/tools/cover -GOCOVER_TMPFILE?= $(GOCOVER_FILE).tmp -GOCOVER_FILE?= .cover.out -GOCOVERHTML?= coverage.html -FIND=`/usr/bin/which 2> /dev/null gfind find | /usr/bin/grep -v ^no | /usr/bin/head -n 1` -XARGS=`/usr/bin/which 2> /dev/null gxargs xargs | /usr/bin/grep -v ^no | /usr/bin/head -n 1` - -test:: $(GOCOVER_FILE) - @$(MAKE) -C cmd/sockaddr test - -cover:: coverage_report - -$(GOCOVER_FILE):: - @${FIND} . -type d ! -path '*cmd*' ! -path '*.git*' -print0 | ${XARGS} -0 -I % sh -ec "cd % && rm -f $(GOCOVER_TMPFILE) && go test -coverprofile=$(GOCOVER_TMPFILE)" - - @echo 'mode: set' > $(GOCOVER_FILE) - @${FIND} . -type f ! -path '*cmd*' ! -path '*.git*' -name "$(GOCOVER_TMPFILE)" -print0 | ${XARGS} -0 -n1 cat $(GOCOVER_TMPFILE) | grep -v '^mode: ' >> ${PWD}/$(GOCOVER_FILE) - -$(GOCOVERHTML): $(GOCOVER_FILE) - go tool cover -html=$(GOCOVER_FILE) -o $(GOCOVERHTML) - -coverage_report:: $(GOCOVER_FILE) - go tool cover -html=$(GOCOVER_FILE) - -audit_tools:: - @go get -u github.com/golang/lint/golint && echo "Installed golint:" - @go get -u github.com/fzipp/gocyclo && echo "Installed gocyclo:" - @go get -u github.com/remyoudompheng/go-misc/deadcode && echo "Installed deadcode:" - @go get -u github.com/client9/misspell/cmd/misspell && echo "Installed misspell:" - @go get -u github.com/gordonklaus/ineffassign && echo "Installed ineffassign:" - -audit:: - deadcode - go tool vet -all *.go - go tool vet -shadow=true *.go - golint *.go - ineffassign . 
- gocyclo -over 65 *.go - misspell *.go - -clean:: - rm -f $(GOCOVER_FILE) $(GOCOVERHTML) - -dev:: - @go build - @$(MAKE) -B -C cmd/sockaddr sockaddr - -install:: - @go install - @$(MAKE) -C cmd/sockaddr install - -doc:: - @echo Visit: http://127.0.0.1:6161/pkg/github.com/hashicorp/go-sockaddr/ - godoc -http=:6161 -goroot $GOROOT - -world:: - @set -e; \ - for os in solaris darwin freebsd linux windows android; do \ - for arch in amd64; do \ - printf "Building on %s-%s\n" "$${os}" "$${arch}" ; \ - env GOOS="$${os}" GOARCH="$${arch}" go build -o /dev/null; \ - done; \ - done - - $(MAKE) -C cmd/sockaddr world diff --git a/vendor/github.com/hashicorp/go-sockaddr/LICENSE b/vendor/github.com/hashicorp/go-sockaddr/LICENSE deleted file mode 100644 index a612ad98..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/LICENSE +++ /dev/null @@ -1,373 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. 
For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. 
Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. 
However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. 
* -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-sockaddr/README.md b/vendor/github.com/hashicorp/go-sockaddr/README.md deleted file mode 100644 index a2e170ae..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/README.md +++ /dev/null @@ -1,118 +0,0 @@ -# go-sockaddr - -## `sockaddr` Library - -Socket address convenience functions for Go. `go-sockaddr` is a convenience -library that makes doing the right thing with IP addresses easy. 
`go-sockaddr`
-is loosely modeled after the UNIX `sockaddr_t` and creates a union of the family
-of `sockaddr_t` types (see below for an ascii diagram). Library documentation
-is available
-at
-[https://godoc.org/github.com/hashicorp/go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr).
-The primary intent of the library was to make it possible to define heuristics
-for selecting the correct IP addresses when a configuration is evaluated at
-runtime. See
-the
-[docs](https://godoc.org/github.com/hashicorp/go-sockaddr),
-[`template` package](https://godoc.org/github.com/hashicorp/go-sockaddr/template),
-tests,
-and
-[CLI utility](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr)
-for details and hints as to how to use this library.
-
-For example, with this library it is possible to find an IP address that:
-
-* is attached to a default route
-  ([`GetDefaultInterfaces()`](https://godoc.org/github.com/hashicorp/go-sockaddr#GetDefaultInterfaces))
-* is contained within a CIDR block ([`IfByNetwork()`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByNetwork))
-* is an RFC1918 address
-  ([`IfByRFC("1918")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC))
-* is ordered
-  ([`OrderedIfAddrBy(args)`](https://godoc.org/github.com/hashicorp/go-sockaddr#OrderedIfAddrBy) where
-  `args` includes, but is not limited
-  to,
-  [`AscIfType`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscIfType),
-  [`AscNetworkSize`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscNetworkSize))
-* excludes all IPv6 addresses
-  ([`IfByType("^(IPv4)$")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByType))
-* is larger than a `/32`
-  ([`IfByMaskSize(32)`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByMaskSize))
-* is not on a `down` interface
-  ([`ExcludeIfs("flags", "down")`](https://godoc.org/github.com/hashicorp/go-sockaddr#ExcludeIfs))
-* prefers an IPv6 address over an IPv4 address
-  ([`SortIfByType()`](https://godoc.org/github.com/hashicorp/go-sockaddr#SortIfByType) +
-  [`ReverseIfAddrs()`](https://godoc.org/github.com/hashicorp/go-sockaddr#ReverseIfAddrs)); and
-* excludes any IP address in RFC 6890
-  ([`IfByRFC("6890")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC))
-
-Or any combination or variation therein.
-
-There are also a few simple helper functions such as `GetPublicIP` and
-`GetPrivateIP` which both return strings and select the first public or private
-IP address on the default interface, respectively. Similarly, there is also a
-helper function called `GetInterfaceIP` which returns the first usable IP
-address on the named interface.
-
-## `sockaddr` CLI
-
-Given the possible complexity of the `sockaddr` library, there is a CLI utility
-that accompanies the library, also
-called
-[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr).
-The
-[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr)
-utility exposes nearly all of the functionality of the library and can be used
-either as an administrative tool or testing tool.
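As a concrete illustration of the helper functions described above (`GetPublicIP`, `GetPrivateIP`, and `GetInterfaceIP`, whose signatures appear in `ifaddr.go` later in this diff), here is a minimal sketch of a consumer program; the interface name `eth0` is only an example:

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	// First private (RFC 6890) address on an interface with a default route.
	privateIP, err := sockaddr.GetPrivateIP()
	if err != nil {
		log.Fatal(err)
	}

	// First public (non-RFC 6890) address on an interface with a default route.
	publicIP, err := sockaddr.GetPublicIP()
	if err != nil {
		log.Fatal(err)
	}

	// First usable address on the named interface; "eth0" is only an example.
	ifaceIP, err := sockaddr.GetInterfaceIP("eth0")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(privateIP, publicIP, ifaceIP)
}
```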
To install -the -[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr), -run: - -```text -$ go get -u github.com/hashicorp/go-sockaddr/cmd/sockaddr -``` - -If you're familiar with UNIX's `sockaddr` struct's, the following diagram -mapping the C `sockaddr` (top) to `go-sockaddr` structs (bottom) and -interfaces will be helpful: - -``` -+-------------------------------------------------------+ -| | -| sockaddr | -| SockAddr | -| | -| +--------------+ +----------------------------------+ | -| | sockaddr_un | | | | -| | SockAddrUnix | | sockaddr_in{,6} | | -| +--------------+ | IPAddr | | -| | | | -| | +-------------+ +--------------+ | | -| | | sockaddr_in | | sockaddr_in6 | | | -| | | IPv4Addr | | IPv6Addr | | | -| | +-------------+ +--------------+ | | -| | | | -| +----------------------------------+ | -| | -+-------------------------------------------------------+ -``` - -## Inspiration and Design - -There were many subtle inspirations that led to this design, but the most direct -inspiration for the filtering syntax was -OpenBSD's -[`pf.conf(5)`](https://www.freebsd.org/cgi/man.cgi?query=pf.conf&apropos=0&sektion=0&arch=default&format=html#PARAMETERS) firewall -syntax that lets you select the first IP address on a given named interface. -The original problem stemmed from: - -* needing to create immutable images using [Packer](https://www.packer.io) that - ran the [Consul](https://www.consul.io) process (Consul can only use one IP - address at a time); -* images that may or may not have multiple interfaces or IP addresses at - runtime; and -* we didn't want to rely on configuration management to render out the correct - IP address if the VM image was being used in an auto-scaling group. - -Instead we needed some way to codify a heuristic that would correctly select the -right IP address but the input parameters were not known when the image was -created. diff --git a/vendor/github.com/hashicorp/go-sockaddr/doc.go b/vendor/github.com/hashicorp/go-sockaddr/doc.go deleted file mode 100644 index 90671deb..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -/* -Package sockaddr is a Go implementation of the UNIX socket family data types and -related helper functions. -*/ -package sockaddr diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go deleted file mode 100644 index 0811b275..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go +++ /dev/null @@ -1,254 +0,0 @@ -package sockaddr - -import "strings" - -// ifAddrAttrMap is a map of the IfAddr type-specific attributes. -var ifAddrAttrMap map[AttrName]func(IfAddr) string -var ifAddrAttrs []AttrName - -func init() { - ifAddrAttrInit() -} - -// GetPrivateIP returns a string with a single IP address that is part of RFC -// 6890 and has a default route. If the system can't determine its IP address -// or find an RFC 6890 IP address, an empty string will be returned instead. 
-// This function is the `eval` equivalent of: -// -// ``` -// $ sockaddr eval -r '{{GetPrivateInterfaces | attr "address"}}' -/// ``` -func GetPrivateIP() (string, error) { - privateIfs, err := GetPrivateInterfaces() - if err != nil { - return "", err - } - if len(privateIfs) < 1 { - return "", nil - } - - ifAddr := privateIfs[0] - ip := *ToIPAddr(ifAddr.SockAddr) - return ip.NetIP().String(), nil -} - -// GetPrivateIPs returns a string with all IP addresses that are part of RFC -// 6890 (regardless of whether or not there is a default route, unlike -// GetPublicIP). If the system can't find any RFC 6890 IP addresses, an empty -// string will be returned instead. This function is the `eval` equivalent of: -// -// ``` -// $ sockaddr eval -r '{{GetAllInterfaces | include "RFC" "6890" | join "address" " "}}' -/// ``` -func GetPrivateIPs() (string, error) { - ifAddrs, err := GetAllInterfaces() - if err != nil { - return "", err - } else if len(ifAddrs) < 1 { - return "", nil - } - - ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP) - if len(ifAddrs) == 0 { - return "", nil - } - - OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs) - - ifAddrs, _, err = IfByRFC("6890", ifAddrs) - if err != nil { - return "", err - } else if len(ifAddrs) == 0 { - return "", nil - } - - _, ifAddrs, err = IfByRFC(ForwardingBlacklistRFC, ifAddrs) - if err != nil { - return "", err - } else if len(ifAddrs) == 0 { - return "", nil - } - - ips := make([]string, 0, len(ifAddrs)) - for _, ifAddr := range ifAddrs { - ip := *ToIPAddr(ifAddr.SockAddr) - s := ip.NetIP().String() - ips = append(ips, s) - } - - return strings.Join(ips, " "), nil -} - -// GetPublicIP returns a string with a single IP address that is NOT part of RFC -// 6890 and has a default route. If the system can't determine its IP address -// or find a non RFC 6890 IP address, an empty string will be returned instead. -// This function is the `eval` equivalent of: -// -// ``` -// $ sockaddr eval -r '{{GetPublicInterfaces | attr "address"}}' -/// ``` -func GetPublicIP() (string, error) { - publicIfs, err := GetPublicInterfaces() - if err != nil { - return "", err - } else if len(publicIfs) < 1 { - return "", nil - } - - ifAddr := publicIfs[0] - ip := *ToIPAddr(ifAddr.SockAddr) - return ip.NetIP().String(), nil -} - -// GetPublicIPs returns a string with all IP addresses that are NOT part of RFC -// 6890 (regardless of whether or not there is a default route, unlike -// GetPublicIP). If the system can't find any non RFC 6890 IP addresses, an -// empty string will be returned instead. This function is the `eval` -// equivalent of: -// -// ``` -// $ sockaddr eval -r '{{GetAllInterfaces | exclude "RFC" "6890" | join "address" " "}}' -/// ``` -func GetPublicIPs() (string, error) { - ifAddrs, err := GetAllInterfaces() - if err != nil { - return "", err - } else if len(ifAddrs) < 1 { - return "", nil - } - - ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP) - if len(ifAddrs) == 0 { - return "", nil - } - - OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs) - - _, ifAddrs, err = IfByRFC("6890", ifAddrs) - if err != nil { - return "", err - } else if len(ifAddrs) == 0 { - return "", nil - } - - ips := make([]string, 0, len(ifAddrs)) - for _, ifAddr := range ifAddrs { - ip := *ToIPAddr(ifAddr.SockAddr) - s := ip.NetIP().String() - ips = append(ips, s) - } - - return strings.Join(ips, " "), nil -} - -// GetInterfaceIP returns a string with a single IP address sorted by the size -// of the network (i.e. 
IP addresses with a smaller netmask, larger network -// size, are sorted first). This function is the `eval` equivalent of: -// -// ``` -// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <> | sort "type,size" | include "flag" "forwardable" | attr "address" }}' -/// ``` -func GetInterfaceIP(namedIfRE string) (string, error) { - ifAddrs, err := GetAllInterfaces() - if err != nil { - return "", err - } - - ifAddrs, _, err = IfByName(namedIfRE, ifAddrs) - if err != nil { - return "", err - } - - ifAddrs, _, err = IfByFlag("forwardable", ifAddrs) - if err != nil { - return "", err - } - - ifAddrs, err = SortIfBy("+type,+size", ifAddrs) - if err != nil { - return "", err - } - - if len(ifAddrs) == 0 { - return "", err - } - - ip := ToIPAddr(ifAddrs[0].SockAddr) - if ip == nil { - return "", err - } - - return IPAddrAttr(*ip, "address"), nil -} - -// GetInterfaceIPs returns a string with all IPs, sorted by the size of the -// network (i.e. IP addresses with a smaller netmask, larger network size, are -// sorted first), on a named interface. This function is the `eval` equivalent -// of: -// -// ``` -// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <> | sort "type,size" | join "address" " "}}' -/// ``` -func GetInterfaceIPs(namedIfRE string) (string, error) { - ifAddrs, err := GetAllInterfaces() - if err != nil { - return "", err - } - - ifAddrs, _, err = IfByName(namedIfRE, ifAddrs) - if err != nil { - return "", err - } - - ifAddrs, err = SortIfBy("+type,+size", ifAddrs) - if err != nil { - return "", err - } - - if len(ifAddrs) == 0 { - return "", err - } - - ips := make([]string, 0, len(ifAddrs)) - for _, ifAddr := range ifAddrs { - ip := *ToIPAddr(ifAddr.SockAddr) - s := ip.NetIP().String() - ips = append(ips, s) - } - - return strings.Join(ips, " "), nil -} - -// IfAddrAttrs returns a list of attributes supported by the IfAddr type -func IfAddrAttrs() []AttrName { - return ifAddrAttrs -} - -// IfAddrAttr returns a string representation of an attribute for the given -// IfAddr. -func IfAddrAttr(ifAddr IfAddr, attrName AttrName) string { - fn, found := ifAddrAttrMap[attrName] - if !found { - return "" - } - - return fn(ifAddr) -} - -// ifAddrAttrInit is called once at init() -func ifAddrAttrInit() { - // Sorted for human readability - ifAddrAttrs = []AttrName{ - "flags", - "name", - } - - ifAddrAttrMap = map[AttrName]func(ifAddr IfAddr) string{ - "flags": func(ifAddr IfAddr) string { - return ifAddr.Interface.Flags.String() - }, - "name": func(ifAddr IfAddr) string { - return ifAddr.Interface.Name - }, - } -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go deleted file mode 100644 index 80f61bef..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go +++ /dev/null @@ -1,1304 +0,0 @@ -package sockaddr - -import ( - "encoding/binary" - "errors" - "fmt" - "math/big" - "net" - "regexp" - "sort" - "strconv" - "strings" -) - -var ( - // Centralize all regexps and regexp.Copy() where necessary. - signRE *regexp.Regexp = regexp.MustCompile(`^[\s]*[+-]`) - whitespaceRE *regexp.Regexp = regexp.MustCompile(`[\s]+`) - ifNameRE *regexp.Regexp = regexp.MustCompile(`^(?:Ethernet|Wireless LAN) adapter ([^:]+):`) - ipAddrRE *regexp.Regexp = regexp.MustCompile(`^ IPv[46] Address\. \. \. \. \. \. \. \. \. \. \. 
: ([^\s]+)`)
-)
-
-// IfAddrs is a slice of IfAddr
-type IfAddrs []IfAddr
-
-func (ifs IfAddrs) Len() int { return len(ifs) }
-
-// CmpIfAddrFunc is the function signature that must be met to be used in the
-// OrderedIfAddrBy multiIfAddrSorter
-type CmpIfAddrFunc func(p1, p2 *IfAddr) int
-
-// multiIfAddrSorter implements the Sort interface, sorting the IfAddrs within.
-type multiIfAddrSorter struct {
-	ifAddrs IfAddrs
-	cmp     []CmpIfAddrFunc
-}
-
-// Sort sorts the argument slice according to the Cmp functions passed to
-// OrderedIfAddrBy.
-func (ms *multiIfAddrSorter) Sort(ifAddrs IfAddrs) {
-	ms.ifAddrs = ifAddrs
-	sort.Sort(ms)
-}
-
-// OrderedIfAddrBy sorts IfAddrs by the list of sort function pointers.
-func OrderedIfAddrBy(cmpFuncs ...CmpIfAddrFunc) *multiIfAddrSorter {
-	return &multiIfAddrSorter{
-		cmp: cmpFuncs,
-	}
-}
-
-// Len is part of sort.Interface.
-func (ms *multiIfAddrSorter) Len() int {
-	return len(ms.ifAddrs)
-}
-
-// Less is part of sort.Interface. It is implemented by looping along the Cmp()
-// functions until it finds a comparison that is either less than or greater
-// than. A return value of 0 defers sorting to the next function in the
-// multisorter (which means the results of sorting may leave the results in a
-// non-deterministic order).
-func (ms *multiIfAddrSorter) Less(i, j int) bool {
-	p, q := &ms.ifAddrs[i], &ms.ifAddrs[j]
-	// Try all but the last comparison.
-	var k int
-	for k = 0; k < len(ms.cmp)-1; k++ {
-		cmp := ms.cmp[k]
-		x := cmp(p, q)
-		switch x {
-		case -1:
-			// p < q, so we have a decision.
-			return true
-		case 1:
-			// p > q, so we have a decision.
-			return false
-		}
-		// p == q; try the next comparison.
-	}
-	// All comparisons to here said "equal", so just return whatever the
-	// final comparison reports.
-	switch ms.cmp[k](p, q) {
-	case -1:
-		return true
-	case 1:
-		return false
-	default:
-		// Still a tie! Treat the pair as equal rather than falling through
-		// to dead code.
-		return false
-	}
-}
-
-// Swap is part of sort.Interface.
-func (ms *multiIfAddrSorter) Swap(i, j int) {
-	ms.ifAddrs[i], ms.ifAddrs[j] = ms.ifAddrs[j], ms.ifAddrs[i]
-}
-
-// AscIfAddress is a sorting function to sort IfAddrs by their respective
-// address. Non-equal types are deferred in the sort.
-func AscIfAddress(p1Ptr, p2Ptr *IfAddr) int {
-	return AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
-}
-
-// AscIfDefault is a sorting function to sort IfAddrs by whether or not they
-// have a default route. Non-equal types are deferred in the sort.
-//
-// FIXME: This is a particularly expensive sorting operation because of the
-// non-memoized calls to NewRouteInfo(). In an ideal world the routeInfo data
-// would be gathered once at the start of the sort and passed along as a
-// context or by wrapping the IfAddr type with this information (this would
-// also solve the inability to return errors and the possibility of failing
-// silently). Fortunately, N*log(N) where N = 3 is only ~6.2 invocations. Not
-// ideal, but not worth optimizing today. The common case is this gets called
-// once or twice. Patches welcome.
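To make the comparator chaining above concrete, here is a minimal sketch of a caller combining the `Asc*` functions; it assumes `ifAddrs` was obtained from `GetAllInterfaces` (defined later in this file):

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ifAddrs, err := sockaddr.GetAllInterfaces()
	if err != nil {
		log.Fatal(err)
	}

	// Comparators are tried left to right; a tie (0) defers to the next one.
	sockaddr.OrderedIfAddrBy(
		sockaddr.AscIfDefault,     // interfaces on the default route first
		sockaddr.AscIfType,        // then by address type
		sockaddr.AscIfNetworkSize, // then by network mask size
	).Sort(ifAddrs)

	for _, ifAddr := range ifAddrs {
		fmt.Println(ifAddr)
	}
}
```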
-func AscIfDefault(p1Ptr, p2Ptr *IfAddr) int { - ri, err := NewRouteInfo() - if err != nil { - return sortDeferDecision - } - - defaultIfName, err := ri.GetDefaultInterfaceName() - if err != nil { - return sortDeferDecision - } - - switch { - case p1Ptr.Interface.Name == defaultIfName && p2Ptr.Interface.Name == defaultIfName: - return sortDeferDecision - case p1Ptr.Interface.Name == defaultIfName: - return sortReceiverBeforeArg - case p2Ptr.Interface.Name == defaultIfName: - return sortArgBeforeReceiver - default: - return sortDeferDecision - } -} - -// AscIfName is a sorting function to sort IfAddrs by their interface names. -func AscIfName(p1Ptr, p2Ptr *IfAddr) int { - return strings.Compare(p1Ptr.Name, p2Ptr.Name) -} - -// AscIfNetworkSize is a sorting function to sort IfAddrs by their respective -// network mask size. -func AscIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int { - return AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr) -} - -// AscIfPort is a sorting function to sort IfAddrs by their respective -// port type. Non-equal types are deferred in the sort. -func AscIfPort(p1Ptr, p2Ptr *IfAddr) int { - return AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr) -} - -// AscIfPrivate is a sorting function to sort IfAddrs by "private" values before -// "public" values. Both IPv4 and IPv6 are compared against RFC6890 (RFC6890 -// includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and IPv6 -// includes RFC4193). -func AscIfPrivate(p1Ptr, p2Ptr *IfAddr) int { - return AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr) -} - -// AscIfType is a sorting function to sort IfAddrs by their respective address -// type. Non-equal types are deferred in the sort. -func AscIfType(p1Ptr, p2Ptr *IfAddr) int { - return AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr) -} - -// DescIfAddress is identical to AscIfAddress but reverse ordered. -func DescIfAddress(p1Ptr, p2Ptr *IfAddr) int { - return -1 * AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr) -} - -// DescIfDefault is identical to AscIfDefault but reverse ordered. -func DescIfDefault(p1Ptr, p2Ptr *IfAddr) int { - return -1 * AscIfDefault(p1Ptr, p2Ptr) -} - -// DescIfName is identical to AscIfName but reverse ordered. -func DescIfName(p1Ptr, p2Ptr *IfAddr) int { - return -1 * strings.Compare(p1Ptr.Name, p2Ptr.Name) -} - -// DescIfNetworkSize is identical to AscIfNetworkSize but reverse ordered. -func DescIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int { - return -1 * AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr) -} - -// DescIfPort is identical to AscIfPort but reverse ordered. -func DescIfPort(p1Ptr, p2Ptr *IfAddr) int { - return -1 * AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr) -} - -// DescIfPrivate is identical to AscIfPrivate but reverse ordered. -func DescIfPrivate(p1Ptr, p2Ptr *IfAddr) int { - return -1 * AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr) -} - -// DescIfType is identical to AscIfType but reverse ordered. -func DescIfType(p1Ptr, p2Ptr *IfAddr) int { - return -1 * AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr) -} - -// FilterIfByType filters IfAddrs and returns a list of the matching type -func FilterIfByType(ifAddrs IfAddrs, type_ SockAddrType) (matchedIfs, excludedIfs IfAddrs) { - excludedIfs = make(IfAddrs, 0, len(ifAddrs)) - matchedIfs = make(IfAddrs, 0, len(ifAddrs)) - - for _, ifAddr := range ifAddrs { - if ifAddr.SockAddr.Type()&type_ != 0 { - matchedIfs = append(matchedIfs, ifAddr) - } else { - excludedIfs = append(excludedIfs, ifAddr) - } - } - return matchedIfs, excludedIfs -} - -// IfAttr forwards the selector to IfAttr.Attr() for resolution. 
If there is
-// more than one IfAddr, only the first IfAddr is used.
-func IfAttr(selectorName string, ifAddr IfAddr) (string, error) {
-	attrName := AttrName(strings.ToLower(selectorName))
-	attrVal, err := ifAddr.Attr(attrName)
-	return attrVal, err
-}
-
-// IfAttrs forwards the selector to IfAttrs.Attr() for resolution. If there is
-// more than one IfAddr, only the first IfAddr is used.
-func IfAttrs(selectorName string, ifAddrs IfAddrs) (string, error) {
-	if len(ifAddrs) == 0 {
-		return "", nil
-	}
-
-	attrName := AttrName(strings.ToLower(selectorName))
-	attrVal, err := ifAddrs[0].Attr(attrName)
-	return attrVal, err
-}
-
-// GetAllInterfaces iterates over all available network interfaces, finds all
-// available IP addresses on each interface, converts them to
-// sockaddr.IPAddrs, and returns the result as an array of IfAddr.
-func GetAllInterfaces() (IfAddrs, error) {
-	ifs, err := net.Interfaces()
-	if err != nil {
-		return nil, err
-	}
-
-	ifAddrs := make(IfAddrs, 0, len(ifs))
-	for _, intf := range ifs {
-		addrs, err := intf.Addrs()
-		if err != nil {
-			return nil, err
-		}
-
-		for _, addr := range addrs {
-			var ipAddr IPAddr
-			ipAddr, err = NewIPAddr(addr.String())
-			if err != nil {
-				return IfAddrs{}, fmt.Errorf("unable to create an IP address from %q", addr.String())
-			}
-
-			ifAddr := IfAddr{
-				SockAddr:  ipAddr,
-				Interface: intf,
-			}
-			ifAddrs = append(ifAddrs, ifAddr)
-		}
-	}
-
-	return ifAddrs, nil
-}
-
-// GetDefaultInterfaces returns IfAddrs of the addresses attached to the default
-// route.
-func GetDefaultInterfaces() (IfAddrs, error) {
-	ri, err := NewRouteInfo()
-	if err != nil {
-		return nil, err
-	}
-
-	defaultIfName, err := ri.GetDefaultInterfaceName()
-	if err != nil {
-		return nil, err
-	}
-
-	var defaultIfs, ifAddrs IfAddrs
-	ifAddrs, err = GetAllInterfaces()
-	if err != nil {
-		return nil, err
-	}
-	for _, ifAddr := range ifAddrs {
-		if ifAddr.Name == defaultIfName {
-			defaultIfs = append(defaultIfs, ifAddr)
-		}
-	}
-
-	return defaultIfs, nil
-}
-
-// GetPrivateInterfaces returns an IfAddrs that are part of RFC 6890 and have a
-// default route. If the system can't determine its IP address or find an RFC
-// 6890 IP address, an empty IfAddrs will be returned instead. This function is
-// the `eval` equivalent of:
-//
-// ```
-// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | include "RFC" "6890" }}'
-/// ```
-func GetPrivateInterfaces() (IfAddrs, error) {
-	privateIfs, err := GetAllInterfaces()
-	if err != nil {
-		return IfAddrs{}, err
-	}
-	if len(privateIfs) == 0 {
-		return IfAddrs{}, nil
-	}
-
-	privateIfs, _ = FilterIfByType(privateIfs, TypeIP)
-	if len(privateIfs) == 0 {
-		return IfAddrs{}, nil
-	}
-
-	privateIfs, _, err = IfByFlag("forwardable", privateIfs)
-	if err != nil {
-		return IfAddrs{}, err
-	}
-
-	privateIfs, _, err = IfByFlag("up", privateIfs)
-	if err != nil {
-		return IfAddrs{}, err
-	}
-
-	if len(privateIfs) == 0 {
-		return IfAddrs{}, nil
-	}
-
-	OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(privateIfs)
-
-	privateIfs, _, err = IfByRFC("6890", privateIfs)
-	if err != nil {
-		return IfAddrs{}, err
-	} else if len(privateIfs) == 0 {
-		return IfAddrs{}, nil
-	}
-
-	return privateIfs, nil
-}
-
-// GetPublicInterfaces returns an IfAddrs that are NOT part of RFC 6890 and have a
-// default route. If the system can't determine its IP address or find a non
-// RFC 6890 IP address, an empty IfAddrs will be returned instead.
This -// function is the `eval` equivalent of: -// -// ``` -// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | exclude "RFC" "6890" }}' -/// ``` -func GetPublicInterfaces() (IfAddrs, error) { - publicIfs, err := GetAllInterfaces() - if err != nil { - return IfAddrs{}, err - } - if len(publicIfs) == 0 { - return IfAddrs{}, nil - } - - publicIfs, _ = FilterIfByType(publicIfs, TypeIP) - if len(publicIfs) == 0 { - return IfAddrs{}, nil - } - - publicIfs, _, err = IfByFlag("forwardable", publicIfs) - if err != nil { - return IfAddrs{}, err - } - - publicIfs, _, err = IfByFlag("up", publicIfs) - if err != nil { - return IfAddrs{}, err - } - - if len(publicIfs) == 0 { - return IfAddrs{}, nil - } - - OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(publicIfs) - - _, publicIfs, err = IfByRFC("6890", publicIfs) - if err != nil { - return IfAddrs{}, err - } else if len(publicIfs) == 0 { - return IfAddrs{}, nil - } - - return publicIfs, nil -} - -// IfByAddress returns a list of matched and non-matched IfAddrs, or an error if -// the regexp fails to compile. -func IfByAddress(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { - re, err := regexp.Compile(inputRe) - if err != nil { - return nil, nil, fmt.Errorf("Unable to compile address regexp %+q: %v", inputRe, err) - } - - matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) - excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) - for _, addr := range ifAddrs { - if re.MatchString(addr.SockAddr.String()) { - matchedAddrs = append(matchedAddrs, addr) - } else { - excludedAddrs = append(excludedAddrs, addr) - } - } - - return matchedAddrs, excludedAddrs, nil -} - -// IfByName returns a list of matched and non-matched IfAddrs, or an error if -// the regexp fails to compile. -func IfByName(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { - re, err := regexp.Compile(inputRe) - if err != nil { - return nil, nil, fmt.Errorf("Unable to compile name regexp %+q: %v", inputRe, err) - } - - matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) - excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) - for _, addr := range ifAddrs { - if re.MatchString(addr.Name) { - matchedAddrs = append(matchedAddrs, addr) - } else { - excludedAddrs = append(excludedAddrs, addr) - } - } - - return matchedAddrs, excludedAddrs, nil -} - -// IfByPort returns a list of matched and non-matched IfAddrs, or an error if -// the regexp fails to compile. -func IfByPort(inputRe string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) { - re, err := regexp.Compile(inputRe) - if err != nil { - return nil, nil, fmt.Errorf("Unable to compile port regexp %+q: %v", inputRe, err) - } - - ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP) - matchedIfs = make(IfAddrs, 0, len(ipIfs)) - excludedIfs = append(IfAddrs(nil), nonIfs...) - for _, addr := range ipIfs { - ipAddr := ToIPAddr(addr.SockAddr) - if ipAddr == nil { - continue - } - - port := strconv.FormatInt(int64((*ipAddr).IPPort()), 10) - if re.MatchString(port) { - matchedIfs = append(matchedIfs, addr) - } else { - excludedIfs = append(excludedIfs, addr) - } - } - - return matchedIfs, excludedIfs, nil -} - -// IfByRFC returns a list of matched and non-matched IfAddrs that contain the -// relevant RFC-specified traits. 
-func IfByRFC(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { - inputRFC, err := strconv.ParseUint(selectorParam, 10, 64) - if err != nil { - return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to parse RFC number %q: %v", selectorParam, err) - } - - matchedIfAddrs := make(IfAddrs, 0, len(ifAddrs)) - remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) - - rfcNetMap := KnownRFCs() - rfcNets, ok := rfcNetMap[uint(inputRFC)] - if !ok { - return nil, nil, fmt.Errorf("unsupported RFC %d", inputRFC) - } - - for _, ifAddr := range ifAddrs { - var contained bool - for _, rfcNet := range rfcNets { - if rfcNet.Contains(ifAddr.SockAddr) { - matchedIfAddrs = append(matchedIfAddrs, ifAddr) - contained = true - break - } - } - if !contained { - remainingIfAddrs = append(remainingIfAddrs, ifAddr) - } - } - - return matchedIfAddrs, remainingIfAddrs, nil -} - -// IfByRFCs returns a list of matched and non-matched IfAddrs that contain the -// relevant RFC-specified traits. Multiple RFCs can be specified and separated -// by the `|` symbol. No protection is taken to ensure an IfAddr does not end -// up in both the included and excluded list. -func IfByRFCs(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { - var includedIfs, excludedIfs IfAddrs - for _, rfcStr := range strings.Split(selectorParam, "|") { - includedRFCIfs, excludedRFCIfs, err := IfByRFC(rfcStr, ifAddrs) - if err != nil { - return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to lookup RFC number %q: %v", rfcStr, err) - } - includedIfs = append(includedIfs, includedRFCIfs...) - excludedIfs = append(excludedIfs, excludedRFCIfs...) - } - - return includedIfs, excludedIfs, nil -} - -// IfByMaskSize returns a list of matched and non-matched IfAddrs that have the -// matching mask size. -func IfByMaskSize(selectorParam string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) { - maskSize, err := strconv.ParseUint(selectorParam, 10, 64) - if err != nil { - return IfAddrs{}, IfAddrs{}, fmt.Errorf("invalid exclude size argument (%q): %v", selectorParam, err) - } - - ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP) - matchedIfs = make(IfAddrs, 0, len(ipIfs)) - excludedIfs = append(IfAddrs(nil), nonIfs...) - for _, addr := range ipIfs { - ipAddr := ToIPAddr(addr.SockAddr) - if ipAddr == nil { - return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to filter mask sizes on non-IP type %s: %v", addr.SockAddr.Type().String(), addr.SockAddr.String()) - } - - switch { - case (*ipAddr).Type()&TypeIPv4 != 0 && maskSize > 32: - return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv4 address: %d", maskSize) - case (*ipAddr).Type()&TypeIPv6 != 0 && maskSize > 128: - return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv6 address: %d", maskSize) - } - - if (*ipAddr).Maskbits() == int(maskSize) { - matchedIfs = append(matchedIfs, addr) - } else { - excludedIfs = append(excludedIfs, addr) - } - } - - return matchedIfs, excludedIfs, nil -} - -// IfByType returns a list of matching and non-matching IfAddr that match the -// specified type. For instance: -// -// include "type" "IPv4,IPv6" -// -// will include any IfAddrs that is either an IPv4 or IPv6 address. Any -// addresses on those interfaces that don't match will be included in the -// remainder results. 
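The matched/remainder convention used by `IfByRFC` and `IfByRFCs` above is easiest to see in a short sketch; the RFC numbers are examples taken from the surrounding doc comments:

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ifAddrs, err := sockaddr.GetAllInterfaces()
	if err != nil {
		log.Fatal(err)
	}

	// Split the addresses into RFC 6890 members and everything else.
	private, public, err := sockaddr.IfByRFC("6890", ifAddrs)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("RFC 6890:", private)
	fmt.Println("other:", public)

	// Multiple RFCs may be OR-ed together with the `|` separator.
	matched, _, err := sockaddr.IfByRFCs("1918|4193", ifAddrs)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("RFC 1918 or 4193:", matched)
}
```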
-func IfByType(inputTypes string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { - matchingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) - remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) - - ifTypes := strings.Split(strings.ToLower(inputTypes), "|") - for _, ifType := range ifTypes { - switch ifType { - case "ip", "ipv4", "ipv6", "unix": - // Valid types - default: - return nil, nil, fmt.Errorf("unsupported type %q %q", ifType, inputTypes) - } - } - - for _, ifAddr := range ifAddrs { - for _, ifType := range ifTypes { - var matched bool - switch { - case ifType == "ip" && ifAddr.SockAddr.Type()&TypeIP != 0: - matched = true - case ifType == "ipv4" && ifAddr.SockAddr.Type()&TypeIPv4 != 0: - matched = true - case ifType == "ipv6" && ifAddr.SockAddr.Type()&TypeIPv6 != 0: - matched = true - case ifType == "unix" && ifAddr.SockAddr.Type()&TypeUnix != 0: - matched = true - } - - if matched { - matchingIfAddrs = append(matchingIfAddrs, ifAddr) - } else { - remainingIfAddrs = append(remainingIfAddrs, ifAddr) - } - } - } - - return matchingIfAddrs, remainingIfAddrs, nil -} - -// IfByFlag returns a list of matching and non-matching IfAddrs that match the -// specified type. For instance: -// -// include "flag" "up,broadcast" -// -// will include any IfAddrs that have both the "up" and "broadcast" flags set. -// Any addresses on those interfaces that don't match will be omitted from the -// results. -func IfByFlag(inputFlags string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { - matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) - excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) - - var wantForwardable, - wantGlobalUnicast, - wantInterfaceLocalMulticast, - wantLinkLocalMulticast, - wantLinkLocalUnicast, - wantLoopback, - wantMulticast, - wantUnspecified bool - var ifFlags net.Flags - var checkFlags, checkAttrs bool - for _, flagName := range strings.Split(strings.ToLower(inputFlags), "|") { - switch flagName { - case "broadcast": - checkFlags = true - ifFlags = ifFlags | net.FlagBroadcast - case "down": - checkFlags = true - ifFlags = (ifFlags &^ net.FlagUp) - case "forwardable": - checkAttrs = true - wantForwardable = true - case "global unicast": - checkAttrs = true - wantGlobalUnicast = true - case "interface-local multicast": - checkAttrs = true - wantInterfaceLocalMulticast = true - case "link-local multicast": - checkAttrs = true - wantLinkLocalMulticast = true - case "link-local unicast": - checkAttrs = true - wantLinkLocalUnicast = true - case "loopback": - checkAttrs = true - checkFlags = true - ifFlags = ifFlags | net.FlagLoopback - wantLoopback = true - case "multicast": - checkAttrs = true - checkFlags = true - ifFlags = ifFlags | net.FlagMulticast - wantMulticast = true - case "point-to-point": - checkFlags = true - ifFlags = ifFlags | net.FlagPointToPoint - case "unspecified": - checkAttrs = true - wantUnspecified = true - case "up": - checkFlags = true - ifFlags = ifFlags | net.FlagUp - default: - return nil, nil, fmt.Errorf("Unknown interface flag: %+q", flagName) - } - } - - for _, ifAddr := range ifAddrs { - var matched bool - if checkFlags && ifAddr.Interface.Flags&ifFlags == ifFlags { - matched = true - } - if checkAttrs { - if ip := ToIPAddr(ifAddr.SockAddr); ip != nil { - netIP := (*ip).NetIP() - switch { - case wantGlobalUnicast && netIP.IsGlobalUnicast(): - matched = true - case wantInterfaceLocalMulticast && netIP.IsInterfaceLocalMulticast(): - matched = true - case wantLinkLocalMulticast && netIP.IsLinkLocalMulticast(): - matched = true - case 
wantLinkLocalUnicast && netIP.IsLinkLocalUnicast(): - matched = true - case wantLoopback && netIP.IsLoopback(): - matched = true - case wantMulticast && netIP.IsMulticast(): - matched = true - case wantUnspecified && netIP.IsUnspecified(): - matched = true - case wantForwardable && !IsRFC(ForwardingBlacklist, ifAddr.SockAddr): - matched = true - } - } - } - if matched { - matchedAddrs = append(matchedAddrs, ifAddr) - } else { - excludedAddrs = append(excludedAddrs, ifAddr) - } - } - return matchedAddrs, excludedAddrs, nil -} - -// IfByNetwork returns an IfAddrs that are equal to or included within the -// network passed in by selector. -func IfByNetwork(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, IfAddrs, error) { - var includedIfs, excludedIfs IfAddrs - for _, netStr := range strings.Split(selectorParam, "|") { - netAddr, err := NewIPAddr(netStr) - if err != nil { - return nil, nil, fmt.Errorf("unable to create an IP address from %+q: %v", netStr, err) - } - - for _, ifAddr := range inputIfAddrs { - if netAddr.Contains(ifAddr.SockAddr) { - includedIfs = append(includedIfs, ifAddr) - } else { - excludedIfs = append(excludedIfs, ifAddr) - } - } - } - - return includedIfs, excludedIfs, nil -} - -// IfAddrMath will return a new IfAddr struct with a mutated value. -func IfAddrMath(operation, value string, inputIfAddr IfAddr) (IfAddr, error) { - // Regexp used to enforce the sign being a required part of the grammar for - // some values. - signRe := signRE.Copy() - - switch strings.ToLower(operation) { - case "address": - // "address" operates on the IP address and is allowed to overflow or - // underflow networks, however it will wrap along the underlying address's - // underlying type. - - if !signRe.MatchString(value) { - return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation) - } - - switch sockType := inputIfAddr.SockAddr.Type(); sockType { - case TypeIPv4: - // 33 == Accept any uint32 value - // TODO(seanc@): Add the ability to parse hex - i, err := strconv.ParseInt(value, 10, 33) - if err != nil { - return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) - } - - ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr) - ipv4Uint32 := uint32(ipv4.Address) - ipv4Uint32 += uint32(i) - return IfAddr{ - SockAddr: IPv4Addr{ - Address: IPv4Address(ipv4Uint32), - Mask: ipv4.Mask, - }, - Interface: inputIfAddr.Interface, - }, nil - case TypeIPv6: - // 64 == Accept any int32 value - // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int. - i, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) - } - - ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr) - ipv6BigIntA := new(big.Int) - ipv6BigIntA.Set(ipv6.Address) - ipv6BigIntB := big.NewInt(i) - - ipv6Addr := ipv6BigIntA.Add(ipv6BigIntA, ipv6BigIntB) - ipv6Addr.And(ipv6Addr, ipv6HostMask) - - return IfAddr{ - SockAddr: IPv6Addr{ - Address: IPv6Address(ipv6Addr), - Mask: ipv6.Mask, - }, - Interface: inputIfAddr.Interface, - }, nil - default: - return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType) - } - case "network": - // "network" operates on the network address. Positive values start at the - // network address and negative values wrap at the network address, which - // means a "-1" value on a network will be the broadcast address after - // wrapping is applied. 
- - if !signRe.MatchString(value) { - return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation) - } - - switch sockType := inputIfAddr.SockAddr.Type(); sockType { - case TypeIPv4: - // 33 == Accept any uint32 value - // TODO(seanc@): Add the ability to parse hex - i, err := strconv.ParseInt(value, 10, 33) - if err != nil { - return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) - } - - ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr) - ipv4Uint32 := uint32(ipv4.NetworkAddress()) - - // Wrap along network mask boundaries. EZ-mode wrapping made possible by - // use of int64 vs a uint. - var wrappedMask int64 - if i >= 0 { - wrappedMask = i - } else { - wrappedMask = 1 + i + int64(^uint32(ipv4.Mask)) - } - - ipv4Uint32 = ipv4Uint32 + (uint32(wrappedMask) &^ uint32(ipv4.Mask)) - - return IfAddr{ - SockAddr: IPv4Addr{ - Address: IPv4Address(ipv4Uint32), - Mask: ipv4.Mask, - }, - Interface: inputIfAddr.Interface, - }, nil - case TypeIPv6: - // 64 == Accept any int32 value - // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int. - i, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) - } - - ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr) - ipv6BigInt := new(big.Int) - ipv6BigInt.Set(ipv6.NetworkAddress()) - - mask := new(big.Int) - mask.Set(ipv6.Mask) - if i > 0 { - wrappedMask := new(big.Int) - wrappedMask.SetInt64(i) - - wrappedMask.AndNot(wrappedMask, mask) - ipv6BigInt.Add(ipv6BigInt, wrappedMask) - } else { - // Mask off any bits that exceed the network size. Subtract the - // wrappedMask from the last usable - 1 - wrappedMask := new(big.Int) - wrappedMask.SetInt64(-1 * i) - wrappedMask.Sub(wrappedMask, big.NewInt(1)) - - wrappedMask.AndNot(wrappedMask, mask) - - lastUsable := new(big.Int) - lastUsable.Set(ipv6.LastUsable().(IPv6Addr).Address) - - ipv6BigInt = lastUsable.Sub(lastUsable, wrappedMask) - } - - return IfAddr{ - SockAddr: IPv6Addr{ - Address: IPv6Address(ipv6BigInt), - Mask: ipv6.Mask, - }, - Interface: inputIfAddr.Interface, - }, nil - default: - return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType) - } - case "mask": - // "mask" operates on the IP address and returns the IP address on - // which the given integer mask has been applied. If the applied mask - // corresponds to a larger network than the mask of the IP address, - // the latter will be replaced by the former. 
-		switch sockType := inputIfAddr.SockAddr.Type(); sockType {
-		case TypeIPv4:
-			i, err := strconv.ParseUint(value, 10, 32)
-			if err != nil {
-				return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
-			}
-
-			if i > 32 {
-				return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv4 addresses must be between 0 and 32", operation)
-			}
-
-			ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr)
-
-			ipv4Mask := net.CIDRMask(int(i), 32)
-			ipv4MaskUint32 := binary.BigEndian.Uint32(ipv4Mask)
-
-			maskedIpv4 := ipv4.NetIP().Mask(ipv4Mask)
-			maskedIpv4Uint32 := binary.BigEndian.Uint32(maskedIpv4)
-
-			maskedIpv4MaskUint32 := uint32(ipv4.Mask)
-
-			if ipv4MaskUint32 < maskedIpv4MaskUint32 {
-				maskedIpv4MaskUint32 = ipv4MaskUint32
-			}
-
-			return IfAddr{
-				SockAddr: IPv4Addr{
-					Address: IPv4Address(maskedIpv4Uint32),
-					Mask:    IPv4Mask(maskedIpv4MaskUint32),
-				},
-				Interface: inputIfAddr.Interface,
-			}, nil
-		case TypeIPv6:
-			i, err := strconv.ParseUint(value, 10, 32)
-			if err != nil {
-				return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
-			}
-
-			if i > 128 {
-				return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv6 addresses must be between 0 and 128", operation)
-			}
-
-			ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr)
-
-			ipv6Mask := net.CIDRMask(int(i), 128)
-			ipv6MaskBigInt := new(big.Int)
-			ipv6MaskBigInt.SetBytes(ipv6Mask)
-
-			maskedIpv6 := ipv6.NetIP().Mask(ipv6Mask)
-			maskedIpv6BigInt := new(big.Int)
-			maskedIpv6BigInt.SetBytes(maskedIpv6)
-
-			maskedIpv6MaskBigInt := new(big.Int)
-			maskedIpv6MaskBigInt.Set(ipv6.Mask)
-
-			if ipv6MaskBigInt.Cmp(maskedIpv6MaskBigInt) == -1 {
-				maskedIpv6MaskBigInt = ipv6MaskBigInt
-			}
-
-			return IfAddr{
-				SockAddr: IPv6Addr{
-					Address: IPv6Address(maskedIpv6BigInt),
-					Mask:    IPv6Mask(maskedIpv6MaskBigInt),
-				},
-				Interface: inputIfAddr.Interface,
-			}, nil
-		default:
-			return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType)
-		}
-	default:
-		return IfAddr{}, fmt.Errorf("unsupported math operation: %q", operation)
-	}
-}
-
-// IfAddrsMath will apply an IfAddrMath operation to each IfAddr struct. Any
-// failure will result in zero results.
-func IfAddrsMath(operation, value string, inputIfAddrs IfAddrs) (IfAddrs, error) {
-	outputAddrs := make(IfAddrs, 0, len(inputIfAddrs))
-	for _, ifAddr := range inputIfAddrs {
-		result, err := IfAddrMath(operation, value, ifAddr)
-		if err != nil {
-			return IfAddrs{}, fmt.Errorf("unable to perform an IPMath operation on %s: %v", ifAddr, err)
-		}
-		outputAddrs = append(outputAddrs, result)
-	}
-	return outputAddrs, nil
-}
-
-// IncludeIfs returns an IfAddrs based on the passed in selector.
-func IncludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { - var includedIfs IfAddrs - var err error - - switch strings.ToLower(selectorName) { - case "address": - includedIfs, _, err = IfByAddress(selectorParam, inputIfAddrs) - case "flag", "flags": - includedIfs, _, err = IfByFlag(selectorParam, inputIfAddrs) - case "name": - includedIfs, _, err = IfByName(selectorParam, inputIfAddrs) - case "network": - includedIfs, _, err = IfByNetwork(selectorParam, inputIfAddrs) - case "port": - includedIfs, _, err = IfByPort(selectorParam, inputIfAddrs) - case "rfc", "rfcs": - includedIfs, _, err = IfByRFCs(selectorParam, inputIfAddrs) - case "size": - includedIfs, _, err = IfByMaskSize(selectorParam, inputIfAddrs) - case "type": - includedIfs, _, err = IfByType(selectorParam, inputIfAddrs) - default: - return IfAddrs{}, fmt.Errorf("invalid include selector %q", selectorName) - } - - if err != nil { - return IfAddrs{}, err - } - - return includedIfs, nil -} - -// ExcludeIfs returns an IfAddrs based on the passed in selector. -func ExcludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { - var excludedIfs IfAddrs - var err error - - switch strings.ToLower(selectorName) { - case "address": - _, excludedIfs, err = IfByAddress(selectorParam, inputIfAddrs) - case "flag", "flags": - _, excludedIfs, err = IfByFlag(selectorParam, inputIfAddrs) - case "name": - _, excludedIfs, err = IfByName(selectorParam, inputIfAddrs) - case "network": - _, excludedIfs, err = IfByNetwork(selectorParam, inputIfAddrs) - case "port": - _, excludedIfs, err = IfByPort(selectorParam, inputIfAddrs) - case "rfc", "rfcs": - _, excludedIfs, err = IfByRFCs(selectorParam, inputIfAddrs) - case "size": - _, excludedIfs, err = IfByMaskSize(selectorParam, inputIfAddrs) - case "type": - _, excludedIfs, err = IfByType(selectorParam, inputIfAddrs) - default: - return IfAddrs{}, fmt.Errorf("invalid exclude selector %q", selectorName) - } - - if err != nil { - return IfAddrs{}, err - } - - return excludedIfs, nil -} - -// SortIfBy returns an IfAddrs sorted based on the passed in selector. Multiple -// sort clauses can be passed in as a comma delimited list without whitespace. -func SortIfBy(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { - sortedIfs := append(IfAddrs(nil), inputIfAddrs...) - - clauses := strings.Split(selectorParam, ",") - sortFuncs := make([]CmpIfAddrFunc, len(clauses)) - - for i, clause := range clauses { - switch strings.TrimSpace(strings.ToLower(clause)) { - case "+address", "address": - // The "address" selector returns an array of IfAddrs - // ordered by the network address. IfAddrs that are not - // comparable will be at the end of the list and in a - // non-deterministic order. - sortFuncs[i] = AscIfAddress - case "-address": - sortFuncs[i] = DescIfAddress - case "+default", "default": - sortFuncs[i] = AscIfDefault - case "-default": - sortFuncs[i] = DescIfDefault - case "+name", "name": - // The "name" selector returns an array of IfAddrs - // ordered by the interface name. - sortFuncs[i] = AscIfName - case "-name": - sortFuncs[i] = DescIfName - case "+port", "port": - // The "port" selector returns an array of IfAddrs - // ordered by the port, if included in the IfAddr. - // IfAddrs that are not comparable will be at the end of - // the list and in a non-deterministic order. 
- sortFuncs[i] = AscIfPort - case "-port": - sortFuncs[i] = DescIfPort - case "+private", "private": - // The "private" selector returns an array of IfAddrs - // ordered by private addresses first. IfAddrs that are - // not comparable will be at the end of the list and in - // a non-deterministic order. - sortFuncs[i] = AscIfPrivate - case "-private": - sortFuncs[i] = DescIfPrivate - case "+size", "size": - // The "size" selector returns an array of IfAddrs - // ordered by the size of the network mask, smaller mask - // (larger number of hosts per network) to largest - // (e.g. a /24 sorts before a /32). - sortFuncs[i] = AscIfNetworkSize - case "-size": - sortFuncs[i] = DescIfNetworkSize - case "+type", "type": - // The "type" selector returns an array of IfAddrs - // ordered by the type of the IfAddr. The sort order is - // Unix, IPv4, then IPv6. - sortFuncs[i] = AscIfType - case "-type": - sortFuncs[i] = DescIfType - default: - // Return an empty list for invalid sort types. - return IfAddrs{}, fmt.Errorf("unknown sort type: %q", clause) - } - } - - OrderedIfAddrBy(sortFuncs...).Sort(sortedIfs) - - return sortedIfs, nil -} - -// UniqueIfAddrsBy creates a unique set of IfAddrs based on the matching -// selector. UniqueIfAddrsBy assumes the input has already been sorted. -func UniqueIfAddrsBy(selectorName string, inputIfAddrs IfAddrs) (IfAddrs, error) { - attrName := strings.ToLower(selectorName) - - ifs := make(IfAddrs, 0, len(inputIfAddrs)) - var lastMatch string - for _, ifAddr := range inputIfAddrs { - var out string - switch attrName { - case "address": - out = ifAddr.SockAddr.String() - case "name": - out = ifAddr.Name - default: - return nil, fmt.Errorf("unsupported unique constraint %+q", selectorName) - } - - switch { - case lastMatch == "", lastMatch != out: - lastMatch = out - ifs = append(ifs, ifAddr) - case lastMatch == out: - continue - } - } - - return ifs, nil -} - -// JoinIfAddrs joins an IfAddrs and returns a string -func JoinIfAddrs(selectorName string, joinStr string, inputIfAddrs IfAddrs) (string, error) { - outputs := make([]string, 0, len(inputIfAddrs)) - attrName := AttrName(strings.ToLower(selectorName)) - - for _, ifAddr := range inputIfAddrs { - var attrVal string - var err error - attrVal, err = ifAddr.Attr(attrName) - if err != nil { - return "", err - } - outputs = append(outputs, attrVal) - } - return strings.Join(outputs, joinStr), nil -} - -// LimitIfAddrs returns a slice of IfAddrs based on the specified limit. -func LimitIfAddrs(lim uint, in IfAddrs) (IfAddrs, error) { - // Clamp the limit to the length of the array - if int(lim) > len(in) { - lim = uint(len(in)) - } - - return in[0:lim], nil -} - -// OffsetIfAddrs returns a slice of IfAddrs based on the specified offset. -func OffsetIfAddrs(off int, in IfAddrs) (IfAddrs, error) { - var end bool - if off < 0 { - end = true - off = off * -1 - } - - if off > len(in) { - return IfAddrs{}, fmt.Errorf("unable to seek past the end of the interface array: offset (%d) exceeds the number of interfaces (%d)", off, len(in)) - } - - if end { - return in[len(in)-off:], nil - } - return in[off:], nil -} - -func (ifAddr IfAddr) String() string { - return fmt.Sprintf("%s %v", ifAddr.SockAddr, ifAddr.Interface) -} - -// parseDefaultIfNameFromRoute parses standard route(8)'s output for the *BSDs -// and Solaris. 
-func parseDefaultIfNameFromRoute(routeOut string) (string, error) { - lines := strings.Split(routeOut, "\n") - for _, line := range lines { - kvs := strings.SplitN(line, ":", 2) - if len(kvs) != 2 { - continue - } - - if strings.TrimSpace(kvs[0]) == "interface" { - ifName := strings.TrimSpace(kvs[1]) - return ifName, nil - } - } - - return "", errors.New("No default interface found") -} - -// parseDefaultIfNameFromIPCmd parses the default interface from ip(8) for -// Linux. -func parseDefaultIfNameFromIPCmd(routeOut string) (string, error) { - parsedLines := parseIfNameFromIPCmd(routeOut) - for _, parsedLine := range parsedLines { - if parsedLine[0] == "default" && - parsedLine[1] == "via" && - parsedLine[3] == "dev" { - ifName := strings.TrimSpace(parsedLine[4]) - return ifName, nil - } - } - - return "", errors.New("No default interface found") -} - -// parseDefaultIfNameFromIPCmdAndroid parses the default interface from ip(8) for -// Android. -func parseDefaultIfNameFromIPCmdAndroid(routeOut string) (string, error) { - parsedLines := parseIfNameFromIPCmd(routeOut) - if (len(parsedLines) > 0) { - ifName := strings.TrimSpace(parsedLines[0][4]) - return ifName, nil - } - - return "", errors.New("No default interface found") -} - - -// parseIfNameFromIPCmd parses interfaces from ip(8) for -// Linux. -func parseIfNameFromIPCmd(routeOut string) [][]string { - lines := strings.Split(routeOut, "\n") - re := whitespaceRE.Copy() - parsedLines := make([][]string, 0, len(lines)) - for _, line := range lines { - kvs := re.Split(line, -1) - if len(kvs) < 5 { - continue - } - parsedLines = append(parsedLines, kvs) - } - return parsedLines -} - -// parseDefaultIfNameWindows parses the default interface from `netstat -rn` and -// `ipconfig` on Windows. -func parseDefaultIfNameWindows(routeOut, ipconfigOut string) (string, error) { - defaultIPAddr, err := parseDefaultIPAddrWindowsRoute(routeOut) - if err != nil { - return "", err - } - - ifName, err := parseDefaultIfNameWindowsIPConfig(defaultIPAddr, ipconfigOut) - if err != nil { - return "", err - } - - return ifName, nil -} - -// parseDefaultIPAddrWindowsRoute parses the IP address on the default interface -// `netstat -rn`. -// -// NOTES(sean): Only IPv4 addresses are parsed at this time. If you have an -// IPv6 connected host, submit an issue on github.com/hashicorp/go-sockaddr with -// the output from `netstat -rn`, `ipconfig`, and version of Windows to see IPv6 -// support added. -func parseDefaultIPAddrWindowsRoute(routeOut string) (string, error) { - lines := strings.Split(routeOut, "\n") - re := whitespaceRE.Copy() - for _, line := range lines { - kvs := re.Split(strings.TrimSpace(line), -1) - if len(kvs) < 3 { - continue - } - - if kvs[0] == "0.0.0.0" && kvs[1] == "0.0.0.0" { - defaultIPAddr := strings.TrimSpace(kvs[3]) - return defaultIPAddr, nil - } - } - - return "", errors.New("No IP on default interface found") -} - -// parseDefaultIfNameWindowsIPConfig parses the output of `ipconfig` to find the -// interface name forwarding traffic to the default gateway. 
-func parseDefaultIfNameWindowsIPConfig(defaultIPAddr, routeOut string) (string, error) { - lines := strings.Split(routeOut, "\n") - ifNameRe := ifNameRE.Copy() - ipAddrRe := ipAddrRE.Copy() - var ifName string - for _, line := range lines { - switch ifNameMatches := ifNameRe.FindStringSubmatch(line); { - case len(ifNameMatches) > 1: - ifName = ifNameMatches[1] - continue - } - - switch ipAddrMatches := ipAddrRe.FindStringSubmatch(line); { - case len(ipAddrMatches) > 1 && ipAddrMatches[1] == defaultIPAddr: - return ifName, nil - } - } - - return "", errors.New("No default interface found with matching IP") -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifattr.go b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go deleted file mode 100644 index 6984cb4a..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/ifattr.go +++ /dev/null @@ -1,65 +0,0 @@ -package sockaddr - -import ( - "fmt" - "net" -) - -// IfAddr is a union of a SockAddr and a net.Interface. -type IfAddr struct { - SockAddr - net.Interface -} - -// Attr returns the named attribute as a string -func (ifAddr IfAddr) Attr(attrName AttrName) (string, error) { - val := IfAddrAttr(ifAddr, attrName) - if val != "" { - return val, nil - } - - return Attr(ifAddr.SockAddr, attrName) -} - -// Attr returns the named attribute as a string -func Attr(sa SockAddr, attrName AttrName) (string, error) { - switch sockType := sa.Type(); { - case sockType&TypeIP != 0: - ip := *ToIPAddr(sa) - attrVal := IPAddrAttr(ip, attrName) - if attrVal != "" { - return attrVal, nil - } - - if sockType == TypeIPv4 { - ipv4 := *ToIPv4Addr(sa) - attrVal := IPv4AddrAttr(ipv4, attrName) - if attrVal != "" { - return attrVal, nil - } - } else if sockType == TypeIPv6 { - ipv6 := *ToIPv6Addr(sa) - attrVal := IPv6AddrAttr(ipv6, attrName) - if attrVal != "" { - return attrVal, nil - } - } - - case sockType == TypeUnix: - us := *ToUnixSock(sa) - attrVal := UnixSockAttr(us, attrName) - if attrVal != "" { - return attrVal, nil - } - } - - // Non type-specific attributes - switch attrName { - case "string": - return sa.String(), nil - case "type": - return sa.Type().String(), nil - } - - return "", fmt.Errorf("unsupported attribute name %q", attrName) -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go deleted file mode 100644 index b47d15c2..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go +++ /dev/null @@ -1,169 +0,0 @@ -package sockaddr - -import ( - "fmt" - "math/big" - "net" - "strings" -) - -// Constants for the sizes of IPv3, IPv4, and IPv6 address types. -const ( - IPv3len = 6 - IPv4len = 4 - IPv6len = 16 -) - -// IPAddr is a generic IP address interface for IPv4 and IPv6 addresses, -// networks, and socket endpoints. -type IPAddr interface { - SockAddr - AddressBinString() string - AddressHexString() string - Cmp(SockAddr) int - CmpAddress(SockAddr) int - CmpPort(SockAddr) int - FirstUsable() IPAddr - Host() IPAddr - IPPort() IPPort - LastUsable() IPAddr - Maskbits() int - NetIP() *net.IP - NetIPMask() *net.IPMask - NetIPNet() *net.IPNet - Network() IPAddr - Octets() []int -} - -// IPPort is the type for an IP port number for the TCP and UDP IP transports. -type IPPort uint16 - -// IPPrefixLen is a typed integer representing the prefix length for a given -// IPAddr. -type IPPrefixLen byte - -// ipAddrAttrMap is a map of the IPAddr type-specific attributes. 
-var ipAddrAttrMap map[AttrName]func(IPAddr) string -var ipAddrAttrs []AttrName - -func init() { - ipAddrInit() -} - -// NewIPAddr creates a new IPAddr from a string. Returns nil if the string is -// not an IPv4 or an IPv6 address. -func NewIPAddr(addr string) (IPAddr, error) { - ipv4Addr, err := NewIPv4Addr(addr) - if err == nil { - return ipv4Addr, nil - } - - ipv6Addr, err := NewIPv6Addr(addr) - if err == nil { - return ipv6Addr, nil - } - - return nil, fmt.Errorf("invalid IPAddr %v", addr) -} - -// IPAddrAttr returns a string representation of an attribute for the given -// IPAddr. -func IPAddrAttr(ip IPAddr, selector AttrName) string { - fn, found := ipAddrAttrMap[selector] - if !found { - return "" - } - - return fn(ip) -} - -// IPAttrs returns a list of attributes supported by the IPAddr type -func IPAttrs() []AttrName { - return ipAddrAttrs -} - -// MustIPAddr is a helper method that must return an IPAddr or panic on invalid -// input. -func MustIPAddr(addr string) IPAddr { - ip, err := NewIPAddr(addr) - if err != nil { - panic(fmt.Sprintf("Unable to create an IPAddr from %+q: %v", addr, err)) - } - return ip -} - -// ipAddrInit is called once at init() -func ipAddrInit() { - // Sorted for human readability - ipAddrAttrs = []AttrName{ - "host", - "address", - "port", - "netmask", - "network", - "mask_bits", - "binary", - "hex", - "first_usable", - "last_usable", - "octets", - } - - ipAddrAttrMap = map[AttrName]func(ip IPAddr) string{ - "address": func(ip IPAddr) string { - return ip.NetIP().String() - }, - "binary": func(ip IPAddr) string { - return ip.AddressBinString() - }, - "first_usable": func(ip IPAddr) string { - return ip.FirstUsable().String() - }, - "hex": func(ip IPAddr) string { - return ip.AddressHexString() - }, - "host": func(ip IPAddr) string { - return ip.Host().String() - }, - "last_usable": func(ip IPAddr) string { - return ip.LastUsable().String() - }, - "mask_bits": func(ip IPAddr) string { - return fmt.Sprintf("%d", ip.Maskbits()) - }, - "netmask": func(ip IPAddr) string { - switch v := ip.(type) { - case IPv4Addr: - ipv4Mask := IPv4Addr{ - Address: IPv4Address(v.Mask), - Mask: IPv4HostMask, - } - return ipv4Mask.String() - case IPv6Addr: - ipv6Mask := new(big.Int) - ipv6Mask.Set(v.Mask) - ipv6MaskAddr := IPv6Addr{ - Address: IPv6Address(ipv6Mask), - Mask: ipv6HostMask, - } - return ipv6MaskAddr.String() - default: - return fmt.Sprintf("", ip) - } - }, - "network": func(ip IPAddr) string { - return ip.Network().NetIP().String() - }, - "octets": func(ip IPAddr) string { - octets := ip.Octets() - octetStrs := make([]string, 0, len(octets)) - for _, octet := range octets { - octetStrs = append(octetStrs, fmt.Sprintf("%d", octet)) - } - return strings.Join(octetStrs, " ") - }, - "port": func(ip IPAddr) string { - return fmt.Sprintf("%d", ip.IPPort()) - }, - } -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go deleted file mode 100644 index 6eeb7ddd..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go +++ /dev/null @@ -1,98 +0,0 @@ -package sockaddr - -import "bytes" - -type IPAddrs []IPAddr - -func (s IPAddrs) Len() int { return len(s) } -func (s IPAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// // SortIPAddrsByCmp is a type that satisfies sort.Interface and can be used -// // by the routines in this package. 
The SortIPAddrsByCmp type is used to -// // sort IPAddrs by Cmp() -// type SortIPAddrsByCmp struct{ IPAddrs } - -// // Less reports whether the element with index i should sort before the -// // element with index j. -// func (s SortIPAddrsByCmp) Less(i, j int) bool { -// // Sort by Type, then address, then port number. -// return Less(s.IPAddrs[i], s.IPAddrs[j]) -// } - -// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and -// can be used by the routines in this package. The -// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest -// network (most specific to largest network). -type SortIPAddrsByNetworkSize struct{ IPAddrs } - -// Less reports whether the element with index i should sort before the -// element with index j. -func (s SortIPAddrsByNetworkSize) Less(i, j int) bool { - // Sort masks with a larger binary value (i.e. fewer hosts per network - // prefix) after masks with a smaller value (larger number of hosts per - // prefix). - switch bytes.Compare([]byte(*s.IPAddrs[i].NetIPMask()), []byte(*s.IPAddrs[j].NetIPMask())) { - case 0: - // Fall through to the second test if the net.IPMasks are the - // same. - break - case 1: - return true - case -1: - return false - default: - panic("bad, m'kay?") - } - - // Sort IPs based on the length (i.e. prefer IPv4 over IPv6). - iLen := len(*s.IPAddrs[i].NetIP()) - jLen := len(*s.IPAddrs[j].NetIP()) - if iLen != jLen { - return iLen > jLen - } - - // Sort IPs based on their network address from lowest to highest. - switch bytes.Compare(s.IPAddrs[i].NetIPNet().IP, s.IPAddrs[j].NetIPNet().IP) { - case 0: - break - case 1: - return false - case -1: - return true - default: - panic("lol wut?") - } - - // If a host does not have a port set, it always sorts after hosts - // that have a port (e.g. a host with a /32 and port number is more - // specific and should sort first over a host with a /32 but no port - // set). - if s.IPAddrs[i].IPPort() == 0 || s.IPAddrs[j].IPPort() == 0 { - return false - } - return s.IPAddrs[i].IPPort() < s.IPAddrs[j].IPPort() -} - -// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and -// can be used by the routines in this package. The -// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest -// network (most specific to largest network). -type SortIPAddrsBySpecificMaskLen struct{ IPAddrs } - -// Less reports whether the element with index i should sort before the -// element with index j. -func (s SortIPAddrsBySpecificMaskLen) Less(i, j int) bool { - return s.IPAddrs[i].Maskbits() > s.IPAddrs[j].Maskbits() -} - -// SortIPAddrsByBroadMaskLen is a type that satisfies sort.Interface and can -// be used by the routines in this package. The SortIPAddrsByBroadMaskLen -// type is used to sort IPAddrs by largest network (i.e. largest subnets -// first). -type SortIPAddrsByBroadMaskLen struct{ IPAddrs } - -// Less reports whether the element with index i should sort before the -// element with index j. -func (s SortIPAddrsByBroadMaskLen) Less(i, j int) bool { - return s.IPAddrs[i].Maskbits() < s.IPAddrs[j].Maskbits() -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go deleted file mode 100644 index 4d395dc9..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go +++ /dev/null @@ -1,516 +0,0 @@ -package sockaddr - -import ( - "encoding/binary" - "fmt" - "net" - "regexp" - "strconv" - "strings" -) - -type ( - // IPv4Address is a named type representing an IPv4 address. 
- IPv4Address uint32 - - // IPv4Network is a named type representing an IPv4 network. - IPv4Network uint32 - - // IPv4Mask is a named type representing an IPv4 network mask. - IPv4Mask uint32 -) - -// IPv4HostMask is a constant represents a /32 IPv4 Address -// (i.e. 255.255.255.255). -const IPv4HostMask = IPv4Mask(0xffffffff) - -// ipv4AddrAttrMap is a map of the IPv4Addr type-specific attributes. -var ipv4AddrAttrMap map[AttrName]func(IPv4Addr) string -var ipv4AddrAttrs []AttrName -var trailingHexNetmaskRE *regexp.Regexp - -// IPv4Addr implements a convenience wrapper around the union of Go's -// built-in net.IP and net.IPNet types. In UNIX-speak, IPv4Addr implements -// `sockaddr` when the the address family is set to AF_INET -// (i.e. `sockaddr_in`). -type IPv4Addr struct { - IPAddr - Address IPv4Address - Mask IPv4Mask - Port IPPort -} - -func init() { - ipv4AddrInit() - trailingHexNetmaskRE = regexp.MustCompile(`/([0f]{8})$`) -} - -// NewIPv4Addr creates an IPv4Addr from a string. String can be in the form -// of either an IPv4:port (e.g. `1.2.3.4:80`, in which case the mask is -// assumed to be a `/32`), an IPv4 address (e.g. `1.2.3.4`, also with a `/32` -// mask), or an IPv4 CIDR (e.g. `1.2.3.4/24`, which has its IP port -// initialized to zero). ipv4Str can not be a hostname. -// -// NOTE: Many net.*() routines will initialize and return an IPv6 address. -// To create uint32 values from net.IP, always test to make sure the address -// returned can be converted to a 4 byte array using To4(). -func NewIPv4Addr(ipv4Str string) (IPv4Addr, error) { - // Strip off any bogus hex-encoded netmasks that will be mis-parsed by Go. In - // particular, clients with the Barracuda VPN client will see something like: - // `192.168.3.51/00ffffff` as their IP address. - trailingHexNetmaskRe := trailingHexNetmaskRE.Copy() - if match := trailingHexNetmaskRe.FindStringIndex(ipv4Str); match != nil { - ipv4Str = ipv4Str[:match[0]] - } - - // Parse as an IPv4 CIDR - ipAddr, network, err := net.ParseCIDR(ipv4Str) - if err == nil { - ipv4 := ipAddr.To4() - if ipv4 == nil { - return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address", ipv4Str) - } - - // If we see an IPv6 netmask, convert it to an IPv4 mask. - netmaskSepPos := strings.LastIndexByte(ipv4Str, '/') - if netmaskSepPos != -1 && netmaskSepPos+1 < len(ipv4Str) { - netMask, err := strconv.ParseUint(ipv4Str[netmaskSepPos+1:], 10, 8) - if err != nil { - return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: unable to parse CIDR netmask: %v", ipv4Str, err) - } else if netMask > 128 { - return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: invalid CIDR netmask", ipv4Str) - } - - if netMask >= 96 { - // Convert the IPv6 netmask to an IPv4 netmask - network.Mask = net.CIDRMask(int(netMask-96), IPv4len*8) - } - } - ipv4Addr := IPv4Addr{ - Address: IPv4Address(binary.BigEndian.Uint32(ipv4)), - Mask: IPv4Mask(binary.BigEndian.Uint32(network.Mask)), - } - return ipv4Addr, nil - } - - // Attempt to parse ipv4Str as a /32 host with a port number. 
- tcpAddr, err := net.ResolveTCPAddr("tcp4", ipv4Str) - if err == nil { - ipv4 := tcpAddr.IP.To4() - if ipv4 == nil { - return IPv4Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv4 address", ipv4Str) - } - - ipv4Uint32 := binary.BigEndian.Uint32(ipv4) - ipv4Addr := IPv4Addr{ - Address: IPv4Address(ipv4Uint32), - Mask: IPv4HostMask, - Port: IPPort(tcpAddr.Port), - } - - return ipv4Addr, nil - } - - // Parse as a naked IPv4 address - ip := net.ParseIP(ipv4Str) - if ip != nil { - ipv4 := ip.To4() - if ipv4 == nil { - return IPv4Addr{}, fmt.Errorf("Unable to string convert %+q to an IPv4 address", ipv4Str) - } - - ipv4Uint32 := binary.BigEndian.Uint32(ipv4) - ipv4Addr := IPv4Addr{ - Address: IPv4Address(ipv4Uint32), - Mask: IPv4HostMask, - } - return ipv4Addr, nil - } - - return IPv4Addr{}, fmt.Errorf("Unable to parse %+q to an IPv4 address: %v", ipv4Str, err) -} - -// AddressBinString returns a string with the IPv4Addr's Address represented -// as a sequence of '0' and '1' characters. This method is useful for -// debugging or by operators who want to inspect an address. -func (ipv4 IPv4Addr) AddressBinString() string { - return fmt.Sprintf("%032s", strconv.FormatUint(uint64(ipv4.Address), 2)) -} - -// AddressHexString returns a string with the IPv4Addr address represented as -// a sequence of hex characters. This method is useful for debugging or by -// operators who want to inspect an address. -func (ipv4 IPv4Addr) AddressHexString() string { - return fmt.Sprintf("%08s", strconv.FormatUint(uint64(ipv4.Address), 16)) -} - -// Broadcast is an IPv4Addr-only method that returns the broadcast address of -// the network. -// -// NOTE: IPv6 only supports multicast, so this method only exists for -// IPv4Addr. -func (ipv4 IPv4Addr) Broadcast() IPAddr { - // Nothing should listen on a broadcast address. - return IPv4Addr{ - Address: IPv4Address(ipv4.BroadcastAddress()), - Mask: IPv4HostMask, - } -} - -// BroadcastAddress returns a IPv4Network of the IPv4Addr's broadcast -// address. -func (ipv4 IPv4Addr) BroadcastAddress() IPv4Network { - return IPv4Network(uint32(ipv4.Address)&uint32(ipv4.Mask) | ^uint32(ipv4.Mask)) -} - -// CmpAddress follows the Cmp() standard protocol and returns: -// -// - -1 If the receiver should sort first because its address is lower than arg -// - 0 if the SockAddr arg is equal to the receiving IPv4Addr or the argument is -// of a different type. -// - 1 If the argument should sort first. -func (ipv4 IPv4Addr) CmpAddress(sa SockAddr) int { - ipv4b, ok := sa.(IPv4Addr) - if !ok { - return sortDeferDecision - } - - switch { - case ipv4.Address == ipv4b.Address: - return sortDeferDecision - case ipv4.Address < ipv4b.Address: - return sortReceiverBeforeArg - default: - return sortArgBeforeReceiver - } -} - -// CmpPort follows the Cmp() standard protocol and returns: -// -// - -1 If the receiver should sort first because its port is lower than arg -// - 0 if the SockAddr arg's port number is equal to the receiving IPv4Addr, -// regardless of type. -// - 1 If the argument should sort first. 
-func (ipv4 IPv4Addr) CmpPort(sa SockAddr) int { - var saPort IPPort - switch v := sa.(type) { - case IPv4Addr: - saPort = v.Port - case IPv6Addr: - saPort = v.Port - default: - return sortDeferDecision - } - - switch { - case ipv4.Port == saPort: - return sortDeferDecision - case ipv4.Port < saPort: - return sortReceiverBeforeArg - default: - return sortArgBeforeReceiver - } -} - -// CmpRFC follows the Cmp() standard protocol and returns: -// -// - -1 If the receiver should sort first because it belongs to the RFC and its -// arg does not -// - 0 if the receiver and arg both belong to the same RFC or neither do. -// - 1 If the arg belongs to the RFC but receiver does not. -func (ipv4 IPv4Addr) CmpRFC(rfcNum uint, sa SockAddr) int { - recvInRFC := IsRFC(rfcNum, ipv4) - ipv4b, ok := sa.(IPv4Addr) - if !ok { - // If the receiver is part of the desired RFC and the SockAddr - // argument is not, return -1 so that the receiver sorts before - // the non-IPv4 SockAddr. Conversely, if the receiver is not - // part of the RFC, punt on sorting and leave it for the next - // sorter. - if recvInRFC { - return sortReceiverBeforeArg - } else { - return sortDeferDecision - } - } - - argInRFC := IsRFC(rfcNum, ipv4b) - switch { - case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC): - // If a and b both belong to the RFC, or neither belong to - // rfcNum, defer sorting to the next sorter. - return sortDeferDecision - case recvInRFC && !argInRFC: - return sortReceiverBeforeArg - default: - return sortArgBeforeReceiver - } -} - -// Contains returns true if the SockAddr is contained within the receiver. -func (ipv4 IPv4Addr) Contains(sa SockAddr) bool { - ipv4b, ok := sa.(IPv4Addr) - if !ok { - return false - } - - return ipv4.ContainsNetwork(ipv4b) -} - -// ContainsAddress returns true if the IPv4Address is contained within the -// receiver. -func (ipv4 IPv4Addr) ContainsAddress(x IPv4Address) bool { - return IPv4Address(ipv4.NetworkAddress()) <= x && - IPv4Address(ipv4.BroadcastAddress()) >= x -} - -// ContainsNetwork returns true if the network from IPv4Addr is contained -// within the receiver. -func (ipv4 IPv4Addr) ContainsNetwork(x IPv4Addr) bool { - return ipv4.NetworkAddress() <= x.NetworkAddress() && - ipv4.BroadcastAddress() >= x.BroadcastAddress() -} - -// DialPacketArgs returns the arguments required to be passed to -// net.DialUDP(). If the Mask of ipv4 is not a /32 or the Port is 0, -// DialPacketArgs() will fail. See Host() to create an IPv4Addr with its -// mask set to /32. -func (ipv4 IPv4Addr) DialPacketArgs() (network, dialArgs string) { - if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 { - return "udp4", "" - } - return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) -} - -// DialStreamArgs returns the arguments required to be passed to -// net.DialTCP(). If the Mask of ipv4 is not a /32 or the Port is 0, -// DialStreamArgs() will fail. See Host() to create an IPv4Addr with its -// mask set to /32. -func (ipv4 IPv4Addr) DialStreamArgs() (network, dialArgs string) { - if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 { - return "tcp4", "" - } - return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) -} - -// Equal returns true if a SockAddr is equal to the receiving IPv4Addr. 
-func (ipv4 IPv4Addr) Equal(sa SockAddr) bool { - ipv4b, ok := sa.(IPv4Addr) - if !ok { - return false - } - - if ipv4.Port != ipv4b.Port { - return false - } - - if ipv4.Address != ipv4b.Address { - return false - } - - if ipv4.NetIPNet().String() != ipv4b.NetIPNet().String() { - return false - } - - return true -} - -// FirstUsable returns an IPv4Addr set to the first address following the -// network prefix. The first usable address in a network is normally the -// gateway and should not be used except by devices forwarding packets -// between two administratively distinct networks (i.e. a router). This -// function does not discriminate against first usable vs "first address that -// should be used." For example, FirstUsable() on "192.168.1.10/24" would -// return the address "192.168.1.1/24". -func (ipv4 IPv4Addr) FirstUsable() IPAddr { - addr := ipv4.NetworkAddress() - - // If /32, return the address itself. If /31 assume a point-to-point - // link and return the lower address. - if ipv4.Maskbits() < 31 { - addr++ - } - - return IPv4Addr{ - Address: IPv4Address(addr), - Mask: IPv4HostMask, - } -} - -// Host returns a copy of ipv4 with its mask set to /32 so that it can be -// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or -// ListenStreamArgs(). -func (ipv4 IPv4Addr) Host() IPAddr { - // Nothing should listen on a broadcast address. - return IPv4Addr{ - Address: ipv4.Address, - Mask: IPv4HostMask, - Port: ipv4.Port, - } -} - -// IPPort returns the Port number attached to the IPv4Addr -func (ipv4 IPv4Addr) IPPort() IPPort { - return ipv4.Port -} - -// LastUsable returns the last address before the broadcast address in a -// given network. -func (ipv4 IPv4Addr) LastUsable() IPAddr { - addr := ipv4.BroadcastAddress() - - // If /32, return the address itself. If /31 assume a point-to-point - // link and return the upper address. - if ipv4.Maskbits() < 31 { - addr-- - } - - return IPv4Addr{ - Address: IPv4Address(addr), - Mask: IPv4HostMask, - } -} - -// ListenPacketArgs returns the arguments required to be passed to -// net.ListenUDP(). If the Mask of ipv4 is not a /32, ListenPacketArgs() -// will fail. See Host() to create an IPv4Addr with its mask set to /32. -func (ipv4 IPv4Addr) ListenPacketArgs() (network, listenArgs string) { - if ipv4.Mask != IPv4HostMask { - return "udp4", "" - } - return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) -} - -// ListenStreamArgs returns the arguments required to be passed to -// net.ListenTCP(). If the Mask of ipv4 is not a /32, ListenStreamArgs() -// will fail. See Host() to create an IPv4Addr with its mask set to /32. -func (ipv4 IPv4Addr) ListenStreamArgs() (network, listenArgs string) { - if ipv4.Mask != IPv4HostMask { - return "tcp4", "" - } - return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) -} - -// Maskbits returns the number of network mask bits in a given IPv4Addr. For -// example, the Maskbits() of "192.168.1.1/24" would return 24. -func (ipv4 IPv4Addr) Maskbits() int { - mask := make(net.IPMask, IPv4len) - binary.BigEndian.PutUint32(mask, uint32(ipv4.Mask)) - maskOnes, _ := mask.Size() - return maskOnes -} - -// MustIPv4Addr is a helper method that must return an IPv4Addr or panic on -// invalid input. -func MustIPv4Addr(addr string) IPv4Addr { - ipv4, err := NewIPv4Addr(addr) - if err != nil { - panic(fmt.Sprintf("Unable to create an IPv4Addr from %+q: %v", addr, err)) - } - return ipv4 -} - -// NetIP returns the address as a net.IP (address is always presized to -// IPv4). 
-func (ipv4 IPv4Addr) NetIP() *net.IP { - x := make(net.IP, IPv4len) - binary.BigEndian.PutUint32(x, uint32(ipv4.Address)) - return &x -} - -// NetIPMask create a new net.IPMask from the IPv4Addr. -func (ipv4 IPv4Addr) NetIPMask() *net.IPMask { - ipv4Mask := net.IPMask{} - ipv4Mask = make(net.IPMask, IPv4len) - binary.BigEndian.PutUint32(ipv4Mask, uint32(ipv4.Mask)) - return &ipv4Mask -} - -// NetIPNet create a new net.IPNet from the IPv4Addr. -func (ipv4 IPv4Addr) NetIPNet() *net.IPNet { - ipv4net := &net.IPNet{} - ipv4net.IP = make(net.IP, IPv4len) - binary.BigEndian.PutUint32(ipv4net.IP, uint32(ipv4.NetworkAddress())) - ipv4net.Mask = *ipv4.NetIPMask() - return ipv4net -} - -// Network returns the network prefix or network address for a given network. -func (ipv4 IPv4Addr) Network() IPAddr { - return IPv4Addr{ - Address: IPv4Address(ipv4.NetworkAddress()), - Mask: ipv4.Mask, - } -} - -// NetworkAddress returns an IPv4Network of the IPv4Addr's network address. -func (ipv4 IPv4Addr) NetworkAddress() IPv4Network { - return IPv4Network(uint32(ipv4.Address) & uint32(ipv4.Mask)) -} - -// Octets returns a slice of the four octets in an IPv4Addr's Address. The -// order of the bytes is big endian. -func (ipv4 IPv4Addr) Octets() []int { - return []int{ - int(ipv4.Address >> 24), - int((ipv4.Address >> 16) & 0xff), - int((ipv4.Address >> 8) & 0xff), - int(ipv4.Address & 0xff), - } -} - -// String returns a string representation of the IPv4Addr -func (ipv4 IPv4Addr) String() string { - if ipv4.Port != 0 { - return fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) - } - - if ipv4.Maskbits() == 32 { - return ipv4.NetIP().String() - } - - return fmt.Sprintf("%s/%d", ipv4.NetIP().String(), ipv4.Maskbits()) -} - -// Type is used as a type switch and returns TypeIPv4 -func (IPv4Addr) Type() SockAddrType { - return TypeIPv4 -} - -// IPv4AddrAttr returns a string representation of an attribute for the given -// IPv4Addr. 
-func IPv4AddrAttr(ipv4 IPv4Addr, selector AttrName) string { - fn, found := ipv4AddrAttrMap[selector] - if !found { - return "" - } - - return fn(ipv4) -} - -// IPv4Attrs returns a list of attributes supported by the IPv4Addr type -func IPv4Attrs() []AttrName { - return ipv4AddrAttrs -} - -// ipv4AddrInit is called once at init() -func ipv4AddrInit() { - // Sorted for human readability - ipv4AddrAttrs = []AttrName{ - "size", // Same position as in IPv6 for output consistency - "broadcast", - "uint32", - } - - ipv4AddrAttrMap = map[AttrName]func(ipv4 IPv4Addr) string{ - "broadcast": func(ipv4 IPv4Addr) string { - return ipv4.Broadcast().String() - }, - "size": func(ipv4 IPv4Addr) string { - return fmt.Sprintf("%d", 1< 2 && ipv6Str[0] == '[' && ipv6Str[len(ipv6Str)-1] == ']' { - ipv6Str = ipv6Str[1 : len(ipv6Str)-1] - } - ip := net.ParseIP(ipv6Str) - if ip != nil { - ipv6 := ip.To16() - if ipv6 == nil { - return IPv6Addr{}, fmt.Errorf("Unable to string convert %+q to a 16byte IPv6 address", ipv6Str) - } - - ipv6BigIntAddr := new(big.Int) - ipv6BigIntAddr.SetBytes(ipv6) - - ipv6BigIntMask := new(big.Int) - ipv6BigIntMask.Set(ipv6HostMask) - - return IPv6Addr{ - Address: IPv6Address(ipv6BigIntAddr), - Mask: IPv6Mask(ipv6BigIntMask), - }, nil - } - - // Parse as an IPv6 CIDR - ipAddr, network, err := net.ParseCIDR(ipv6Str) - if err == nil { - ipv6 := ipAddr.To16() - if ipv6 == nil { - return IPv6Addr{}, fmt.Errorf("Unable to convert %+q to a 16byte IPv6 address", ipv6Str) - } - - ipv6BigIntAddr := new(big.Int) - ipv6BigIntAddr.SetBytes(ipv6) - - ipv6BigIntMask := new(big.Int) - ipv6BigIntMask.SetBytes(network.Mask) - - ipv6Addr := IPv6Addr{ - Address: IPv6Address(ipv6BigIntAddr), - Mask: IPv6Mask(ipv6BigIntMask), - } - return ipv6Addr, nil - } - - return IPv6Addr{}, fmt.Errorf("Unable to parse %+q to an IPv6 address: %v", ipv6Str, err) -} - -// AddressBinString returns a string with the IPv6Addr's Address represented -// as a sequence of '0' and '1' characters. This method is useful for -// debugging or by operators who want to inspect an address. -func (ipv6 IPv6Addr) AddressBinString() string { - bi := big.Int(*ipv6.Address) - return fmt.Sprintf("%0128s", bi.Text(2)) -} - -// AddressHexString returns a string with the IPv6Addr address represented as -// a sequence of hex characters. This method is useful for debugging or by -// operators who want to inspect an address. -func (ipv6 IPv6Addr) AddressHexString() string { - bi := big.Int(*ipv6.Address) - return fmt.Sprintf("%032s", bi.Text(16)) -} - -// CmpAddress follows the Cmp() standard protocol and returns: -// -// - -1 If the receiver should sort first because its address is lower than arg -// - 0 if the SockAddr arg equal to the receiving IPv6Addr or the argument is of a -// different type. -// - 1 If the argument should sort first. -func (ipv6 IPv6Addr) CmpAddress(sa SockAddr) int { - ipv6b, ok := sa.(IPv6Addr) - if !ok { - return sortDeferDecision - } - - ipv6aBigInt := new(big.Int) - ipv6aBigInt.Set(ipv6.Address) - ipv6bBigInt := new(big.Int) - ipv6bBigInt.Set(ipv6b.Address) - - return ipv6aBigInt.Cmp(ipv6bBigInt) -} - -// CmpPort follows the Cmp() standard protocol and returns: -// -// - -1 If the receiver should sort first because its port is lower than arg -// - 0 if the SockAddr arg's port number is equal to the receiving IPv6Addr, -// regardless of type. -// - 1 If the argument should sort first. 
-func (ipv6 IPv6Addr) CmpPort(sa SockAddr) int { - var saPort IPPort - switch v := sa.(type) { - case IPv4Addr: - saPort = v.Port - case IPv6Addr: - saPort = v.Port - default: - return sortDeferDecision - } - - switch { - case ipv6.Port == saPort: - return sortDeferDecision - case ipv6.Port < saPort: - return sortReceiverBeforeArg - default: - return sortArgBeforeReceiver - } -} - -// CmpRFC follows the Cmp() standard protocol and returns: -// -// - -1 If the receiver should sort first because it belongs to the RFC and its -// arg does not -// - 0 if the receiver and arg both belong to the same RFC or neither do. -// - 1 If the arg belongs to the RFC but receiver does not. -func (ipv6 IPv6Addr) CmpRFC(rfcNum uint, sa SockAddr) int { - recvInRFC := IsRFC(rfcNum, ipv6) - ipv6b, ok := sa.(IPv6Addr) - if !ok { - // If the receiver is part of the desired RFC and the SockAddr - // argument is not, sort receiver before the non-IPv6 SockAddr. - // Conversely, if the receiver is not part of the RFC, punt on - // sorting and leave it for the next sorter. - if recvInRFC { - return sortReceiverBeforeArg - } else { - return sortDeferDecision - } - } - - argInRFC := IsRFC(rfcNum, ipv6b) - switch { - case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC): - // If a and b both belong to the RFC, or neither belong to - // rfcNum, defer sorting to the next sorter. - return sortDeferDecision - case recvInRFC && !argInRFC: - return sortReceiverBeforeArg - default: - return sortArgBeforeReceiver - } -} - -// Contains returns true if the SockAddr is contained within the receiver. -func (ipv6 IPv6Addr) Contains(sa SockAddr) bool { - ipv6b, ok := sa.(IPv6Addr) - if !ok { - return false - } - - return ipv6.ContainsNetwork(ipv6b) -} - -// ContainsAddress returns true if the IPv6Address is contained within the -// receiver. -func (ipv6 IPv6Addr) ContainsAddress(x IPv6Address) bool { - xAddr := IPv6Addr{ - Address: x, - Mask: ipv6HostMask, - } - - { - xIPv6 := xAddr.FirstUsable().(IPv6Addr) - yIPv6 := ipv6.FirstUsable().(IPv6Addr) - if xIPv6.CmpAddress(yIPv6) >= 1 { - return false - } - } - - { - xIPv6 := xAddr.LastUsable().(IPv6Addr) - yIPv6 := ipv6.LastUsable().(IPv6Addr) - if xIPv6.CmpAddress(yIPv6) <= -1 { - return false - } - } - return true -} - -// ContainsNetwork returns true if the network from IPv6Addr is contained within -// the receiver. -func (x IPv6Addr) ContainsNetwork(y IPv6Addr) bool { - { - xIPv6 := x.FirstUsable().(IPv6Addr) - yIPv6 := y.FirstUsable().(IPv6Addr) - if ret := xIPv6.CmpAddress(yIPv6); ret >= 1 { - return false - } - } - - { - xIPv6 := x.LastUsable().(IPv6Addr) - yIPv6 := y.LastUsable().(IPv6Addr) - if ret := xIPv6.CmpAddress(yIPv6); ret <= -1 { - return false - } - } - return true -} - -// DialPacketArgs returns the arguments required to be passed to -// net.DialUDP(). If the Mask of ipv6 is not a /128 or the Port is 0, -// DialPacketArgs() will fail. See Host() to create an IPv6Addr with its -// mask set to /128. -func (ipv6 IPv6Addr) DialPacketArgs() (network, dialArgs string) { - ipv6Mask := big.Int(*ipv6.Mask) - if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 { - return "udp6", "" - } - return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) -} - -// DialStreamArgs returns the arguments required to be passed to -// net.DialTCP(). If the Mask of ipv6 is not a /128 or the Port is 0, -// DialStreamArgs() will fail. See Host() to create an IPv6Addr with its -// mask set to /128. 
-func (ipv6 IPv6Addr) DialStreamArgs() (network, dialArgs string) { - ipv6Mask := big.Int(*ipv6.Mask) - if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 { - return "tcp6", "" - } - return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) -} - -// Equal returns true if a SockAddr is equal to the receiving IPv4Addr. -func (ipv6a IPv6Addr) Equal(sa SockAddr) bool { - ipv6b, ok := sa.(IPv6Addr) - if !ok { - return false - } - - if ipv6a.NetIP().String() != ipv6b.NetIP().String() { - return false - } - - if ipv6a.NetIPNet().String() != ipv6b.NetIPNet().String() { - return false - } - - if ipv6a.Port != ipv6b.Port { - return false - } - - return true -} - -// FirstUsable returns an IPv6Addr set to the first address following the -// network prefix. The first usable address in a network is normally the -// gateway and should not be used except by devices forwarding packets -// between two administratively distinct networks (i.e. a router). This -// function does not discriminate against first usable vs "first address that -// should be used." For example, FirstUsable() on "2001:0db8::0003/64" would -// return "2001:0db8::00011". -func (ipv6 IPv6Addr) FirstUsable() IPAddr { - return IPv6Addr{ - Address: IPv6Address(ipv6.NetworkAddress()), - Mask: ipv6HostMask, - } -} - -// Host returns a copy of ipv6 with its mask set to /128 so that it can be -// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or -// ListenStreamArgs(). -func (ipv6 IPv6Addr) Host() IPAddr { - // Nothing should listen on a broadcast address. - return IPv6Addr{ - Address: ipv6.Address, - Mask: ipv6HostMask, - Port: ipv6.Port, - } -} - -// IPPort returns the Port number attached to the IPv6Addr -func (ipv6 IPv6Addr) IPPort() IPPort { - return ipv6.Port -} - -// LastUsable returns the last address in a given network. -func (ipv6 IPv6Addr) LastUsable() IPAddr { - addr := new(big.Int) - addr.Set(ipv6.Address) - - mask := new(big.Int) - mask.Set(ipv6.Mask) - - negMask := new(big.Int) - negMask.Xor(ipv6HostMask, mask) - - lastAddr := new(big.Int) - lastAddr.And(addr, mask) - lastAddr.Or(lastAddr, negMask) - - return IPv6Addr{ - Address: IPv6Address(lastAddr), - Mask: ipv6HostMask, - } -} - -// ListenPacketArgs returns the arguments required to be passed to -// net.ListenUDP(). If the Mask of ipv6 is not a /128, ListenPacketArgs() -// will fail. See Host() to create an IPv6Addr with its mask set to /128. -func (ipv6 IPv6Addr) ListenPacketArgs() (network, listenArgs string) { - ipv6Mask := big.Int(*ipv6.Mask) - if ipv6Mask.Cmp(ipv6HostMask) != 0 { - return "udp6", "" - } - return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) -} - -// ListenStreamArgs returns the arguments required to be passed to -// net.ListenTCP(). If the Mask of ipv6 is not a /128, ListenStreamArgs() -// will fail. See Host() to create an IPv6Addr with its mask set to /128. -func (ipv6 IPv6Addr) ListenStreamArgs() (network, listenArgs string) { - ipv6Mask := big.Int(*ipv6.Mask) - if ipv6Mask.Cmp(ipv6HostMask) != 0 { - return "tcp6", "" - } - return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) -} - -// Maskbits returns the number of network mask bits in a given IPv6Addr. For -// example, the Maskbits() of "2001:0db8::0003/64" would return 64. -func (ipv6 IPv6Addr) Maskbits() int { - maskOnes, _ := ipv6.NetIPNet().Mask.Size() - - return maskOnes -} - -// MustIPv6Addr is a helper method that must return an IPv6Addr or panic on -// invalid input. 
-func MustIPv6Addr(addr string) IPv6Addr { - ipv6, err := NewIPv6Addr(addr) - if err != nil { - panic(fmt.Sprintf("Unable to create an IPv6Addr from %+q: %v", addr, err)) - } - return ipv6 -} - -// NetIP returns the address as a net.IP. -func (ipv6 IPv6Addr) NetIP() *net.IP { - return bigIntToNetIPv6(ipv6.Address) -} - -// NetIPMask create a new net.IPMask from the IPv6Addr. -func (ipv6 IPv6Addr) NetIPMask() *net.IPMask { - ipv6Mask := make(net.IPMask, IPv6len) - m := big.Int(*ipv6.Mask) - copy(ipv6Mask, m.Bytes()) - return &ipv6Mask -} - -// Network returns a pointer to the net.IPNet within IPv4Addr receiver. -func (ipv6 IPv6Addr) NetIPNet() *net.IPNet { - ipv6net := &net.IPNet{} - ipv6net.IP = make(net.IP, IPv6len) - copy(ipv6net.IP, *ipv6.NetIP()) - ipv6net.Mask = *ipv6.NetIPMask() - return ipv6net -} - -// Network returns the network prefix or network address for a given network. -func (ipv6 IPv6Addr) Network() IPAddr { - return IPv6Addr{ - Address: IPv6Address(ipv6.NetworkAddress()), - Mask: ipv6.Mask, - } -} - -// NetworkAddress returns an IPv6Network of the IPv6Addr's network address. -func (ipv6 IPv6Addr) NetworkAddress() IPv6Network { - addr := new(big.Int) - addr.SetBytes((*ipv6.Address).Bytes()) - - mask := new(big.Int) - mask.SetBytes(*ipv6.NetIPMask()) - - netAddr := new(big.Int) - netAddr.And(addr, mask) - - return IPv6Network(netAddr) -} - -// Octets returns a slice of the 16 octets in an IPv6Addr's Address. The -// order of the bytes is big endian. -func (ipv6 IPv6Addr) Octets() []int { - x := make([]int, IPv6len) - for i, b := range *bigIntToNetIPv6(ipv6.Address) { - x[i] = int(b) - } - - return x -} - -// String returns a string representation of the IPv6Addr -func (ipv6 IPv6Addr) String() string { - if ipv6.Port != 0 { - return fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) - } - - if ipv6.Maskbits() == 128 { - return ipv6.NetIP().String() - } - - return fmt.Sprintf("%s/%d", ipv6.NetIP().String(), ipv6.Maskbits()) -} - -// Type is used as a type switch and returns TypeIPv6 -func (IPv6Addr) Type() SockAddrType { - return TypeIPv6 -} - -// IPv6Attrs returns a list of attributes supported by the IPv6Addr type -func IPv6Attrs() []AttrName { - return ipv6AddrAttrs -} - -// IPv6AddrAttr returns a string representation of an attribute for the given -// IPv6Addr. -func IPv6AddrAttr(ipv6 IPv6Addr, selector AttrName) string { - fn, found := ipv6AddrAttrMap[selector] - if !found { - return "" - } - - return fn(ipv6) -} - -// ipv6AddrInit is called once at init() -func ipv6AddrInit() { - // Sorted for human readability - ipv6AddrAttrs = []AttrName{ - "size", // Same position as in IPv6 for output consistency - "uint128", - } - - ipv6AddrAttrMap = map[AttrName]func(ipv6 IPv6Addr) string{ - "size": func(ipv6 IPv6Addr) string { - netSize := big.NewInt(1) - netSize = netSize.Lsh(netSize, uint(IPv6len*8-ipv6.Maskbits())) - return netSize.Text(10) - }, - "uint128": func(ipv6 IPv6Addr) string { - b := big.Int(*ipv6.Address) - return b.Text(10) - }, - } -} - -// bigIntToNetIPv6 is a helper function that correctly returns a net.IP with the -// correctly padded values. -func bigIntToNetIPv6(bi *big.Int) *net.IP { - x := make(net.IP, IPv6len) - ipv6Bytes := bi.Bytes() - - // It's possibe for ipv6Bytes to be less than IPv6len bytes in size. If - // they are different sizes we to pad the size of response. 
- if len(ipv6Bytes) < IPv6len { - buf := new(bytes.Buffer) - buf.Grow(IPv6len) - - for i := len(ipv6Bytes); i < IPv6len; i++ { - if err := binary.Write(buf, binary.BigEndian, byte(0)); err != nil { - panic(fmt.Sprintf("Unable to pad byte %d of input %v: %v", i, bi, err)) - } - } - - for _, b := range ipv6Bytes { - if err := binary.Write(buf, binary.BigEndian, b); err != nil { - panic(fmt.Sprintf("Unable to preserve endianness of input %v: %v", bi, err)) - } - } - - ipv6Bytes = buf.Bytes() - } - i := copy(x, ipv6Bytes) - if i != IPv6len { - panic("IPv6 wrong size") - } - return &x -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/rfc.go b/vendor/github.com/hashicorp/go-sockaddr/rfc.go deleted file mode 100644 index 02e188f6..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/rfc.go +++ /dev/null @@ -1,948 +0,0 @@ -package sockaddr - -// ForwardingBlacklist is a faux RFC that includes a list of non-forwardable IP -// blocks. -const ForwardingBlacklist = 4294967295 -const ForwardingBlacklistRFC = "4294967295" - -// IsRFC tests to see if an SockAddr matches the specified RFC -func IsRFC(rfcNum uint, sa SockAddr) bool { - rfcNetMap := KnownRFCs() - rfcNets, ok := rfcNetMap[rfcNum] - if !ok { - return false - } - - var contained bool - for _, rfcNet := range rfcNets { - if rfcNet.Contains(sa) { - contained = true - break - } - } - return contained -} - -// KnownRFCs returns an initial set of known RFCs. -// -// NOTE (sean@): As this list evolves over time, please submit patches to keep -// this list current. If something isn't right, inquire, as it may just be a -// bug on my part. Some of the inclusions were based on my judgement as to what -// would be a useful value (e.g. RFC3330). -// -// Useful resources: -// -// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml -// * https://www.iana.org/assignments/ipv6-unicast-address-assignments/ipv6-unicast-address-assignments.xhtml -// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml -func KnownRFCs() map[uint]SockAddrs { - // NOTE(sean@): Multiple SockAddrs per RFC lend themselves well to a - // RADIX tree, but `ENOTIME`. Patches welcome. - return map[uint]SockAddrs{ - 919: { - // [RFC919] Broadcasting Internet Datagrams - MustIPv4Addr("255.255.255.255/32"), // [RFC1122], §7 Broadcast IP Addressing - Proposed Standards - }, - 1122: { - // [RFC1122] Requirements for Internet Hosts -- Communication Layers - MustIPv4Addr("0.0.0.0/8"), // [RFC1122], §3.2.1.3 - MustIPv4Addr("127.0.0.0/8"), // [RFC1122], §3.2.1.3 - }, - 1112: { - // [RFC1112] Host Extensions for IP Multicasting - MustIPv4Addr("224.0.0.0/4"), // [RFC1112], §4 Host Group Addresses - }, - 1918: { - // [RFC1918] Address Allocation for Private Internets - MustIPv4Addr("10.0.0.0/8"), - MustIPv4Addr("172.16.0.0/12"), - MustIPv4Addr("192.168.0.0/16"), - }, - 2544: { - // [RFC2544] Benchmarking Methodology for Network - // Interconnect Devices - MustIPv4Addr("198.18.0.0/15"), - }, - 2765: { - // [RFC2765] Stateless IP/ICMP Translation Algorithm - // (SIIT) (obsoleted by RFCs 6145, which itself was - // later obsoleted by 7915). 
- - // [RFC2765], §2.1 Addresses - MustIPv6Addr("0:0:0:0:0:ffff:0:0/96"), - }, - 2928: { - // [RFC2928] Initial IPv6 Sub-TLA ID Assignments - MustIPv6Addr("2001::/16"), // Superblock - //MustIPv6Addr("2001:0000::/23"), // IANA - //MustIPv6Addr("2001:0200::/23"), // APNIC - //MustIPv6Addr("2001:0400::/23"), // ARIN - //MustIPv6Addr("2001:0600::/23"), // RIPE NCC - //MustIPv6Addr("2001:0800::/23"), // (future assignment) - // ... - //MustIPv6Addr("2001:FE00::/23"), // (future assignment) - }, - 3056: { // 6to4 address - // [RFC3056] Connection of IPv6 Domains via IPv4 Clouds - - // [RFC3056], §2 IPv6 Prefix Allocation - MustIPv6Addr("2002::/16"), - }, - 3068: { - // [RFC3068] An Anycast Prefix for 6to4 Relay Routers - // (obsolete by RFC7526) - - // [RFC3068], § 6to4 Relay anycast address - MustIPv4Addr("192.88.99.0/24"), - - // [RFC3068], §2.5 6to4 IPv6 relay anycast address - // - // NOTE: /120 == 128-(32-24) - MustIPv6Addr("2002:c058:6301::/120"), - }, - 3171: { - // [RFC3171] IANA Guidelines for IPv4 Multicast Address Assignments - MustIPv4Addr("224.0.0.0/4"), - }, - 3330: { - // [RFC3330] Special-Use IPv4 Addresses - - // Addresses in this block refer to source hosts on - // "this" network. Address 0.0.0.0/32 may be used as a - // source address for this host on this network; other - // addresses within 0.0.0.0/8 may be used to refer to - // specified hosts on this network [RFC1700, page 4]. - MustIPv4Addr("0.0.0.0/8"), - - // 10.0.0.0/8 - This block is set aside for use in - // private networks. Its intended use is documented in - // [RFC1918]. Addresses within this block should not - // appear on the public Internet. - MustIPv4Addr("10.0.0.0/8"), - - // 14.0.0.0/8 - This block is set aside for assignments - // to the international system of Public Data Networks - // [RFC1700, page 181]. The registry of assignments - // within this block can be accessed from the "Public - // Data Network Numbers" link on the web page at - // http://www.iana.org/numbers.html. Addresses within - // this block are assigned to users and should be - // treated as such. - - // 24.0.0.0/8 - This block was allocated in early 1996 - // for use in provisioning IP service over cable - // television systems. Although the IANA initially was - // involved in making assignments to cable operators, - // this responsibility was transferred to American - // Registry for Internet Numbers (ARIN) in May 2001. - // Addresses within this block are assigned in the - // normal manner and should be treated as such. - - // 39.0.0.0/8 - This block was used in the "Class A - // Subnet Experiment" that commenced in May 1995, as - // documented in [RFC1797]. The experiment has been - // completed and this block has been returned to the - // pool of addresses reserved for future allocation or - // assignment. This block therefore no longer has a - // special use and is subject to allocation to a - // Regional Internet Registry for assignment in the - // normal manner. - - // 127.0.0.0/8 - This block is assigned for use as the Internet host - // loopback address. A datagram sent by a higher level protocol to an - // address anywhere within this block should loop back inside the host. - // This is ordinarily implemented using only 127.0.0.1/32 for loopback, - // but no addresses within this block should ever appear on any network - // anywhere [RFC1700, page 5]. 
- MustIPv4Addr("127.0.0.0/8"), - - // 128.0.0.0/16 - This block, corresponding to the - // numerically lowest of the former Class B addresses, - // was initially and is still reserved by the IANA. - // Given the present classless nature of the IP address - // space, the basis for the reservation no longer - // applies and addresses in this block are subject to - // future allocation to a Regional Internet Registry for - // assignment in the normal manner. - - // 169.254.0.0/16 - This is the "link local" block. It - // is allocated for communication between hosts on a - // single link. Hosts obtain these addresses by - // auto-configuration, such as when a DHCP server may - // not be found. - MustIPv4Addr("169.254.0.0/16"), - - // 172.16.0.0/12 - This block is set aside for use in - // private networks. Its intended use is documented in - // [RFC1918]. Addresses within this block should not - // appear on the public Internet. - MustIPv4Addr("172.16.0.0/12"), - - // 191.255.0.0/16 - This block, corresponding to the numerically highest - // to the former Class B addresses, was initially and is still reserved - // by the IANA. Given the present classless nature of the IP address - // space, the basis for the reservation no longer applies and addresses - // in this block are subject to future allocation to a Regional Internet - // Registry for assignment in the normal manner. - - // 192.0.0.0/24 - This block, corresponding to the - // numerically lowest of the former Class C addresses, - // was initially and is still reserved by the IANA. - // Given the present classless nature of the IP address - // space, the basis for the reservation no longer - // applies and addresses in this block are subject to - // future allocation to a Regional Internet Registry for - // assignment in the normal manner. - - // 192.0.2.0/24 - This block is assigned as "TEST-NET" for use in - // documentation and example code. It is often used in conjunction with - // domain names example.com or example.net in vendor and protocol - // documentation. Addresses within this block should not appear on the - // public Internet. - MustIPv4Addr("192.0.2.0/24"), - - // 192.88.99.0/24 - This block is allocated for use as 6to4 relay - // anycast addresses, according to [RFC3068]. - MustIPv4Addr("192.88.99.0/24"), - - // 192.168.0.0/16 - This block is set aside for use in private networks. - // Its intended use is documented in [RFC1918]. Addresses within this - // block should not appear on the public Internet. - MustIPv4Addr("192.168.0.0/16"), - - // 198.18.0.0/15 - This block has been allocated for use - // in benchmark tests of network interconnect devices. - // Its use is documented in [RFC2544]. - MustIPv4Addr("198.18.0.0/15"), - - // 223.255.255.0/24 - This block, corresponding to the - // numerically highest of the former Class C addresses, - // was initially and is still reserved by the IANA. - // Given the present classless nature of the IP address - // space, the basis for the reservation no longer - // applies and addresses in this block are subject to - // future allocation to a Regional Internet Registry for - // assignment in the normal manner. - - // 224.0.0.0/4 - This block, formerly known as the Class - // D address space, is allocated for use in IPv4 - // multicast address assignments. The IANA guidelines - // for assignments from this space are described in - // [RFC3171]. - MustIPv4Addr("224.0.0.0/4"), - - // 240.0.0.0/4 - This block, formerly known as the Class E address - // space, is reserved. 
The "limited broadcast" destination address - // 255.255.255.255 should never be forwarded outside the (sub-)net of - // the source. The remainder of this space is reserved - // for future use. [RFC1700, page 4] - MustIPv4Addr("240.0.0.0/4"), - }, - 3849: { - // [RFC3849] IPv6 Address Prefix Reserved for Documentation - MustIPv6Addr("2001:db8::/32"), // [RFC3849], §4 IANA Considerations - }, - 3927: { - // [RFC3927] Dynamic Configuration of IPv4 Link-Local Addresses - MustIPv4Addr("169.254.0.0/16"), // [RFC3927], §2.1 Link-Local Address Selection - }, - 4038: { - // [RFC4038] Application Aspects of IPv6 Transition - - // [RFC4038], §4.2. IPv6 Applications in a Dual-Stack Node - MustIPv6Addr("0:0:0:0:0:ffff::/96"), - }, - 4193: { - // [RFC4193] Unique Local IPv6 Unicast Addresses - MustIPv6Addr("fc00::/7"), - }, - 4291: { - // [RFC4291] IP Version 6 Addressing Architecture - - // [RFC4291], §2.5.2 The Unspecified Address - MustIPv6Addr("::/128"), - - // [RFC4291], §2.5.3 The Loopback Address - MustIPv6Addr("::1/128"), - - // [RFC4291], §2.5.5.1. IPv4-Compatible IPv6 Address - MustIPv6Addr("::/96"), - - // [RFC4291], §2.5.5.2. IPv4-Mapped IPv6 Address - MustIPv6Addr("::ffff:0:0/96"), - - // [RFC4291], §2.5.6 Link-Local IPv6 Unicast Addresses - MustIPv6Addr("fe80::/10"), - - // [RFC4291], §2.5.7 Site-Local IPv6 Unicast Addresses - // (depreciated) - MustIPv6Addr("fec0::/10"), - - // [RFC4291], §2.7 Multicast Addresses - MustIPv6Addr("ff00::/8"), - - // IPv6 Multicast Information. - // - // In the following "table" below, `ff0x` is replaced - // with the following values depending on the scope of - // the query: - // - // IPv6 Multicast Scopes: - // * ff00/9 // reserved - // * ff01/9 // interface-local - // * ff02/9 // link-local - // * ff03/9 // realm-local - // * ff04/9 // admin-local - // * ff05/9 // site-local - // * ff08/9 // organization-local - // * ff0e/9 // global - // * ff0f/9 // reserved - // - // IPv6 Multicast Addresses: - // * ff0x::2 // All routers - // * ff02::5 // OSPFIGP - // * ff02::6 // OSPFIGP Designated Routers - // * ff02::9 // RIP Routers - // * ff02::a // EIGRP Routers - // * ff02::d // All PIM Routers - // * ff02::1a // All RPL Routers - // * ff0x::fb // mDNSv6 - // * ff0x::101 // All Network Time Protocol (NTP) servers - // * ff02::1:1 // Link Name - // * ff02::1:2 // All-dhcp-agents - // * ff02::1:3 // Link-local Multicast Name Resolution - // * ff05::1:3 // All-dhcp-servers - // * ff02::1:ff00:0/104 // Solicited-node multicast address. 
- // * ff02::2:ff00:0/104 // Node Information Queries - }, - 4380: { - // [RFC4380] Teredo: Tunneling IPv6 over UDP through - // Network Address Translations (NATs) - - // [RFC4380], §2.6 Global Teredo IPv6 Service Prefix - MustIPv6Addr("2001:0000::/32"), - }, - 4773: { - // [RFC4773] Administration of the IANA Special Purpose IPv6 Address Block - MustIPv6Addr("2001:0000::/23"), // IANA - }, - 4843: { - // [RFC4843] An IPv6 Prefix for Overlay Routable Cryptographic Hash Identifiers (ORCHID) - MustIPv6Addr("2001:10::/28"), // [RFC4843], §7 IANA Considerations - }, - 5180: { - // [RFC5180] IPv6 Benchmarking Methodology for Network Interconnect Devices - MustIPv6Addr("2001:0200::/48"), // [RFC5180], §8 IANA Considerations - }, - 5735: { - // [RFC5735] Special Use IPv4 Addresses - MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1 - MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2 - MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3 - MustIPv4Addr("198.18.0.0/15"), // Benchmarks - }, - 5737: { - // [RFC5737] IPv4 Address Blocks Reserved for Documentation - MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1 - MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2 - MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3 - }, - 6052: { - // [RFC6052] IPv6 Addressing of IPv4/IPv6 Translators - MustIPv6Addr("64:ff9b::/96"), // [RFC6052], §2.1. Well-Known Prefix - }, - 6333: { - // [RFC6333] Dual-Stack Lite Broadband Deployments Following IPv4 Exhaustion - MustIPv4Addr("192.0.0.0/29"), // [RFC6333], §5.7 Well-Known IPv4 Address - }, - 6598: { - // [RFC6598] IANA-Reserved IPv4 Prefix for Shared Address Space - MustIPv4Addr("100.64.0.0/10"), - }, - 6666: { - // [RFC6666] A Discard Prefix for IPv6 - MustIPv6Addr("0100::/64"), - }, - 6890: { - // [RFC6890] Special-Purpose IP Address Registries - - // From "RFC6890 §2.2.1 Information Requirements": - /* - The IPv4 and IPv6 Special-Purpose Address Registries maintain the - following information regarding each entry: - - o Address Block - A block of IPv4 or IPv6 addresses that has been - registered for a special purpose. - - o Name - A descriptive name for the special-purpose address block. - - o RFC - The RFC through which the special-purpose address block was - requested. - - o Allocation Date - The date upon which the special-purpose address - block was allocated. - - o Termination Date - The date upon which the allocation is to be - terminated. This field is applicable for limited-use allocations - only. - - o Source - A boolean value indicating whether an address from the - allocated special-purpose address block is valid when used as the - source address of an IP datagram that transits two devices. - - o Destination - A boolean value indicating whether an address from - the allocated special-purpose address block is valid when used as - the destination address of an IP datagram that transits two - devices. - - o Forwardable - A boolean value indicating whether a router may - forward an IP datagram whose destination address is drawn from the - allocated special-purpose address block between external - interfaces. - - o Global - A boolean value indicating whether an IP datagram whose - destination address is drawn from the allocated special-purpose - address block is forwardable beyond a specified administrative - domain. - - o Reserved-by-Protocol - A boolean value indicating whether the - special-purpose address block is reserved by IP, itself. 
This - value is "TRUE" if the RFC that created the special-purpose - address block requires all compliant IP implementations to behave - in a special way when processing packets either to or from - addresses contained by the address block. - - If the value of "Destination" is FALSE, the values of "Forwardable" - and "Global" must also be false. - */ - - /*+----------------------+----------------------------+ - * | Attribute | Value | - * +----------------------+----------------------------+ - * | Address Block | 0.0.0.0/8 | - * | Name | "This host on this network"| - * | RFC | [RFC1122], Section 3.2.1.3 | - * | Allocation Date | September 1981 | - * | Termination Date | N/A | - * | Source | True | - * | Destination | False | - * | Forwardable | False | - * | Global | False | - * | Reserved-by-Protocol | True | - * +----------------------+----------------------------+*/ - MustIPv4Addr("0.0.0.0/8"), - - /*+----------------------+---------------+ - * | Attribute | Value | - * +----------------------+---------------+ - * | Address Block | 10.0.0.0/8 | - * | Name | Private-Use | - * | RFC | [RFC1918] | - * | Allocation Date | February 1996 | - * | Termination Date | N/A | - * | Source | True | - * | Destination | True | - * | Forwardable | True | - * | Global | False | - * | Reserved-by-Protocol | False | - * +----------------------+---------------+ */ - MustIPv4Addr("10.0.0.0/8"), - - /*+----------------------+----------------------+ - | Attribute | Value | - +----------------------+----------------------+ - | Address Block | 100.64.0.0/10 | - | Name | Shared Address Space | - | RFC | [RFC6598] | - | Allocation Date | April 2012 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+----------------------+*/ - MustIPv4Addr("100.64.0.0/10"), - - /*+----------------------+----------------------------+ - | Attribute | Value | - +----------------------+----------------------------+ - | Address Block | 127.0.0.0/8 | - | Name | Loopback | - | RFC | [RFC1122], Section 3.2.1.3 | - | Allocation Date | September 1981 | - | Termination Date | N/A | - | Source | False [1] | - | Destination | False [1] | - | Forwardable | False [1] | - | Global | False [1] | - | Reserved-by-Protocol | True | - +----------------------+----------------------------+*/ - // [1] Several protocols have been granted exceptions to - // this rule. For examples, see [RFC4379] and - // [RFC5884]. 
- MustIPv4Addr("127.0.0.0/8"), - - /*+----------------------+----------------+ - | Attribute | Value | - +----------------------+----------------+ - | Address Block | 169.254.0.0/16 | - | Name | Link Local | - | RFC | [RFC3927] | - | Allocation Date | May 2005 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | True | - +----------------------+----------------+*/ - MustIPv4Addr("169.254.0.0/16"), - - /*+----------------------+---------------+ - | Attribute | Value | - +----------------------+---------------+ - | Address Block | 172.16.0.0/12 | - | Name | Private-Use | - | RFC | [RFC1918] | - | Allocation Date | February 1996 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+---------------+*/ - MustIPv4Addr("172.16.0.0/12"), - - /*+----------------------+---------------------------------+ - | Attribute | Value | - +----------------------+---------------------------------+ - | Address Block | 192.0.0.0/24 [2] | - | Name | IETF Protocol Assignments | - | RFC | Section 2.1 of this document | - | Allocation Date | January 2010 | - | Termination Date | N/A | - | Source | False | - | Destination | False | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+---------------------------------+*/ - // [2] Not usable unless by virtue of a more specific - // reservation. - MustIPv4Addr("192.0.0.0/24"), - - /*+----------------------+--------------------------------+ - | Attribute | Value | - +----------------------+--------------------------------+ - | Address Block | 192.0.0.0/29 | - | Name | IPv4 Service Continuity Prefix | - | RFC | [RFC6333], [RFC7335] | - | Allocation Date | June 2011 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+--------------------------------+*/ - MustIPv4Addr("192.0.0.0/29"), - - /*+----------------------+----------------------------+ - | Attribute | Value | - +----------------------+----------------------------+ - | Address Block | 192.0.2.0/24 | - | Name | Documentation (TEST-NET-1) | - | RFC | [RFC5737] | - | Allocation Date | January 2010 | - | Termination Date | N/A | - | Source | False | - | Destination | False | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+----------------------------+*/ - MustIPv4Addr("192.0.2.0/24"), - - /*+----------------------+--------------------+ - | Attribute | Value | - +----------------------+--------------------+ - | Address Block | 192.88.99.0/24 | - | Name | 6to4 Relay Anycast | - | RFC | [RFC3068] | - | Allocation Date | June 2001 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | True | - | Reserved-by-Protocol | False | - +----------------------+--------------------+*/ - MustIPv4Addr("192.88.99.0/24"), - - /*+----------------------+----------------+ - | Attribute | Value | - +----------------------+----------------+ - | Address Block | 192.168.0.0/16 | - | Name | Private-Use | - | RFC | [RFC1918] | - | Allocation Date | February 1996 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | False | - | Reserved-by-Protocol | False | - 
+----------------------+----------------+*/ - MustIPv4Addr("192.168.0.0/16"), - - /*+----------------------+---------------+ - | Attribute | Value | - +----------------------+---------------+ - | Address Block | 198.18.0.0/15 | - | Name | Benchmarking | - | RFC | [RFC2544] | - | Allocation Date | March 1999 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+---------------+*/ - MustIPv4Addr("198.18.0.0/15"), - - /*+----------------------+----------------------------+ - | Attribute | Value | - +----------------------+----------------------------+ - | Address Block | 198.51.100.0/24 | - | Name | Documentation (TEST-NET-2) | - | RFC | [RFC5737] | - | Allocation Date | January 2010 | - | Termination Date | N/A | - | Source | False | - | Destination | False | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+----------------------------+*/ - MustIPv4Addr("198.51.100.0/24"), - - /*+----------------------+----------------------------+ - | Attribute | Value | - +----------------------+----------------------------+ - | Address Block | 203.0.113.0/24 | - | Name | Documentation (TEST-NET-3) | - | RFC | [RFC5737] | - | Allocation Date | January 2010 | - | Termination Date | N/A | - | Source | False | - | Destination | False | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+----------------------------+*/ - MustIPv4Addr("203.0.113.0/24"), - - /*+----------------------+----------------------+ - | Attribute | Value | - +----------------------+----------------------+ - | Address Block | 240.0.0.0/4 | - | Name | Reserved | - | RFC | [RFC1112], Section 4 | - | Allocation Date | August 1989 | - | Termination Date | N/A | - | Source | False | - | Destination | False | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | True | - +----------------------+----------------------+*/ - MustIPv4Addr("240.0.0.0/4"), - - /*+----------------------+----------------------+ - | Attribute | Value | - +----------------------+----------------------+ - | Address Block | 255.255.255.255/32 | - | Name | Limited Broadcast | - | RFC | [RFC0919], Section 7 | - | Allocation Date | October 1984 | - | Termination Date | N/A | - | Source | False | - | Destination | True | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+----------------------+*/ - MustIPv4Addr("255.255.255.255/32"), - - /*+----------------------+------------------+ - | Attribute | Value | - +----------------------+------------------+ - | Address Block | ::1/128 | - | Name | Loopback Address | - | RFC | [RFC4291] | - | Allocation Date | February 2006 | - | Termination Date | N/A | - | Source | False | - | Destination | False | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | True | - +----------------------+------------------+*/ - MustIPv6Addr("::1/128"), - - /*+----------------------+---------------------+ - | Attribute | Value | - +----------------------+---------------------+ - | Address Block | ::/128 | - | Name | Unspecified Address | - | RFC | [RFC4291] | - | Allocation Date | February 2006 | - | Termination Date | N/A | - | Source | True | - | Destination | False | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | True | - +----------------------+---------------------+*/ - 
MustIPv6Addr("::/128"), - - /*+----------------------+---------------------+ - | Attribute | Value | - +----------------------+---------------------+ - | Address Block | 64:ff9b::/96 | - | Name | IPv4-IPv6 Translat. | - | RFC | [RFC6052] | - | Allocation Date | October 2010 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | True | - | Reserved-by-Protocol | False | - +----------------------+---------------------+*/ - MustIPv6Addr("64:ff9b::/96"), - - /*+----------------------+---------------------+ - | Attribute | Value | - +----------------------+---------------------+ - | Address Block | ::ffff:0:0/96 | - | Name | IPv4-mapped Address | - | RFC | [RFC4291] | - | Allocation Date | February 2006 | - | Termination Date | N/A | - | Source | False | - | Destination | False | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | True | - +----------------------+---------------------+*/ - MustIPv6Addr("::ffff:0:0/96"), - - /*+----------------------+----------------------------+ - | Attribute | Value | - +----------------------+----------------------------+ - | Address Block | 100::/64 | - | Name | Discard-Only Address Block | - | RFC | [RFC6666] | - | Allocation Date | June 2012 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+----------------------------+*/ - MustIPv6Addr("100::/64"), - - /*+----------------------+---------------------------+ - | Attribute | Value | - +----------------------+---------------------------+ - | Address Block | 2001::/23 | - | Name | IETF Protocol Assignments | - | RFC | [RFC2928] | - | Allocation Date | September 2000 | - | Termination Date | N/A | - | Source | False[1] | - | Destination | False[1] | - | Forwardable | False[1] | - | Global | False[1] | - | Reserved-by-Protocol | False | - +----------------------+---------------------------+*/ - // [1] Unless allowed by a more specific allocation. - MustIPv6Addr("2001::/16"), - - /*+----------------------+----------------+ - | Attribute | Value | - +----------------------+----------------+ - | Address Block | 2001::/32 | - | Name | TEREDO | - | RFC | [RFC4380] | - | Allocation Date | January 2006 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+----------------+*/ - // Covered by previous entry, included for completeness. - // - // MustIPv6Addr("2001::/16"), - - /*+----------------------+----------------+ - | Attribute | Value | - +----------------------+----------------+ - | Address Block | 2001:2::/48 | - | Name | Benchmarking | - | RFC | [RFC5180] | - | Allocation Date | April 2008 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+----------------+*/ - // Covered by previous entry, included for completeness. 
- // - // MustIPv6Addr("2001:2::/48"), - - /*+----------------------+---------------+ - | Attribute | Value | - +----------------------+---------------+ - | Address Block | 2001:db8::/32 | - | Name | Documentation | - | RFC | [RFC3849] | - | Allocation Date | July 2004 | - | Termination Date | N/A | - | Source | False | - | Destination | False | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+---------------+*/ - // Covered by previous entry, included for completeness. - // - // MustIPv6Addr("2001:db8::/32"), - - /*+----------------------+--------------+ - | Attribute | Value | - +----------------------+--------------+ - | Address Block | 2001:10::/28 | - | Name | ORCHID | - | RFC | [RFC4843] | - | Allocation Date | March 2007 | - | Termination Date | March 2014 | - | Source | False | - | Destination | False | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+--------------+*/ - // Covered by previous entry, included for completeness. - // - // MustIPv6Addr("2001:10::/28"), - - /*+----------------------+---------------+ - | Attribute | Value | - +----------------------+---------------+ - | Address Block | 2002::/16 [2] | - | Name | 6to4 | - | RFC | [RFC3056] | - | Allocation Date | February 2001 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | N/A [2] | - | Reserved-by-Protocol | False | - +----------------------+---------------+*/ - // [2] See [RFC3056] for details. - MustIPv6Addr("2002::/16"), - - /*+----------------------+--------------+ - | Attribute | Value | - +----------------------+--------------+ - | Address Block | fc00::/7 | - | Name | Unique-Local | - | RFC | [RFC4193] | - | Allocation Date | October 2005 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | True | - | Global | False | - | Reserved-by-Protocol | False | - +----------------------+--------------+*/ - MustIPv6Addr("fc00::/7"), - - /*+----------------------+-----------------------+ - | Attribute | Value | - +----------------------+-----------------------+ - | Address Block | fe80::/10 | - | Name | Linked-Scoped Unicast | - | RFC | [RFC4291] | - | Allocation Date | February 2006 | - | Termination Date | N/A | - | Source | True | - | Destination | True | - | Forwardable | False | - | Global | False | - | Reserved-by-Protocol | True | - +----------------------+-----------------------+*/ - MustIPv6Addr("fe80::/10"), - }, - 7335: { - // [RFC7335] IPv4 Service Continuity Prefix - MustIPv4Addr("192.0.0.0/29"), // [RFC7335], §6 IANA Considerations - }, - ForwardingBlacklist: { // Pseudo-RFC - // Blacklist of non-forwardable IP blocks taken from RFC6890 - // - // TODO: the attributes for forwardable should be - // searcahble and embedded in the main list of RFCs - // above. - MustIPv4Addr("0.0.0.0/8"), - MustIPv4Addr("127.0.0.0/8"), - MustIPv4Addr("169.254.0.0/16"), - MustIPv4Addr("192.0.0.0/24"), - MustIPv4Addr("192.0.2.0/24"), - MustIPv4Addr("198.51.100.0/24"), - MustIPv4Addr("203.0.113.0/24"), - MustIPv4Addr("240.0.0.0/4"), - MustIPv4Addr("255.255.255.255/32"), - MustIPv6Addr("::1/128"), - MustIPv6Addr("::/128"), - MustIPv6Addr("::ffff:0:0/96"), - - // There is no way of expressing a whitelist per RFC2928 - // atm without creating a negative mask, which I don't - // want to do atm. 
- //MustIPv6Addr("2001::/23"), - - MustIPv6Addr("2001:db8::/32"), - MustIPv6Addr("2001:10::/28"), - MustIPv6Addr("fe80::/10"), - }, - } -} - -// VisitAllRFCs iterates over all known RFCs and calls the visitor -func VisitAllRFCs(fn func(rfcNum uint, sockaddrs SockAddrs)) { - rfcNetMap := KnownRFCs() - - // Blacklist of faux-RFCs. Don't show the world that we're abusing the - // RFC system in this library. - rfcBlacklist := map[uint]struct{}{ - ForwardingBlacklist: {}, - } - - for rfcNum, sas := range rfcNetMap { - if _, found := rfcBlacklist[rfcNum]; !found { - fn(rfcNum, sas) - } - } -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info.go b/vendor/github.com/hashicorp/go-sockaddr/route_info.go deleted file mode 100644 index 2a3ee1db..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/route_info.go +++ /dev/null @@ -1,19 +0,0 @@ -package sockaddr - -// RouteInterface specifies an interface for obtaining memoized route table and -// network information from a given OS. -type RouteInterface interface { - // GetDefaultInterfaceName returns the name of the interface that has a - // default route or an error and an empty string if a problem was - // encountered. - GetDefaultInterfaceName() (string, error) -} - -// VisitCommands visits each command used by the platform-specific RouteInfo -// implementation. -func (ri routeInfo) VisitCommands(fn func(name string, cmd []string)) { - for k, v := range ri.cmds { - cmds := append([]string(nil), v...) - fn(k, cmds) - } -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go deleted file mode 100644 index 9885915a..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go +++ /dev/null @@ -1,34 +0,0 @@ -package sockaddr - -import ( - "errors" - "os/exec" -) - -type routeInfo struct { - cmds map[string][]string -} - -// NewRouteInfo returns a Android-specific implementation of the RouteInfo -// interface. -func NewRouteInfo() (routeInfo, error) { - return routeInfo{ - cmds: map[string][]string{"ip": {"/system/bin/ip", "route", "get", "8.8.8.8"}}, - }, nil -} - -// GetDefaultInterfaceName returns the interface name attached to the default -// route on the default interface. -func (ri routeInfo) GetDefaultInterfaceName() (string, error) { - out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output() - if err != nil { - return "", err - } - - - var ifName string - if ifName, err = parseDefaultIfNameFromIPCmdAndroid(string(out)); err != nil { - return "", errors.New("No default interface found") - } - return ifName, nil -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go deleted file mode 100644 index 705757ab..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build darwin dragonfly freebsd netbsd openbsd - -package sockaddr - -import "os/exec" - -var cmds map[string][]string = map[string][]string{ - "route": {"/sbin/route", "-n", "get", "default"}, -} - -type routeInfo struct { - cmds map[string][]string -} - -// NewRouteInfo returns a BSD-specific implementation of the RouteInfo -// interface. -func NewRouteInfo() (routeInfo, error) { - return routeInfo{ - cmds: cmds, - }, nil -} - -// GetDefaultInterfaceName returns the interface name attached to the default -// route on the default interface. 
-func (ri routeInfo) GetDefaultInterfaceName() (string, error) { - out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output() - if err != nil { - return "", err - } - - var ifName string - if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil { - return "", err - } - return ifName, nil -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go deleted file mode 100644 index d1b009f6..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build android nacl plan9 - -package sockaddr - -import "errors" - -// getDefaultIfName is the default interface function for unsupported platforms. -func getDefaultIfName() (string, error) { - return "", errors.New("No default interface found (unsupported platform)") -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go deleted file mode 100644 index b62ce6ec..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build !android - -package sockaddr - -import ( - "errors" - "os/exec" -) - -type routeInfo struct { - cmds map[string][]string -} - -// NewRouteInfo returns a Linux-specific implementation of the RouteInfo -// interface. -func NewRouteInfo() (routeInfo, error) { - // CoreOS Container Linux moved ip to /usr/bin/ip, so look it up on - // $PATH and fall back to /sbin/ip on error. - path, _ := exec.LookPath("ip") - if path == "" { - path = "/sbin/ip" - } - - return routeInfo{ - cmds: map[string][]string{"ip": {path, "route"}}, - }, nil -} - -// GetDefaultInterfaceName returns the interface name attached to the default -// route on the default interface. -func (ri routeInfo) GetDefaultInterfaceName() (string, error) { - out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output() - if err != nil { - return "", err - } - - var ifName string - if ifName, err = parseDefaultIfNameFromIPCmd(string(out)); err != nil { - return "", errors.New("No default interface found") - } - return ifName, nil -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go deleted file mode 100644 index ee8e7984..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go +++ /dev/null @@ -1,37 +0,0 @@ -package sockaddr - -import ( - "errors" - "os/exec" -) - -var cmds map[string][]string = map[string][]string{ - "route": {"/usr/sbin/route", "-n", "get", "default"}, -} - -type routeInfo struct { - cmds map[string][]string -} - -// NewRouteInfo returns a Solaris-specific implementation of the RouteInfo -// interface. -func NewRouteInfo() (routeInfo, error) { - return routeInfo{ - cmds: cmds, - }, nil -} - -// GetDefaultInterfaceName returns the interface name attached to the default -// route on the default interface.
-func (ri routeInfo) GetDefaultInterfaceName() (string, error) { - out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output() - if err != nil { - return "", err - } - - var ifName string - if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil { - return "", errors.New("No default interface found") - } - return ifName, nil -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go deleted file mode 100644 index 3da97288..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -package sockaddr - -import "os/exec" - -var cmds map[string][]string = map[string][]string{ - "netstat": {"netstat", "-rn"}, - "ipconfig": {"ipconfig"}, -} - -type routeInfo struct { - cmds map[string][]string -} - -// NewRouteInfo returns a Windows-specific implementation of the RouteInfo -// interface. -func NewRouteInfo() (routeInfo, error) { - return routeInfo{ - cmds: cmds, - }, nil -} - -// GetDefaultInterfaceName returns the interface name attached to the default -// route on the default interface. -func (ri routeInfo) GetDefaultInterfaceName() (string, error) { - ifNameOut, err := exec.Command(cmds["netstat"][0], cmds["netstat"][1:]...).Output() - if err != nil { - return "", err - } - - ipconfigOut, err := exec.Command(cmds["ipconfig"][0], cmds["ipconfig"][1:]...).Output() - if err != nil { - return "", err - } - - ifName, err := parseDefaultIfNameWindows(string(ifNameOut), string(ipconfigOut)) - if err != nil { - return "", err - } - - return ifName, nil -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go deleted file mode 100644 index 826c91c2..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go +++ /dev/null @@ -1,206 +0,0 @@ -package sockaddr - -import ( - "encoding/json" - "fmt" - "strings" -) - -type SockAddrType int -type AttrName string - -const ( - TypeUnknown SockAddrType = 0x0 - TypeUnix = 0x1 - TypeIPv4 = 0x2 - TypeIPv6 = 0x4 - - // TypeIP is the union of TypeIPv4 and TypeIPv6 - TypeIP = 0x6 -) - -type SockAddr interface { - // CmpRFC returns 0 if SockAddr exactly matches one of the matched RFC - // networks, -1 if the receiver is contained within the RFC network, or - // 1 if the address is not contained within the RFC. - CmpRFC(rfcNum uint, sa SockAddr) int - - // Contains returns true if the SockAddr arg is contained within the - // receiver - Contains(SockAddr) bool - - // Equal allows for the comparison of two SockAddrs - Equal(SockAddr) bool - - DialPacketArgs() (string, string) - DialStreamArgs() (string, string) - ListenPacketArgs() (string, string) - ListenStreamArgs() (string, string) - - // String returns the string representation of SockAddr - String() string - - // Type returns the SockAddrType - Type() SockAddrType -} - -// sockAddrAttrMap is a map of the SockAddr type-specific attributes. -var sockAddrAttrMap map[AttrName]func(SockAddr) string -var sockAddrAttrs []AttrName - -func init() { - sockAddrInit() -} - -// New creates a new SockAddr from the string. The order in which New() -// attempts to construct a SockAddr is: IPv4Addr, IPv6Addr, SockAddrUnix. -// -// NOTE: New() relies on the heuristic that the path must begin with either a -// '.' or '/' character before a new UnixSock is created.
For UNIX sockets that - // are absolute paths or are nested within a sub-directory, this works as - // expected; however, if the UNIX socket is contained in the current working - // directory, this will fail unless the path begins with "./" - // (e.g. "./my-local-socket"). Calls directly to NewUnixSock() do not suffer - // this limitation. Invalid IP addresses such as "256.0.0.0/-1" will run afoul - // of this heuristic and be assumed to be a valid UNIX socket path (which they - // are, but it is probably not what you want and you won't realize it until you - // stat(2) the file system to discover it doesn't exist). -func NewSockAddr(s string) (SockAddr, error) { - ipv4Addr, err := NewIPv4Addr(s) - if err == nil { - return ipv4Addr, nil - } - - ipv6Addr, err := NewIPv6Addr(s) - if err == nil { - return ipv6Addr, nil - } - - // Check to make sure the string begins with either a '.' or '/', or - // contains a '/'. - if len(s) > 1 && (strings.IndexAny(s[0:1], "./") != -1 || strings.IndexByte(s, '/') != -1) { - unixSock, err := NewUnixSock(s) - if err == nil { - return unixSock, nil - } - } - - return nil, fmt.Errorf("Unable to convert %q to an IPv4 or IPv6 address, or a UNIX Socket", s) -} - -// ToIPAddr returns an IPAddr type or nil if the type conversion fails. -func ToIPAddr(sa SockAddr) *IPAddr { - ipa, ok := sa.(IPAddr) - if !ok { - return nil - } - return &ipa -} - -// ToIPv4Addr returns an IPv4Addr type or nil if the type conversion fails. -func ToIPv4Addr(sa SockAddr) *IPv4Addr { - switch v := sa.(type) { - case IPv4Addr: - return &v - default: - return nil - } -} - -// ToIPv6Addr returns an IPv6Addr type or nil if the type conversion fails. -func ToIPv6Addr(sa SockAddr) *IPv6Addr { - switch v := sa.(type) { - case IPv6Addr: - return &v - default: - return nil - } -} - -// ToUnixSock returns a UnixSock type or nil if the type conversion fails. -func ToUnixSock(sa SockAddr) *UnixSock { - switch v := sa.(type) { - case UnixSock: - return &v - default: - return nil - } -} - -// SockAddrAttr returns a string representation of an attribute for the given -// SockAddr. -func SockAddrAttr(sa SockAddr, selector AttrName) string { - fn, found := sockAddrAttrMap[selector] - if !found { - return "" - } - - return fn(sa) } - -// String() for SockAddrType returns a string representation of the -// SockAddrType (e.g. "IPv4", "IPv6", "UNIX", "IP", or "unknown"). -func (sat SockAddrType) String() string { - switch sat { - case TypeIPv4: - return "IPv4" - case TypeIPv6: - return "IPv6" - // There is no concrete "IP" type. Leaving here as a reminder. - // case TypeIP: - // return "IP" - case TypeUnix: - return "UNIX" - default: - panic("unsupported type") - } -} - -// sockAddrInit is called once at init() -func sockAddrInit() { - sockAddrAttrs = []AttrName{ - "type", // type should be first - "string", - } - - sockAddrAttrMap = map[AttrName]func(sa SockAddr) string{ - "string": func(sa SockAddr) string { - return sa.String() - }, - "type": func(sa SockAddr) string { - return sa.Type().String() - }, - } } - -// SockAddrAttrs returns a list of attributes supported by the SockAddr type -func SockAddrAttrs() []AttrName { - return sockAddrAttrs -} - -// Although this is pretty trivial to do in a program, having the logic here is -// useful all around. Note that this marshals into a *string* -- the underlying -// string representation of the sockaddr. If you then unmarshal into this type -// in Go, all will work as expected, but externally you can take what comes out -// and use the string value directly.
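The comment above states the marshaling contract; the type declaration itself follows. A short, illustrative round-trip through this now-removed helper — the address is an arbitrary RFC1918 block, and the output comments reflect the behavior the comment describes:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	// Marshals to the sockaddr's *string* form: "192.168.0.0/16".
	in := &sockaddr.SockAddrMarshaler{
		SockAddr: sockaddr.MustIPv4Addr("192.168.0.0/16"),
	}
	b, err := json.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))

	// Unmarshaling re-parses the string through NewSockAddr.
	var out sockaddr.SockAddrMarshaler
	if err := json.Unmarshal(b, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.SockAddr.String())
}
```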
-type SockAddrMarshaler struct { - SockAddr -} - -func (s *SockAddrMarshaler) MarshalJSON() ([]byte, error) { - return json.Marshal(s.SockAddr.String()) -} - -func (s *SockAddrMarshaler) UnmarshalJSON(in []byte) error { - var str string - err := json.Unmarshal(in, &str) - if err != nil { - return err - } - sa, err := NewSockAddr(str) - if err != nil { - return err - } - s.SockAddr = sa - return nil -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go deleted file mode 100644 index 75fbffb1..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go +++ /dev/null @@ -1,193 +0,0 @@ -package sockaddr - -import ( - "bytes" - "sort" -) - -// SockAddrs is a slice of SockAddrs -type SockAddrs []SockAddr - -func (s SockAddrs) Len() int { return len(s) } -func (s SockAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// CmpAddrFunc is the function signature that must be met to be used in the -// OrderedAddrBy multiAddrSorter -type CmpAddrFunc func(p1, p2 *SockAddr) int - -// multiAddrSorter implements the Sort interface, sorting the SockAddrs within. -type multiAddrSorter struct { - addrs SockAddrs - cmp []CmpAddrFunc -} - -// Sort sorts the argument slice according to the Cmp functions passed to -// OrderedAddrBy. -func (ms *multiAddrSorter) Sort(sockAddrs SockAddrs) { - ms.addrs = sockAddrs - sort.Sort(ms) -} - -// OrderedAddrBy sorts SockAddr by the list of sort function pointers. -func OrderedAddrBy(cmpFuncs ...CmpAddrFunc) *multiAddrSorter { - return &multiAddrSorter{ - cmp: cmpFuncs, - } -} - -// Len is part of sort.Interface. -func (ms *multiAddrSorter) Len() int { - return len(ms.addrs) -} - -// Less is part of sort.Interface. It is implemented by looping along the -// Cmp() functions until it finds a comparison that is either less than, -// equal to, or greater than. -func (ms *multiAddrSorter) Less(i, j int) bool { - p, q := &ms.addrs[i], &ms.addrs[j] - // Try all but the last comparison. - var k int - for k = 0; k < len(ms.cmp)-1; k++ { - cmp := ms.cmp[k] - x := cmp(p, q) - switch x { - case -1: - // p < q, so we have a decision. - return true - case 1: - // p > q, so we have a decision. - return false - } - // p == q; try the next comparison. - } - // All comparisons to here said "equal", so just return whatever the - // final comparison reports. - switch ms.cmp[k](p, q) { - case -1: - return true - case 1: - return false - default: - // Still a tie! Now what? - return false - } -} - -// Swap is part of sort.Interface. -func (ms *multiAddrSorter) Swap(i, j int) { - ms.addrs[i], ms.addrs[j] = ms.addrs[j], ms.addrs[i] -} - -const ( - // NOTE (sean@): These constants are here for code readability only and - // are sprucing up the code for readability purposes. Some of the - // Cmp*() variants have confusing logic (especially when dealing with - // mixed-type comparisons) and this, I think, has made it easier to grok - // the code faster. - sortReceiverBeforeArg = -1 - sortDeferDecision = 0 - sortArgBeforeReceiver = 1 -) - -// AscAddress is a sorting function to sort SockAddrs by their respective -// address type. Non-equal types are deferred in the sort. 
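The comparators defined below (AscAddress, AscPort, AscPrivate, AscNetworkSize, AscType) are meant to be composed through the OrderedAddrBy sorter above. A hedged sketch of that composition, using only identifiers visible in this deleted file; the example addresses are arbitrary:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	addrs := sockaddr.SockAddrs{
		sockaddr.MustIPv4Addr("203.0.113.10/24"),
		sockaddr.MustIPv6Addr("::1/128"),
		sockaddr.MustIPv4Addr("10.1.2.3/8"),
	}

	// Comparators are applied in order; ties fall through to the next one.
	sockaddr.OrderedAddrBy(sockaddr.AscType, sockaddr.AscAddress).Sort(addrs)

	for _, sa := range addrs {
		fmt.Println(sa.Type(), sa)
	}
}
```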
-func AscAddress(p1Ptr, p2Ptr *SockAddr) int { - p1 := *p1Ptr - p2 := *p2Ptr - - switch v := p1.(type) { - case IPv4Addr: - return v.CmpAddress(p2) - case IPv6Addr: - return v.CmpAddress(p2) - case UnixSock: - return v.CmpAddress(p2) - default: - return sortDeferDecision - } -} - -// AscPort is a sorting function to sort SockAddrs by their respective port. -// Non-equal types are deferred in the sort. -func AscPort(p1Ptr, p2Ptr *SockAddr) int { - p1 := *p1Ptr - p2 := *p2Ptr - - switch v := p1.(type) { - case IPv4Addr: - return v.CmpPort(p2) - case IPv6Addr: - return v.CmpPort(p2) - default: - return sortDeferDecision - } -} - -// AscPrivate is a sorting function to sort "more secure" private values before -// "more public" values. Both IPv4 and IPv6 are compared against RFC6890 -// (RFC6890 includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and -// IPv6 includes RFC4193). -func AscPrivate(p1Ptr, p2Ptr *SockAddr) int { - p1 := *p1Ptr - p2 := *p2Ptr - - switch v := p1.(type) { - case IPv4Addr, IPv6Addr: - return v.CmpRFC(6890, p2) - default: - return sortDeferDecision - } -} - -// AscNetworkSize is a sorting function to sort SockAddrs based on their network -// size. Non-equal types are deferred in the sort. -func AscNetworkSize(p1Ptr, p2Ptr *SockAddr) int { - p1 := *p1Ptr - p2 := *p2Ptr - p1Type := p1.Type() - p2Type := p2.Type() - - // Network size operations on non-IP types make no sense - if p1Type != p2Type && p1Type != TypeIP { - return sortDeferDecision - } - - ipA := p1.(IPAddr) - ipB := p2.(IPAddr) - - return bytes.Compare([]byte(*ipA.NetIPMask()), []byte(*ipB.NetIPMask())) -} - -// AscType is a sorting function to sort "more secure" types before -// "less-secure" types. -func AscType(p1Ptr, p2Ptr *SockAddr) int { - p1 := *p1Ptr - p2 := *p2Ptr - p1Type := p1.Type() - p2Type := p2.Type() - switch { - case p1Type < p2Type: - return sortReceiverBeforeArg - case p1Type == p2Type: - return sortDeferDecision - case p1Type > p2Type: - return sortArgBeforeReceiver - default: - return sortDeferDecision - } -} - -// FilterByType returns two lists: a list of matched and unmatched SockAddrs -func (sas SockAddrs) FilterByType(type_ SockAddrType) (matched, excluded SockAddrs) { - matched = make(SockAddrs, 0, len(sas)) - excluded = make(SockAddrs, 0, len(sas)) - - for _, sa := range sas { - if sa.Type()&type_ != 0 { - matched = append(matched, sa) - } else { - excluded = append(excluded, sa) - } - } - return matched, excluded -} diff --git a/vendor/github.com/hashicorp/go-sockaddr/unixsock.go b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go deleted file mode 100644 index f3be3f67..00000000 --- a/vendor/github.com/hashicorp/go-sockaddr/unixsock.go +++ /dev/null @@ -1,135 +0,0 @@ -package sockaddr - -import ( - "fmt" - "strings" -) - -type UnixSock struct { - SockAddr - path string -} -type UnixSocks []*UnixSock - -// unixAttrMap is a map of the UnixSockAddr type-specific attributes. -var unixAttrMap map[AttrName]func(UnixSock) string -var unixAttrs []AttrName - -func init() { - unixAttrInit() -} - -// NewUnixSock creates an UnixSock from a string path. The string can be in the -// form of either a URI-based string (e.g. `file:///etc/passwd`), an absolute -// path (e.g. `/etc/passwd`), or a relative path (e.g. `./foo`).
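NewUnixSock, whose implementation follows, stores the path verbatim, and the Dial*/Listen* helpers further down map directly onto the net package's "unix"/"unixgram" network types. An illustrative sketch; the socket path is a made-up example:

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	us, err := sockaddr.NewUnixSock("/var/run/example.sock")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(us.Path()) // /var/run/example.sock

	// These arguments feed straight into net.Dial / net.DialUnix.
	network, addr := us.DialStreamArgs()
	fmt.Println(network, addr) // unix /var/run/example.sock
}
```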
-func NewUnixSock(s string) (ret UnixSock, err error) { - ret.path = s - return ret, nil -} - -// CmpAddress follows the Cmp() standard protocol and returns: -// -// - -1 If the receiver should sort first because its name lexically sorts before arg -// - 0 if the SockAddr arg is not a UnixSock, or is a UnixSock with the same path. -// - 1 If the argument should sort first. -func (us UnixSock) CmpAddress(sa SockAddr) int { - usb, ok := sa.(UnixSock) - if !ok { - return sortDeferDecision - } - - return strings.Compare(us.Path(), usb.Path()) -} - -// DialPacketArgs returns the arguments required to be passed to net.DialUnix() -// with the `unixgram` network type. -func (us UnixSock) DialPacketArgs() (network, dialArgs string) { - return "unixgram", us.path -} - -// DialStreamArgs returns the arguments required to be passed to net.DialUnix() -// with the `unix` network type. -func (us UnixSock) DialStreamArgs() (network, dialArgs string) { - return "unix", us.path -} - -// Equal returns true if a SockAddr is equal to the receiving UnixSock. -func (us UnixSock) Equal(sa SockAddr) bool { - usb, ok := sa.(UnixSock) - if !ok { - return false - } - - if us.Path() != usb.Path() { - return false - } - - return true -} - -// ListenPacketArgs returns the arguments required to be passed to -// net.ListenUnixgram() with the `unixgram` network type. -func (us UnixSock) ListenPacketArgs() (network, dialArgs string) { - return "unixgram", us.path -} - -// ListenStreamArgs returns the arguments required to be passed to -// net.ListenUnix() with the `unix` network type. -func (us UnixSock) ListenStreamArgs() (network, dialArgs string) { - return "unix", us.path -} - -// MustUnixSock is a helper method that must return an UnixSock or panic on -// invalid input. -func MustUnixSock(addr string) UnixSock { - us, err := NewUnixSock(addr) - if err != nil { - panic(fmt.Sprintf("Unable to create a UnixSock from %+q: %v", addr, err)) - } - return us -} - -// Path returns the given path of the UnixSock -func (us UnixSock) Path() string { - return us.path -} - -// String returns the path of the UnixSock -func (us UnixSock) String() string { - return fmt.Sprintf("%+q", us.path) -} - -// Type is used as a type switch and returns TypeUnix -func (UnixSock) Type() SockAddrType { - return TypeUnix -} - -// UnixSockAttrs returns a list of attributes supported by the UnixSockAddr type -func UnixSockAttrs() []AttrName { - return unixAttrs -} - -// UnixSockAttr returns a string representation of an attribute for the given -// UnixSock. 
-func UnixSockAttr(us UnixSock, attrName AttrName) string { - fn, found := unixAttrMap[attrName] - if !found { - return "" - } - - return fn(us) -} - -// unixAttrInit is called once at init() -func unixAttrInit() { - // Sorted for human readability - unixAttrs = []AttrName{ - "path", - } - - unixAttrMap = map[AttrName]func(us UnixSock) string{ - "path": func(us UnixSock) string { - return us.Path() - }, - } -} diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md new file mode 100644 index 00000000..6d48174b --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -0,0 +1,64 @@ +# 1.7.0 (May 24, 2024) + +ENHANCEMENTS: + +- Remove `reflect` dependency ([#91](https://github.com/hashicorp/go-version/pull/91)) +- Implement the `database/sql.Scanner` and `database/sql/driver.Value` interfaces for `Version` ([#133](https://github.com/hashicorp/go-version/pull/133)) + +INTERNAL: + +- [COMPLIANCE] Add Copyright and License Headers ([#115](https://github.com/hashicorp/go-version/pull/115)) +- [COMPLIANCE] Update MPL-2.0 LICENSE ([#105](https://github.com/hashicorp/go-version/pull/105)) +- Bump actions/cache from 3.0.11 to 3.2.5 ([#116](https://github.com/hashicorp/go-version/pull/116)) +- Bump actions/checkout from 3.2.0 to 3.3.0 ([#111](https://github.com/hashicorp/go-version/pull/111)) +- Bump actions/upload-artifact from 3.1.1 to 3.1.2 ([#112](https://github.com/hashicorp/go-version/pull/112)) +- GHA Migration ([#103](https://github.com/hashicorp/go-version/pull/103)) +- github: Pin external GitHub Actions to hashes ([#107](https://github.com/hashicorp/go-version/pull/107)) +- SEC-090: Automated trusted workflow pinning (2023-04-05) ([#124](https://github.com/hashicorp/go-version/pull/124)) +- update readme ([#104](https://github.com/hashicorp/go-version/pull/104)) + +# 1.6.0 (June 28, 2022) + +FEATURES: + +- Add `Prerelease` function to `Constraint` to return true if the version includes a prerelease field ([#100](https://github.com/hashicorp/go-version/pull/100)) + +# 1.5.0 (May 18, 2022) + +FEATURES: + +- Use `encoding` `TextMarshaler` & `TextUnmarshaler` instead of JSON equivalents ([#95](https://github.com/hashicorp/go-version/pull/95)) +- Add JSON handlers to allow parsing from/to JSON ([#93](https://github.com/hashicorp/go-version/pull/93)) + +# 1.4.0 (January 5, 2022) + +FEATURES: + + - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87)) + - `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88)) + +# 1.3.0 (March 31, 2021) + +Please note that CHANGELOG.md does not exist in the source code prior to this release. + +FEATURES: + - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85)) + +# 1.2.1 (June 17, 2020) + +BUG FIXES: + - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73)) + +# 1.2.0 (April 23, 2019) + +FEATURES: + - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53)) + +# 1.1.0 (Jan 07, 2019) + +FEATURES: + - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45)) + +# 1.0.0 (August 24, 2018) + +Initial release. 
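With go-version newly vendored, a brief sketch of the API surface the changelog entries above reference — Core (added in 1.3.0) and MustConstraints (added in 1.4.0) — assuming the package's standard import path; the version strings are arbitrary examples:

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// Core() strips the prerelease and build-metadata fields.
	v := version.Must(version.NewVersion("1.4.7-rc.1+build.3"))
	fmt.Println(v.Core()) // 1.4.7

	// MustConstraints() panics on a malformed constraint string.
	c := version.MustConstraints(version.NewConstraint(">= 1.0, < 2.0"))
	fmt.Println(c.Check(version.Must(version.NewVersion("1.5.0")))) // true
}
```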
diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 00000000..1409d6ab --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,356 @@ +Copyright (c) 2014 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. 
Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 00000000..4b7806cd --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,66 @@ +# Versioning Library for Go +![Build Status](https://github.com/hashicorp/go-version/actions/workflows/go-tests.yml/badge.svg) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. 
+if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 00000000..29bdc4d2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,298 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package version + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + op operator + check *Version + original string +} + +func (c *Constraint) Equals(con *Constraint) bool { + return c.op == con.op && c.check.Equal(con.check) +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. +type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintOperation + +type constraintOperation struct { + op operator + f constraintFunc +} + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintOperation{ + "": {op: equal, f: constraintEqual}, + "=": {op: equal, f: constraintEqual}, + "!=": {op: notEqual, f: constraintNotEqual}, + ">": {op: greaterThan, f: constraintGreaterThan}, + "<": {op: lessThan, f: constraintLessThan}, + ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, + "<=": {op: lessThanEqual, f: constraintLessThanEqual}, + "~>": {op: pessimistic, f: constraintPessimistic}, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// MustConstraints is a helper that wraps a call to a function +// returning (Constraints, error) and panics if error is non-nil. +func MustConstraints(c Constraints, err error) Constraints { + if err != nil { + panic(err) + } + + return c +} + +// Check tests if a version satisfies all the constraints. 
+func (cs Constraints) Check(v *Version) bool {
+	for _, c := range cs {
+		if !c.Check(v) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Equals compares Constraints with other Constraints
+// for equality. This may not represent logical equivalence
+// of compared constraints.
+// e.g. even though '>0.1,>0.2' is logically equivalent
+// to '>0.2' it is *NOT* treated as equal.
+//
+// Missing operator is treated as equal to '=', whitespace
+// is ignored, and constraints are sorted before comparison.
+func (cs Constraints) Equals(c Constraints) bool {
+	if len(cs) != len(c) {
+		return false
+	}
+
+	// make copies to retain order of the original slices
+	left := make(Constraints, len(cs))
+	copy(left, cs)
+	sort.Stable(left)
+	right := make(Constraints, len(c))
+	copy(right, c)
+	sort.Stable(right)
+
+	// compare sorted slices
+	for i, con := range left {
+		if !con.Equals(right[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (cs Constraints) Len() int {
+	return len(cs)
+}
+
+func (cs Constraints) Less(i, j int) bool {
+	if cs[i].op < cs[j].op {
+		return true
+	}
+	if cs[i].op > cs[j].op {
+		return false
+	}
+
+	return cs[i].check.LessThan(cs[j].check)
+}
+
+func (cs Constraints) Swap(i, j int) {
+	cs[i], cs[j] = cs[j], cs[i]
+}
+
+// String returns the string format of the constraints
+func (cs Constraints) String() string {
+	csStr := make([]string, len(cs))
+	for i, c := range cs {
+		csStr[i] = c.String()
+	}
+
+	return strings.Join(csStr, ",")
+}
+
+// Check tests if a constraint is validated by the given version.
+func (c *Constraint) Check(v *Version) bool {
+	return c.f(v, c.check)
+}
+
+// Prerelease returns true if the version underlying this constraint
+// contains a prerelease field.
+func (c *Constraint) Prerelease() bool {
+	return len(c.check.Prerelease()) > 0
+}
+
+func (c *Constraint) String() string {
+	return c.original
+}
+
+func parseSingle(v string) (*Constraint, error) {
+	matches := constraintRegexp.FindStringSubmatch(v)
+	if matches == nil {
+		return nil, fmt.Errorf("Malformed constraint: %s", v)
+	}
+
+	check, err := NewVersion(matches[2])
+	if err != nil {
+		return nil, err
+	}
+
+	cop := constraintOperators[matches[1]]
+
+	return &Constraint{
+		f:        cop.f,
+		op:       cop.op,
+		check:    check,
+		original: v,
+	}, nil
+}
+
+func prereleaseCheck(v, c *Version) bool {
+	switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; {
+	case cPre && vPre:
+		// A constraint with a pre-release can only match a pre-release version
+		// with the same base segments.
+		return v.equalSegments(c)
+
+	case !cPre && vPre:
+		// A constraint without a pre-release can only match a version without a
+		// pre-release.
+		return false
+
+	case cPre && !vPre:
+		// OK, except with the pessimistic operator
+	case !cPre && !vPre:
+		// OK
+	}
+	return true
+}
+
+//-------------------------------------------------------------------
+// Constraint functions
+//-------------------------------------------------------------------
+
+type operator rune
+
+const (
+	equal            operator = '='
+	notEqual         operator = '≠'
+	greaterThan      operator = '>'
+	lessThan         operator = '<'
+	greaterThanEqual operator = '≥'
+	lessThanEqual    operator = '≤'
+	pessimistic      operator = '~'
+)
+
+func constraintEqual(v, c *Version) bool {
+	return v.Equal(c)
+}
+
+func constraintNotEqual(v, c *Version) bool {
+	return !v.Equal(c)
+}
+
+func constraintGreaterThan(v, c *Version) bool {
+	return prereleaseCheck(v, c) && v.Compare(c) == 1
+}
+
+func constraintLessThan(v, c *Version) bool {
+	return prereleaseCheck(v, c) && v.Compare(c) == -1
+}
+
+func constraintGreaterThanEqual(v, c *Version) bool {
+	return prereleaseCheck(v, c) && v.Compare(c) >= 0
+}
+
+func constraintLessThanEqual(v, c *Version) bool {
+	return prereleaseCheck(v, c) && v.Compare(c) <= 0
+}
+
+func constraintPessimistic(v, c *Version) bool {
+	// Using a pessimistic constraint with a pre-release restricts versions to pre-releases
+	if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") {
+		return false
+	}
+
+	// If the version being checked is naturally less than the constraint, then there
+	// is no way for the version to be valid against the constraint
+	if v.LessThan(c) {
+		return false
+	}
+	// We'll use this more than once, so grab the length now so it's a little cleaner
+	// to write the later checks
+	cs := len(c.segments)
+
+	// If the version being checked has less specificity than the constraint, then there
+	// is no way for the version to be valid against the constraint
+	if cs > len(v.segments) {
+		return false
+	}
+
+	// Check the segments in the constraint against those in the version. If the version
+	// being checked, at any point, does not have the same values in each index of the
+	// constraint's segments, then it cannot be valid against the constraint.
+	for i := 0; i < c.si-1; i++ {
+		if v.segments[i] != c.segments[i] {
+			return false
+		}
+	}
+
+	// Check the last part of the segment in the constraint. If the version segment at
+	// this index is less than the constraint's segment at this index, then it cannot
+	// be valid against the constraint
+	if c.segments[cs-1] > v.segments[cs-1] {
+		return false
+	}
+
+	// If nothing has rejected the version by now, it's valid
+	return true
+}
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
new file mode 100644
index 00000000..7c683c28
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -0,0 +1,441 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package version
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled regular expression used to test the validity of a version.
+var (
+	versionRegexp *regexp.Regexp
+	semverRegexp  *regexp.Regexp
+)
+
+// The raw regular expression string used for testing the validity
+// of a version.
+const (
+	VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
+		`(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
+		`(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
+		`?`
+
+	// SemverRegexpRaw requires a separator between version and prerelease
+	SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
+		`(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
+		`(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
+		`?`
+)
+
+// Version represents a single version.
+type Version struct {
+	metadata string
+	pre      string
+	segments []int64
+	si       int
+	original string
+}
+
+func init() {
+	versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
+	semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
+}
+
+// NewVersion parses the given version and returns a new
+// Version.
+func NewVersion(v string) (*Version, error) {
+	return newVersion(v, versionRegexp)
+}
+
+// NewSemver parses the given version and returns a new
+// Version that adheres strictly to SemVer specs
+// https://semver.org/
+func NewSemver(v string) (*Version, error) {
+	return newVersion(v, semverRegexp)
+}
+
+func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
+	matches := pattern.FindStringSubmatch(v)
+	if matches == nil {
+		return nil, fmt.Errorf("Malformed version: %s", v)
+	}
+	segmentsStr := strings.Split(matches[1], ".")
+	segments := make([]int64, len(segmentsStr))
+	for i, str := range segmentsStr {
+		val, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error parsing version: %s", err)
+		}
+
+		segments[i] = val
+	}
+
+	// Even though we could support more than three segments, if we
+	// got fewer than three, pad with 0s. This is to cover the basic
+	// default use case of semver, which is MAJOR.MINOR.PATCH at the minimum
+	for i := len(segments); i < 3; i++ {
+		segments = append(segments, 0)
+	}
+
+	pre := matches[7]
+	if pre == "" {
+		pre = matches[4]
+	}
+
+	return &Version{
+		metadata: matches[10],
+		pre:      pre,
+		segments: segments,
+		si:       len(segmentsStr),
+		original: v,
+	}, nil
+}
+
+// Must is a helper that wraps a call to a function returning (*Version, error)
+// and panics if error is non-nil.
+func Must(v *Version, err error) *Version {
+	if err != nil {
+		panic(err)
+	}
+
+	return v
+}
+
+// Compare compares this version to another version. This
+// returns -1, 0, or 1 if this version is smaller, equal,
+// or larger than the other version, respectively.
+//
+// If you want boolean results, use the LessThan, Equal,
+// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods.
+func (v *Version) Compare(other *Version) int {
+	// A quick, efficient equality check
+	if v.String() == other.String() {
+		return 0
+	}
+
+	// If the segments are the same, we must compare on prerelease info
+	if v.equalSegments(other) {
+		preSelf := v.Prerelease()
+		preOther := other.Prerelease()
+		if preSelf == "" && preOther == "" {
+			return 0
+		}
+		if preSelf == "" {
+			return 1
+		}
+		if preOther == "" {
+			return -1
+		}
+
+		return comparePrereleases(preSelf, preOther)
+	}
+
+	segmentsSelf := v.Segments64()
+	segmentsOther := other.Segments64()
+	// Get the highest specificity (hS), or if they're equal, just use segmentSelf length
+	lenSelf := len(segmentsSelf)
+	lenOther := len(segmentsOther)
+	hS := lenSelf
+	if lenSelf < lenOther {
+		hS = lenOther
+	}
+	// Compare the segments
+	// Because a constraint could have more/less specificity than the version it's
+	// checking, we need to account for a lopsided or jagged comparison
+	for i := 0; i < hS; i++ {
+		if i > lenSelf-1 {
+			// This means Self had the lower specificity
+			// Check to see if the remaining segments in Other are all zeros
+			if !allZero(segmentsOther[i:]) {
+				// if not, it means that Other has to be greater than Self
+				return -1
+			}
+			break
+		} else if i > lenOther-1 {
+			// this means Other had the lower specificity
+			// Check to see if the remaining segments in Self are all zeros -
+			if !allZero(segmentsSelf[i:]) {
+				// if not, it means that Self has to be greater than Other
+				return 1
+			}
+			break
+		}
+		lhs := segmentsSelf[i]
+		rhs := segmentsOther[i]
+		if lhs == rhs {
+			continue
+		} else if lhs < rhs {
+			return -1
+		}
+		// Otherwise rhs was > lhs; they're not equal
+		return 1
+	}
+
+	// if we got this far, they're equal
+	return 0
+}
+
+func (v *Version) equalSegments(other *Version) bool {
+	segmentsSelf := v.Segments64()
+	segmentsOther := other.Segments64()
+
+	if len(segmentsSelf) != len(segmentsOther) {
+		return false
+	}
+	for i, v := range segmentsSelf {
+		if v != segmentsOther[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func allZero(segs []int64) bool {
+	for _, s := range segs {
+		if s != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func comparePart(preSelf string, preOther string) int {
+	if preSelf == preOther {
+		return 0
+	}
+
+	var selfInt int64
+	selfNumeric := true
+	selfInt, err := strconv.ParseInt(preSelf, 10, 64)
+	if err != nil {
+		selfNumeric = false
+	}
+
+	var otherInt int64
+	otherNumeric := true
+	otherInt, err = strconv.ParseInt(preOther, 10, 64)
+	if err != nil {
+		otherNumeric = false
+	}
+
+	// if a part is empty, we use the other to decide
+	if preSelf == "" {
+		if otherNumeric {
+			return -1
+		}
+		return 1
+	}
+
+	if preOther == "" {
+		if selfNumeric {
+			return 1
+		}
+		return -1
+	}
+
+	if selfNumeric && !otherNumeric {
+		return -1
+	} else if !selfNumeric && otherNumeric {
+		return 1
+	} else if !selfNumeric && !otherNumeric && preSelf > preOther {
+		return 1
+	} else if selfInt > otherInt {
+		return 1
+	}
+
+	return -1
+}
+
+func comparePrereleases(v string, other string) int {
+	// the same pre-release!
+	if v == other {
+		return 0
+	}
+
+	// split both pre-releases to analyze their parts
+	selfPreReleaseMeta := strings.Split(v, ".")
+	otherPreReleaseMeta := strings.Split(other, ".")
+
+	selfPreReleaseLen := len(selfPreReleaseMeta)
+	otherPreReleaseLen := len(otherPreReleaseMeta)
+
+	biggestLen := otherPreReleaseLen
+	if selfPreReleaseLen > otherPreReleaseLen {
+		biggestLen = selfPreReleaseLen
+	}
+
+	// loop over the parts to find the first difference
+	for i := 0; i < biggestLen; i = i + 1 {
+		partSelfPre := ""
+		if i < selfPreReleaseLen {
+			partSelfPre = selfPreReleaseMeta[i]
+		}
+
+		partOtherPre := ""
+		if i < otherPreReleaseLen {
+			partOtherPre = otherPreReleaseMeta[i]
+		}
+
+		compare := comparePart(partSelfPre, partOtherPre)
+		// if the parts are equal, continue the loop
+		if compare != 0 {
+			return compare
+		}
+	}
+
+	return 0
+}
+
+// Core returns a new version constructed from only the MAJOR.MINOR.PATCH
+// segments of the version, without prerelease or metadata.
+func (v *Version) Core() *Version {
+	segments := v.Segments64()
+	segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2])
+	return Must(NewVersion(segmentsOnly))
+}
+
+// Equal tests if two versions are equal.
+func (v *Version) Equal(o *Version) bool {
+	if v == nil || o == nil {
+		return v == o
+	}
+
+	return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// GreaterThanOrEqual tests if this version is greater than or equal to another version.
+func (v *Version) GreaterThanOrEqual(o *Version) bool {
+	return v.Compare(o) >= 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// LessThanOrEqual tests if this version is less than or equal to another version.
+func (v *Version) LessThanOrEqual(o *Version) bool {
+	return v.Compare(o) <= 0
+}
+
+// Metadata returns any metadata that was part of the version
+// string.
+//
+// Metadata is anything that comes after the "+" in the version.
+// For example, with "1.2.3+beta", the metadata is "beta".
+func (v *Version) Metadata() string {
+	return v.metadata
+}
+
+// Prerelease returns any prerelease data that is part of the version,
+// or blank if there is no prerelease data.
+//
+// Prerelease information is anything that comes after the "-" in the
+// version (but before any metadata). For example, with "1.2.3-beta",
+// the prerelease information is "beta".
+func (v *Version) Prerelease() string {
+	return v.pre
+}
+
+// Segments returns the numeric segments of the version as a slice of ints.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments() []int {
+	segmentSlice := make([]int, len(v.segments))
+	for i, v := range v.segments {
+		segmentSlice[i] = int(v)
+	}
+	return segmentSlice
+}
+
+// Segments64 returns the numeric segments of the version as a slice of int64s.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments64() []int64 {
+	result := make([]int64, len(v.segments))
+	copy(result, v.segments)
+	return result
+}
+
+// String returns the full version string including pre-release
+// and metadata information.
+// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. +func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} + +// UnmarshalText implements encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(b []byte) error { + temp, err := NewVersion(string(b)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements encoding.TextMarshaler interface. +func (v *Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} + +// Scan implements the sql.Scanner interface. +func (v *Version) Scan(src interface{}) error { + switch src := src.(type) { + case string: + return v.UnmarshalText([]byte(src)) + case nil: + return nil + default: + return fmt.Errorf("cannot scan %T as Version", src) + } +} + +// Value implements the driver.Valuer interface. +func (v *Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 00000000..83547fe1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml new file mode 100644 index 00000000..955dc0be --- /dev/null +++ b/vendor/github.com/json-iterator/go/.codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "output_tests/.*" + diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore new file mode 100644 index 00000000..15556530 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -0,0 +1,4 @@ +/vendor +/bug_test.go +/coverage.txt +/.idea diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml new file mode 100644 index 00000000..449e67cd --- /dev/null +++ b/vendor/github.com/json-iterator/go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.8.x + - 1.x + +before_install: + - go get -t -v ./... 
+ +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 00000000..c8a9fbb3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 00000000..313a0f88 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 00000000..2cf4f5ab --- /dev/null +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
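Before the json-iterator sources, a brief sketch of how the vendored go-version API above fits together: `NewConstraint` parses a comma-separated constraint list, `constraintPessimistic` implements the `~>` operator, and `Collection` makes versions sortable. This is a minimal illustration, not part of the diff; the constraint and version strings are invented for the example:

```go
package main

import (
	"fmt"
	"sort"

	version "github.com/hashicorp/go-version"
)

func main() {
	// "~> 1.2.3" pins every segment except the last: it matches 1.2.9
	// but rejects 1.3.0 (see constraintPessimistic above).
	constraints := version.MustConstraints(version.NewConstraint("~> 1.2.3"))
	fmt.Println(constraints.Check(version.Must(version.NewVersion("1.2.9")))) // true
	fmt.Println(constraints.Check(version.Must(version.NewVersion("1.3.0")))) // false

	// Collection implements sort.Interface; per Compare, a pre-release
	// orders before its release, so 1.4.0-beta sorts ahead of 1.4.0.
	raw := []string{"1.4.0", "1.4.0-beta", "0.7.1"}
	versions := make([]*version.Version, len(raw))
	for i, r := range raw {
		versions[i] = version.Must(version.NewVersion(r))
	}
	sort.Sort(version.Collection(versions))
	fmt.Println(versions) // [0.7.1 1.4.0-beta 1.4.0]
}
```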
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 00000000..c589addf
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,85 @@
+[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go)
+[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
+[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
+[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
+[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
+[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
+
+A high-performance, 100% compatible drop-in replacement for "encoding/json"
+
+# Benchmark
+
+![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
+
+Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
+
+Raw result (easyjson requires static code generation)
+
+|                 | ns/op       | allocation bytes | allocation times |
+| --------------- | ----------- | ---------------- | ---------------- |
+| std decode      | 35510 ns/op | 1960 B/op        | 99 allocs/op     |
+| easyjson decode | 8499 ns/op  | 160 B/op         | 4 allocs/op      |
+| jsoniter decode | 5623 ns/op  | 160 B/op         | 3 allocs/op      |
+| std encode      | 2213 ns/op  | 712 B/op         | 5 allocs/op      |
+| easyjson encode | 883 ns/op   | 576 B/op         | 3 allocs/op      |
+| jsoniter encode | 837 ns/op   | 384 B/op         | 4 allocs/op      |
+
+Always benchmark with your own workload.
+The result depends heavily on the data input.
+
+# Usage
+
+100% compatibility with the standard library
+
+Replace
+
+```go
+import "encoding/json"
+json.Marshal(&data)
+```
+
+with
+
+```go
+import jsoniter "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Marshal(&data)
+```
+
+Replace
+
+```go
+import "encoding/json"
+json.Unmarshal(input, &data)
+```
+
+with
+
+```go
+import jsoniter "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Unmarshal(input, &data)
+```
+
+[More documentation](http://jsoniter.com/migrate-from-go-std.html)
+
+# How to get
+
+```
+go get github.com/json-iterator/go
+```
+
+# Contributions Welcome!
+
+Contributors
+
+- [thockin](https://github.com/thockin)
+- [mattn](https://github.com/mattn)
+- [cch123](https://github.com/cch123)
+- [Oleg Shaldybin](https://github.com/olegshaldybin)
+- [Jason Toffaletti](https://github.com/toffaletti)
+
+Report an issue or open a pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
new file mode 100644
index 00000000..92d2cc4a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/adapter.go
@@ -0,0 +1,150 @@
+package jsoniter
+
+import (
+	"bytes"
+	"io"
+)
+
+// RawMessage makes it possible to replace encoding/json with jsoniter
+type RawMessage []byte
+
+// Unmarshal adapts to the encoding/json Unmarshal API
+//
+// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
+// Refer to https://godoc.org/encoding/json#Unmarshal for more information
+func Unmarshal(data []byte, v interface{}) error {
+	return ConfigDefault.Unmarshal(data, v)
+}
+
+// UnmarshalFromString is a convenient method to read from a string instead of []byte
+func UnmarshalFromString(str string, v interface{}) error {
+	return ConfigDefault.UnmarshalFromString(str, v)
+}
+
+// Get is a quick method to get a value from a deeply nested JSON structure
+func Get(data []byte, path ...interface{}) Any {
+	return ConfigDefault.Get(data, path...)
+}
+
+// Marshal adapts to the encoding/json Marshal API
+//
+// Marshal returns the JSON encoding of v.
+// Refer to https://godoc.org/encoding/json#Marshal for more information
+func Marshal(v interface{}) ([]byte, error) {
+	return ConfigDefault.Marshal(v)
+}
+
+// MarshalIndent is the same as json.MarshalIndent. Prefix is not supported.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	return ConfigDefault.MarshalIndent(v, prefix, indent)
+}
+
+// MarshalToString is a convenient method to write to a string instead of []byte
+func MarshalToString(v interface{}) (string, error) {
+	return ConfigDefault.MarshalToString(v)
+}
+
+// NewDecoder adapts to the encoding/json NewDecoder API.
+//
+// NewDecoder returns a new decoder that reads from r.
+//
+// Instead of an encoding/json Decoder, a jsoniter Decoder is returned
+// Refer to https://godoc.org/encoding/json#NewDecoder for more information
+func NewDecoder(reader io.Reader) *Decoder {
+	return ConfigDefault.NewDecoder(reader)
+}
+
+// Decoder reads and decodes JSON values from an input stream.
+// Decoder provides identical APIs to the encoding/json Decoder (Token() and UseNumber() are in progress)
+type Decoder struct {
+	iter *Iterator
+}
+
+// Decode decodes the next JSON value into obj
+func (adapter *Decoder) Decode(obj interface{}) error {
+	if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
+		if !adapter.iter.loadMore() {
+			return io.EOF
+		}
+	}
+	adapter.iter.ReadVal(obj)
+	err := adapter.iter.Error
+	if err == io.EOF {
+		return nil
+	}
+	return adapter.iter.Error
+}
+
+// More reports whether there is another element in the current stream
+func (adapter *Decoder) More() bool {
+	iter := adapter.iter
+	if iter.Error != nil {
+		return false
+	}
+	c := iter.nextToken()
+	if c == 0 {
+		return false
+	}
+	iter.unreadByte()
+	return c != ']' && c != '}'
+}
+
+// Buffered returns a reader of the data remaining in the Decoder's buffer
+func (adapter *Decoder) Buffered() io.Reader {
+	remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
+	return bytes.NewReader(remaining)
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (adapter *Decoder) UseNumber() {
+	cfg := adapter.iter.cfg.configBeforeFrozen
+	cfg.UseNumber = true
+	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (adapter *Decoder) DisallowUnknownFields() {
+	cfg := adapter.iter.cfg.configBeforeFrozen
+	cfg.DisallowUnknownFields = true
+	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// NewEncoder is the same as json.NewEncoder
+func NewEncoder(writer io.Writer) *Encoder {
+	return ConfigDefault.NewEncoder(writer)
+}
+
+// Encoder is the same as json.Encoder
+type Encoder struct {
+	stream *Stream
+}
+
+// Encode encodes val as JSON and writes it to the underlying io.Writer
+func (adapter *Encoder) Encode(val interface{}) error {
+	adapter.stream.WriteVal(val)
+	adapter.stream.WriteRaw("\n")
+	adapter.stream.Flush()
+	return adapter.stream.Error
+}
+
+// SetIndent sets the indentation. Prefix is not supported
+func (adapter *Encoder) SetIndent(prefix, indent string) {
+	config := adapter.stream.cfg.configBeforeFrozen
+	config.IndentionStep = len(indent)
+	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// SetEscapeHTML controls HTML escaping (enabled by default); set to false to disable it
+func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
+	config := adapter.stream.cfg.configBeforeFrozen
+	config.EscapeHTML = escapeHTML
+	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(data []byte) bool {
+	return ConfigDefault.Valid(data)
+}
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
new file mode 100644
index 00000000..f6b8aeab
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any.go
@@ -0,0 +1,325 @@
+package jsoniter
+
+import (
+	"errors"
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"reflect"
+	"strconv"
+	"unsafe"
+)
+
+// Any is a generic object representation.
+// The lazy JSON implementation holds []byte and parses lazily.
+type Any interface {
+	LastError() error
+	ValueType() ValueType
+	MustBeValid() Any
+	ToBool() bool
+	ToInt() int
+	ToInt32() int32
+	ToInt64() int64
+	ToUint() uint
+	ToUint32() uint32
+	ToUint64() uint64
+	ToFloat32() float32
+	ToFloat64() float64
+	ToString() string
+	ToVal(val interface{})
+	Get(path ...interface{}) Any
+	Size() int
+	Keys() []string
+	GetInterface() interface{}
+	WriteTo(stream *Stream)
+}
+
+type baseAny struct{}
+
+func (any *baseAny) Get(path ...interface{}) Any {
+	return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *baseAny) Size() int {
+	return 0
+}
+
+func (any *baseAny) Keys() []string {
+	return []string{}
+}
+
+func (any *baseAny) ToVal(obj interface{}) {
+	panic("not implemented")
+}
+
+// WrapInt32 turns an int32 into an Any interface
+func WrapInt32(val int32) Any {
+	return &int32Any{baseAny{}, val}
+}
+
+// WrapInt64 turns an int64 into an Any interface
+func WrapInt64(val int64) Any {
+	return &int64Any{baseAny{}, val}
+}
+
+// WrapUint32 turns a uint32 into an Any interface
+func WrapUint32(val uint32) Any {
+	return &uint32Any{baseAny{}, val}
+}
+
+// WrapUint64 turns a uint64 into an Any interface
+func WrapUint64(val uint64) Any {
+	return &uint64Any{baseAny{}, val}
+}
+
+// WrapFloat64 turns a float64 into an Any interface
+func WrapFloat64(val float64) Any {
+	return &floatAny{baseAny{}, val}
+}
+
+// WrapString turns a string into an Any interface
+func WrapString(val string) Any {
+	return &stringAny{baseAny{}, val}
+}
+
+// Wrap turns a Go object into an Any interface
+func Wrap(val interface{}) Any {
+	if val == nil {
+		return &nilAny{}
+	}
+	asAny, isAny := val.(Any)
+	if isAny {
+		return asAny
+	}
+	typ := reflect2.TypeOf(val)
+	switch typ.Kind() {
+	case reflect.Slice:
+		return wrapArray(val)
+	case reflect.Struct:
+		return wrapStruct(val)
+	case reflect.Map:
+		return wrapMap(val)
+	case reflect.String:
+		return WrapString(val.(string))
+	case reflect.Int:
+		if strconv.IntSize == 32 {
+			return WrapInt32(int32(val.(int)))
+		}
+		return WrapInt64(int64(val.(int)))
+	case reflect.Int8:
+		return WrapInt32(int32(val.(int8)))
+	case reflect.Int16:
+		return WrapInt32(int32(val.(int16)))
+	case reflect.Int32:
+		return WrapInt32(val.(int32))
+	case reflect.Int64:
+		return WrapInt64(val.(int64))
+	case reflect.Uint:
+		if strconv.IntSize == 32 {
+			return WrapUint32(uint32(val.(uint)))
+		}
+		return WrapUint64(uint64(val.(uint)))
+	case reflect.Uintptr:
+		if ptrSize == 32 {
+			return WrapUint32(uint32(val.(uintptr)))
+		}
+		return WrapUint64(uint64(val.(uintptr)))
+	case reflect.Uint8:
+		return WrapUint32(uint32(val.(uint8)))
+	case reflect.Uint16:
+		return WrapUint32(uint32(val.(uint16)))
+	case reflect.Uint32:
+		return WrapUint32(uint32(val.(uint32)))
+	case reflect.Uint64:
+		return WrapUint64(val.(uint64))
+	case reflect.Float32:
+		return WrapFloat64(float64(val.(float32)))
+	case reflect.Float64:
+		return WrapFloat64(val.(float64))
+	case reflect.Bool:
+		if val.(bool) == true {
+			return &trueAny{}
+		}
+		return &falseAny{}
+	}
+	return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
+}
+
+// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + if any == nil { + stream.WriteNil() + return + } + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go new file mode 100644 index 00000000..0449e9aa --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) + 
defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) + if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go new file mode 100644 index 00000000..9452324a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go new file mode 100644 index 00000000..35fdb094 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any *floatAny) ToInt64() int64 { + return int64(any.val) +} + 
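+// The unsigned conversions below return 0 for non-positive values instead of
+// converting a negative float directly (whose result is not well defined in
+// Go); positive values truncate toward zero.
+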
+func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go new file mode 100644 index 00000000..1b56f399 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go new file mode 100644 index 00000000..c440d72b --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any 
*int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go new file mode 100644 index 00000000..1d859eac --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go new file mode 100644 index 00000000..d04cb54c --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 00000000..9d1e901a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + 
return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go new file mode 100644 index 00000000..c44ef5c9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 
+} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) 
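+ // keep the field only when the remaining path resolved to a valid value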
+ if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
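+ // include the entry only when the rest of the path resolves for this key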
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 00000000..1f12f661 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go new file mode 100644 index 00000000..656bbd33 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go new file mode 100644 index 00000000..7df2fce3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} { + return any.val +} diff --git 
a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100644 index 00000000..b45ef688 --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 00000000..2adcdc3b --- /dev/null +++ b/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := 
cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + defer cfg.ReturnIterator(iter) + iter.Read() + if iter.Error != nil && iter.Error != io.EOF { + stream.WriteRaw("null") + } else { + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extraExtensions = append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + cfg.configBeforeFrozen = copied +} + +type 
lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 
0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md new file mode 100644 index 00000000..3095662b --- /dev/null +++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md @@ -0,0 +1,7 @@
+| json type \ dest type | bool | int | uint | float |string|
+| --- | --- | --- | --- |--|--|
+| number | positive => true<br/> negative => true<br/> zero => false| 23.2 => 23<br/> -32.1 => -32| 12.1 => 12<br/> -12.1 => 0|as normal|same as origin|
+| string | empty string => false<br/> string "0" => false<br/> other strings => true | "123.32" => 123<br/> "-123.4" => -123<br/> "123.23xxxw" => 123<br/> "abcde12" => 0<br/> "-32.1" => -32| 13.2 => 13<br/> -1.1 => 0 |12.1 => 12.1<br/> -12.3 => -12.3<br/> 12.4xxa => 12.4<br/> +1.1e2 =>110 |same as origin|
+| bool | true => true<br/> false => false| true => 1<br/> false => 0 | true => 1<br/> false => 0 |true => 1<br/> false => 0|true => "true"<br/> false => "false"|
+| object | true | 0 | 0 |0|originnal json|
+| array | empty array => false<br/> nonempty array => true| [] => 0<br/> [1,2] => 1 | [] => 0<br/> [1,2] => 1 |[] => 0<br/>
[1,2] => 1|original json| \ No newline at end of file diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go new file mode 100644 index 00000000..29b31cf7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter.go @@ -0,0 +1,349 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. 
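+//
+// A minimal usage sketch (assumes the ConfigDefault and ParseString helpers
+// defined elsewhere in this package):
+//
+//	iter := ParseString(ConfigDefault, `[1,2,3]`)
+//	for iter.ReadArray() {
+//		fmt.Println(iter.ReadInt())
+//	}
+//	if iter.Error != nil && iter.Error != io.EOF {
+//		// handle the parse error
+//	}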
+type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + depth int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + depth: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + depth: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + depth: 0, + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + iter.depth = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + iter.depth = 0 + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. 
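+// The message quotes roughly ten bytes on either side of the failure point,
+// plus a wider fifty-byte context window, so errors stay readable on large inputs.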
+func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. 
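+// Under ConfigDefault, objects decode to map[string]interface{}, arrays to
+// []interface{}, and numbers to float64 (or json.Number when UseNumber is set).
+// A short sketch:
+//
+//	v := ParseString(ConfigDefault, `{"k":[1,2]}`).Read()
+//	// v is map[string]interface{}{"k": []interface{}{float64(1), float64(2)}}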
+func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go new file mode 100644 index 00000000..204fe0e0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -0,0 +1,64 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
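+// Typical loop (sketch):
+//
+//	iter := ParseString(ConfigDefault, `[10,20,30]`)
+//	sum := 0
+//	for iter.ReadArray() {
+//		sum += iter.ReadInt()
+//	}
+//	// sum == 60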
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + return iter.decrementDepth() + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go new file mode 100644 index 00000000..8a3d8b6f --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -0,0 +1,342 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty 
number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = 
iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + if value > maxFloat64 { + return iter.readFloat64SlowPath() + } + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go new file mode 100644 index 00000000..d786a89f --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 +const maxFloat64 = 1<<53 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + 
iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = 
value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + 
iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < iter.tail && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go new file mode 100644 index 00000000..58ee89c8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -0,0 +1,267 @@ +package jsoniter + +import ( + "fmt" + "strings" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. +func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } +} + +func calcHash(str string, caseSensitive bool) int64 { + if !caseSensitive { + str = strings.ToLower(str) + } + hash := int64(0x811c9dc5) + for _, b := range []byte(str) { + hash ^= int64(b) + hash *= 0x1000193 + } + return int64(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var field string + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = 
iter.nextToken() + if c == '"' { + iter.unreadByte() + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git 
a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go new file mode 100644 index 00000000..e91eefb1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -0,0 +1,130 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. +func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() +} + +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + return append(captured, remaining...) 
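// Editor's usage sketch for the capture API above (assumes the package is
// imported as jsoniter together with fmt; error handling elided):
// SkipAndReturnBytes hands back a private copy, so each raw element can be
// kept, decoded later, or forwarded as-is.
//
//	it := jsoniter.ParseString(jsoniter.ConfigDefault, `{"a":{"x":1},"b":2}`)
//	for field := it.ReadObject(); field != ""; field = it.ReadObject() {
//	    raw := it.SkipAndReturnBytes() // copied, safe to keep
//	    fmt.Printf("%s => %s\n", field, raw)
//	}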
+} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go new file mode 100644 index 00000000..9303de41 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -0,0 +1,163 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + if !iter.incrementDepth() { + return + } + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case ']': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + if !iter.incrementDepth() { + return + } + + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case '}': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + } 
+ + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go new file mode 100644 index 00000000..6cf66d04 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -0,0 +1,99 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import ( + "fmt" + "io" +) + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + if iter.Error != nil && iter.Error != io.EOF { + return + } + iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = nil + iter.ReadBigFloat() + } + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + 
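// Note: the sloppy and strict skip implementations are selected at compile
// time by the jsoniter_sloppy build tag. iter_skip_sloppy.go scans for
// delimiters without validating the skipped JSON, while this file (the
// default) re-parses skipped numbers and strings so malformed input is still
// reported. Opting into the faster, non-validating variant is a build flag:
//
//	go build -tags jsoniter_sloppy ./...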
+func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go new file mode 100644 index 00000000..adc487ea --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_str.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
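// Editor's sketch of the caveat above (imports elided): copy the slice before
// the next read if the value must outlive it.
//
//	it := jsoniter.ParseString(jsoniter.ConfigDefault, `["abc","def"]`)
//	it.ReadArray()
//	b := it.ReadStringAsSlice() // b may alias the iterator's buffer
//	s := string(b)              // copy now
//	it.ReadArray()              // b must not be used past this point
//	_ = s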
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. + switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go new file mode 100644 index 00000000..c2934f91 --- /dev/null +++ b/vendor/github.com/json-iterator/go/jsoniter.go @@ -0,0 +1,18 @@ +// Package jsoniter implements encoding and decoding of JSON as defined in +// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. +// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter +// and variable type declarations (if any). +// jsoniter interfaces gives 100% compatibility with code using standard lib. +// +// "JSON and Go" +// (https://golang.org/doc/articles/json_and_go.html) +// gives a description of how Marshal/Unmarshal operate +// between arbitrary or predefined json objects and bytes, +// and it applies to jsoniter.Marshal/Unmarshal as well. +// +// Besides, jsoniter.Iterator provides a different set of interfaces +// iterating given bytes/string/reader +// and yielding parsed elements one by one. 
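// For example (editor's sketch, assuming r is an io.Reader carrying a JSON
// array of integers; error handling elided):
//
//	it := jsoniter.Parse(jsoniter.ConfigDefault, r, 4096)
//	sum := 0
//	for it.ReadArray() {
//	    sum += it.ReadInt()
//	}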
+// This set of interfaces reads input as required and gives +// better performance. +package jsoniter diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go new file mode 100644 index 00000000..e2389b56 --- /dev/null +++ b/vendor/github.com/json-iterator/go/pool.go @@ -0,0 +1,42 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil + stream.Error = nil + stream.Attachment = nil + cfg.streamPool.Put(stream) +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + cfg.iteratorPool.Put(iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 00000000..39acb320 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,337 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
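// A minimal sketch of implementing this interface from the outside (editor's
// example; unixTimeEncoder is a hypothetical name, imports of time and unsafe
// assumed): encode time.Time as unix seconds and register it by type name.
//
//	type unixTimeEncoder struct{}
//
//	func (unixTimeEncoder) IsEmpty(ptr unsafe.Pointer) bool {
//	    return (*time.Time)(ptr).IsZero()
//	}
//
//	func (unixTimeEncoder) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
//	    stream.WriteInt64((*time.Time)(ptr).Unix())
//	}
//
//	func init() { jsoniter.RegisterTypeEncoder("time.Time", unixTimeEncoder{}) }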
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ == nil || typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if 
decoder != nil { + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder 
*lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 00000000..13a0b7b0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = 
arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 00000000..8b6bc8b4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go new file mode 100644 index 00000000..74a97bfe --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -0,0 +1,483 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + Type reflect2.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name() == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field reflect2.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
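// A sketch of the renaming use case (editor's example; snakeCase is a
// hypothetical helper): embed DummyExtension to inherit no-op methods,
// override only UpdateStructDescriptor, then register the extension.
//
//	type renameExtension struct{ jsoniter.DummyExtension }
//
//	func (renameExtension) UpdateStructDescriptor(sd *jsoniter.StructDescriptor) {
//	    for _, binding := range sd.Fields {
//	        name := snakeCase(binding.Field.Name())
//	        binding.FromNames = []string{name} // names accepted when decoding
//	        binding.ToNames = []string{name}   // name emitted when encoding
//	    }
//	}
//
//	func init() { jsoniter.RegisterExtension(&renameExtension{}) }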
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder get encoder from map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder get decoder from map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder 
+} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + decoder := ctx.decoderExtension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder = typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + ptrType := 
typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] + if decoder != nil { + return &OptionalDecoder{ptrType.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + encoder := ctx.encoderExtension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder = typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag && !field.Anonymous() { + continue + } + if tag == "-" || field.Name() == "_" { + continue + } + tagParts := strings.Split(tag, ",") + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
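// (Editor's note on this promotion path: fields of an embedded struct or
// *struct are lifted into the parent JSON object, matching encoding/json,
// e.g.
//
//	type Base struct{ ID int `json:"id"` }
//	type Child struct {
//	    *Base         // encoded as {"id":...,"n":...}, not nested under "Base"
//	    N     int `json:"n"`
//	}
//
// )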
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + } + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) + } + binding := &Binding{ + Field: field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + structDescriptor := &StructDescriptor{ + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) + ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) + for _, extension := range ctx.extraExtensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type().Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? 
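// (Editor's example of the rename rules below, assuming the default "json"
// tag key:
//
//	type User struct {
//	    ID     int    `json:"id"`              // renamed to "id"
//	    Email  string `json:"email,omitempty"` // renamed; omitempty handled in processTags
//	    Secret string `json:"-"`               // ignored (handled above)
//	    note   string                          // unexported, dropped below
//	}
//
// )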
+ var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 00000000..98d45c1e --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new 
file mode 100644 index 00000000..eba434f2 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,76 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*json.RawMessage)(ptr)) = nil + } else { + *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*json.RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) + } +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*RawMessage)(ptr)) = nil + } else { + *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) + } +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 00000000..58296713 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + 
decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + 
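// Illustrative sketch (not vendored code): the numericMapKey wrappers selected
// above quote non-string map keys on encode and strip the quotes on decode,
// matching encoding/json behavior.
//
//	package main
//
//	import (
//		"fmt"
//
//		jsoniter "github.com/json-iterator/go"
//	)
//
//	func main() {
//		json := jsoniter.ConfigCompatibleWithStandardLibrary
//		out, _ := json.Marshal(map[int]string{2: "b", 1: "a"})
//		fmt.Println(string(out)) // {"1":"a","2":"b"} (keys quoted, and sorted by this config)
//
//		var m map[int]string
//		_ = json.Unmarshal(out, &m)
//		fmt.Println(m[1], m[2]) // a b
//	}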
decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subStream.Attachment = stream.Attachment + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + key, elem := mapIter.UnsafeNext() + subStreamIndex := subStream.Buffered() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer()[subStreamIndex:] + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + 
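// Illustrative sketch (not vendored code): sortKeysMapEncoder above is what a
// Config with SortMapKeys enabled compiles map types to, giving deterministic
// key order in the output.
//
//	package main
//
//	import (
//		"fmt"
//
//		jsoniter "github.com/json-iterator/go"
//	)
//
//	func main() {
//		sorted := jsoniter.Config{SortMapKeys: true}.Froze()
//		out, err := sorted.Marshal(map[string]int{"b": 2, "c": 3, "a": 1})
//		if err != nil {
//			panic(err)
//		}
//		fmt.Println(string(out)) // {"a":1,"b":2,"c":3}
//	}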
encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer()[subStreamIndex:], + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + if subStream.Error != nil && stream.Error == nil { + stream.Error = subStream.Error + } + stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 00000000..3e21f375 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,225 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "unsafe" + + "github.com/modern-go/reflect2" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + 
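// Illustrative sketch (not vendored code): createEncoderOfMarshaler above
// routes any type implementing json.Marshaler (or encoding.TextMarshaler) to
// the marshaler encoders in this file instead of reflection-based encoding.
// Celsius is a hypothetical example type.
//
//	package main
//
//	import (
//		"fmt"
//
//		jsoniter "github.com/json-iterator/go"
//	)
//
//	type Celsius float64
//
//	func (c Celsius) MarshalJSON() ([]byte, error) {
//		return []byte(fmt.Sprintf(`"%.1fC"`, float64(c))), nil
//	}
//
//	func main() {
//		out, err := jsoniter.Marshal(struct{ Temp Celsius }{21.5})
//		if err != nil {
//			panic(err)
//		}
//		fmt.Println(string(out)) // {"Temp":"21.5C"}
//	}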
checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimmed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler :=
(obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 00000000..f88722d1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,453 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) 
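// Illustrative sketch (not vendored code): []byte values are routed to
// base64Codec here. They encode as a base64 string, like encoding/json, and
// decode from either a base64 string or a JSON array of numbers (the
// ArrayValue branch of the codec).
//
//	package main
//
//	import (
//		"fmt"
//
//		jsoniter "github.com/json-iterator/go"
//	)
//
//	func main() {
//		out, _ := jsoniter.Marshal([]byte("hi"))
//		fmt.Println(string(out)) // "aGk="
//
//		var b []byte
//		_ = jsoniter.Unmarshal([]byte(`"aGk="`), &b)
//		fmt.Println(string(b)) // hi
//
//		_ = jsoniter.Unmarshal([]byte(`[104,105]`), &b)
//		fmt.Println(string(b)) // hi
//	}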
+ return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec 
*int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + 
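// Illustrative sketch (not vendored code): the IsEmpty methods on these
// primitive codecs back the `omitempty` struct tag; a field is skipped when
// its codec reports the zero value ("", 0, false, nil). Opts is a
// hypothetical example type.
//
//	package main
//
//	import (
//		"fmt"
//
//		jsoniter "github.com/json-iterator/go"
//	)
//
//	type Opts struct {
//		Name  string `json:"name,omitempty"`
//		Count int    `json:"count,omitempty"`
//		On    bool   `json:"on,omitempty"`
//	}
//
//	func main() {
//		out, _ := jsoniter.Marshal(Opts{Name: "x"})
//		fmt.Println(string(out)) // {"name":"x"}
//	}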
return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + if codec.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + src := *((*[]byte)(ptr)) + encoding := base64.StdEncoding + stream.writeByte('"') + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + } + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 00000000..fa71f474 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,129 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer is nil, we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to dereference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer is nil, we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type
dereferenceEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 00000000..9441d79d --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := 
decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go new file mode 100644 index 00000000..92ae912d --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -0,0 +1,1097 @@ +package jsoniter + +import ( + "fmt" + "io" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ + 0: {}, + } + + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int64 + var fieldHash2 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldDecoder1 *structFieldDecoder + var 
fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else 
if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = 
fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = 
fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields, false} +} + +type generalStructDecoder struct { + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } + iter.decrementDepth() +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { + var field string + var fieldDecoder *structFieldDecoder + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.ReadStringAsSlice() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } else { + field = iter.ReadString() + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } + if fieldDecoder == nil { + if decoder.disallowUnknownFields { + msg := "found unknown field: " + field + iter.ReportError("ReadObject", msg) + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + iter.Skip() + return + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + fieldDecoder.Decode(ptr, iter) +} + +type skipObjectDecoder struct { + typ reflect2.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect2.Type + fieldHash int64 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type twoFieldsStructDecoder 
struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type threeFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fourFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fiveFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case 
decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sixFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sevenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type eightFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder +} + +func (decoder 
*eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type nineFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type tenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder + fieldHash10 int64 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + 
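// These fixed-arity decoders are a fast path dispatching on precomputed field
// name hashes; structs with more than ten fields, colliding hashes, or a
// config that disallows unknown fields fall back to generalStructDecoder.
// Illustrative sketch (not vendored code):
//
//	package main
//
//	import (
//		"fmt"
//
//		jsoniter "github.com/json-iterator/go"
//	)
//
//	func main() {
//		strict := jsoniter.Config{DisallowUnknownFields: true}.Froze()
//		var v struct {
//			A int `json:"a"`
//		}
//		err := strict.Unmarshal([]byte(`{"a":1,"b":2}`), &v)
//		fmt.Println(err) // error mentioning "found unknown field: b"
//	}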
decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type structFieldDecoder struct { + field reflect2.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := decoder.field.UnsafeGet(ptr) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NilValue { + decoder.elemDecoder.Decode(ptr, iter) + return + } + + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go new file mode 100644 index 00000000..152e3ef5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "unsafe" +) + +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = 
append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +type structFieldEncoder struct { + field reflect2.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := encoder.field.UnsafeGet(ptr) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := encoder.field.UnsafeGet(ptr) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + +type structEncoder struct { + typ reflect2.Type + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type 
stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + tempStream.Attachment = stream.Attachment + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go new file mode 100644 index 00000000..23d8a3ad --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream.go @@ -0,0 +1,210 @@ +package jsoniter + +import ( + "io" +) + +// stream is a io.Writer like object, with JSON specific write functions. +// Error is not returned as return value, but stored as Error member on this stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + Error error + indention int + Attachment interface{} // open for customized encoder +} + +// NewStream create new stream instance. +// cfg can be jsoniter.ConfigDefault. +// out can be nil if write to internal buffer. +// bufSize is the initial size for the internal buffer in bytes. +func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, 0, bufSize), + Error: nil, + indention: 0, + } +} + +// Pool returns a pool can provide more stream with same configuration +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuse this stream instance by assign a new writer +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.buf = stream.buf[:0] +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return cap(stream.buf) - len(stream.buf) +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return len(stream.buf) +} + +// Buffer if writer is nil, use this method to take the result +func (stream *Stream) Buffer() []byte { + return stream.buf +} + +// SetBuffer allows to append to the internal buffer directly +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + stream.buf = append(stream.buf, p...) + if stream.out != nil { + nn, err = stream.out.Write(stream.buf) + stream.buf = stream.buf[nn:] + return + } + return len(p), nil +} + +// WriteByte writes a single byte. 
+func (stream *Stream) writeByte(c byte) { + stream.buf = append(stream.buf, c) +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + stream.buf = append(stream.buf, c1, c2) +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + stream.buf = append(stream.buf, c1, c2, c3) +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4) +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4, c5) +} + +// Flush writes any buffered data to the underlying io.Writer. +func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + _, err := stream.out.Write(stream.buf) + if err != nil { + if stream.Error == nil { + stream.Error = err + } + return err + } + stream.buf = stream.buf[:0] + return nil +} + +// WriteRaw write string out without quotes, just like []byte +func (stream *Stream) WriteRaw(s string) { + stream.buf = append(stream.buf, s...) +} + +// WriteNil write null to stream +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue write true to stream +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse write false to stream +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool write true or false into stream +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart write { with possible indention +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField write "field": with possible indention +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd write } with possible indention +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject write {} +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore write , with possible indention +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) +} + +// WriteArrayStart write [ with possible indention +func (stream *Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray write [] +func (stream *Stream) WriteEmptyArray() { + stream.writeTwoBytes('[', ']') +} + +// WriteArrayEnd write ] with possible indention +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + for i := 0; i < toWrite; i++ { + stream.buf = append(stream.buf, ' ') + } +} diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go new file mode 100644 index 00000000..826aa594 --- /dev/null +++ 
b/vendor/github.com/json-iterator/go/stream_float.go @@ -0,0 +1,111 @@ +package jsoniter + +import ( + "fmt" + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 write float32 to stream +func (stream *Stream) WriteFloat32(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) +} + +// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat32Lossy(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} + +// WriteFloat64 write float64 to stream +func (stream *Stream) WriteFloat64(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(val) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
+ if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) +} + +// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat64Lossy(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go new file mode 100644 index 00000000..d1059ee4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_int.go @@ -0,0 +1,190 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(space []byte, v uint32) []byte { + start := v >> 24 + if start == 0 { + space = append(space, byte(v>>16), byte(v>>8)) + } else if start == 1 { + space = append(space, byte(v>>8)) + } + space = append(space, byte(v)) + return space +} + +func writeBuf(buf []byte, v uint32) []byte { + return append(buf, byte(v>>16), byte(v>>8), byte(v)) +} + +// WriteUint8 write uint8 to stream +func (stream *Stream) WriteUint8(val uint8) { + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint8(nval) + } + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint16(nval) + } + stream.WriteUint16(val) +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + } else { + r3 := q2 - q3*1000 + stream.buf = append(stream.buf, byte(q3+'0')) + stream.buf = writeBuf(stream.buf, digits[r3]) + } + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// 
WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint32(nval) + } + stream.WriteUint32(val) +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q3]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q4]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q5]) + } else { + stream.buf = writeFirstBuf(stream.buf, digits[q6]) + r6 := q5 - q6*1000 + stream.buf = writeBuf(stream.buf, digits[r6]) + } + stream.buf = writeBuf(stream.buf, digits[r5]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint64(nval) + } + stream.WriteUint64(val) +} + +// WriteInt write int to stream +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go new file mode 100644 index 00000000..54c2ba0b --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_str.go @@ -0,0 +1,372 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML