From 0fd655e0a14a27f72d7b2b645d2dd4a2028ef302 Mon Sep 17 00:00:00 2001
From: starComingup <1225067236@qq.com>
Date: Fri, 5 Aug 2022 17:05:46 +0800
Subject: [PATCH 1/3] add k0s init feature, import some dependencies from k0sproject

Signed-off-by: starComingup <1225067236@qq.com>
---
 go.mod | 27 +-
 go.sum | 375 +--
 pkg/runtime/k0s/common.go | 22 +
 pkg/runtime/k0s/init.go | 81 +
 pkg/runtime/k0s/k0sctl/common.go | 28 +
 pkg/runtime/k0s/k0sctl/k0sctl_config.go | 138 +
 pkg/runtime/k0s/k0sctl/k0sctl_config_test.go | 424 +++
 pkg/runtime/k0s/k0sctl/v1beta1/types.go | 63 +
 pkg/runtime/k0s/registry.go | 124 +
 pkg/runtime/k0s/runtime.go | 194 ++
 pkg/runtime/k0s/runtime_test.go | 115 +
 .../github.com/Azure/go-ntlmssp/.travis.yml | 17 +
 vendor/github.com/Azure/go-ntlmssp/LICENSE | 21 +
 vendor/github.com/Azure/go-ntlmssp/README.md | 29 +
 .../Azure/go-ntlmssp/authenticate_message.go | 183 ++
 .../github.com/Azure/go-ntlmssp/authheader.go | 66 +
 vendor/github.com/Azure/go-ntlmssp/avids.go | 17 +
 .../Azure/go-ntlmssp/challenge_message.go | 82 +
 .../Azure/go-ntlmssp/messageheader.go | 21 +
 .../Azure/go-ntlmssp/negotiate_flags.go | 52 +
 .../Azure/go-ntlmssp/negotiate_message.go | 64 +
 .../github.com/Azure/go-ntlmssp/negotiator.go | 144 +
 vendor/github.com/Azure/go-ntlmssp/nlmp.go | 51 +
 vendor/github.com/Azure/go-ntlmssp/unicode.go | 29 +
 .../github.com/Azure/go-ntlmssp/varfield.go | 40 +
 vendor/github.com/Azure/go-ntlmssp/version.go | 20 +
 .../ChrisTrenkamp/goxpath/.travis.yml | 16 +
 .../github.com/ChrisTrenkamp/goxpath/LICENSE | 22 +
 .../ChrisTrenkamp/goxpath/README.md | 1 +
 .../ChrisTrenkamp/goxpath/coverage.sh | 17 +
 .../ChrisTrenkamp/goxpath/goxpath.go | 117 +
 .../goxpath/internal/execxp/execxp.go | 27 +
 .../internal/execxp/findutil/findUtil.go | 307 +++
 .../goxpath/internal/execxp/intfns/boolfns.go | 74 +
 .../goxpath/internal/execxp/intfns/intfns.go | 41 +
 .../internal/execxp/intfns/nodesetfns.go | 131 +
 .../goxpath/internal/execxp/intfns/numfns.go | 71 +
 .../internal/execxp/intfns/stringfns.go | 141 +
 .../goxpath/internal/execxp/operators.go | 212 ++
 .../goxpath/internal/execxp/paths.go | 396 +++
 .../goxpath/internal/xsort/xsort.go | 20 +
 .../ChrisTrenkamp/goxpath/lexer/lexer.go | 419 +++
 .../ChrisTrenkamp/goxpath/lexer/paths.go | 219 ++
 .../ChrisTrenkamp/goxpath/lexer/xmlchars.go | 316 +++
 .../ChrisTrenkamp/goxpath/marshal.go | 105 +
 .../ChrisTrenkamp/goxpath/parser/ast.go | 113 +
 .../ChrisTrenkamp/goxpath/parser/parser.go | 200 ++
 .../goxpath/parser/pathexpr/pathexpr.go | 11 +
 .../ChrisTrenkamp/goxpath/tree/interfaces.go | 18 +
 .../ChrisTrenkamp/goxpath/tree/tree.go | 221 ++
 .../ChrisTrenkamp/goxpath/tree/xfn.go | 52 +
 .../tree/xmltree/xmlbuilder/xmlbuilder.go | 25 +
 .../goxpath/tree/xmltree/xmlele/xmlele.go | 106 +
 .../goxpath/tree/xmltree/xmlnode/xmlnode.go | 43 +
 .../goxpath/tree/xmltree/xmltree.go | 158 ++
 .../ChrisTrenkamp/goxpath/tree/xtypes.go | 113 +
 .../ChrisTrenkamp/goxpath/xconst/xconst.go | 66 +
 .../github.com/Masterminds/semver/.travis.yml | 29 +
 .../Masterminds/semver/CHANGELOG.md | 109 +
 .../github.com/Masterminds/semver/LICENSE.txt | 19 +
 vendor/github.com/Masterminds/semver/Makefile | 36 +
 .../github.com/Masterminds/semver/README.md | 194 ++
 .../Masterminds/semver/appveyor.yml | 44 +
 .../Masterminds/semver/collection.go | 24 +
 .../Masterminds/semver/constraints.go | 423 +++
 vendor/github.com/Masterminds/semver/doc.go | 115 +
 .../github.com/Masterminds/semver/version.go | 425 +++
 .../Masterminds/semver/version_fuzz.go | 10 +
 .../stripansi}/LICENSE | 2 +-
.../github.com/acarl005/stripansi/README.md | 30 + .../acarl005/stripansi/stripansi.go | 13 + .../github.com/alessio/shellescape/.gitignore | 28 + .../alessio/shellescape/.golangci.yml | 64 + .../alessio/shellescape/.goreleaser.yml | 33 + vendor/github.com/alessio/shellescape/AUTHORS | 1 + .../alessio/shellescape/CODE_OF_CONDUCT.md | 76 + vendor/github.com/alessio/shellescape/LICENSE | 21 + .../github.com/alessio/shellescape/README.md | 61 + vendor/github.com/alessio/shellescape/go.mod | 3 + .../alessio/shellescape/shellescape.go | 66 + .../asaskevich/govalidator/.gitignore | 15 + .../asaskevich/govalidator/.travis.yml | 18 + .../asaskevich/govalidator/CONTRIBUTING.md | 63 + .../github.com/asaskevich/govalidator/LICENSE | 21 + .../asaskevich/govalidator/README.md | 616 +++++ .../asaskevich/govalidator/arrays.go | 58 + .../asaskevich/govalidator/converter.go | 64 + .../github.com/asaskevich/govalidator/doc.go | 3 + .../asaskevich/govalidator/error.go | 47 + .../github.com/asaskevich/govalidator/go.mod | 3 + .../asaskevich/govalidator/numerics.go | 97 + .../asaskevich/govalidator/patterns.go | 105 + .../asaskevich/govalidator/types.go | 653 +++++ .../asaskevich/govalidator/utils.go | 270 ++ .../asaskevich/govalidator/validator.go | 1530 +++++++++++ .../asaskevich/govalidator/wercker.yml | 15 + vendor/github.com/avast/retry-go/.gitignore | 21 + .../github.com/avast/retry-go/.godocdown.tmpl | 37 + vendor/github.com/avast/retry-go/.travis.yml | 20 + vendor/github.com/avast/retry-go/Gopkg.toml | 3 + vendor/github.com/avast/retry-go/LICENSE | 21 + vendor/github.com/avast/retry-go/Makefile | 65 + vendor/github.com/avast/retry-go/README.md | 361 +++ vendor/github.com/avast/retry-go/VERSION | 1 + vendor/github.com/avast/retry-go/appveyor.yml | 19 + vendor/github.com/avast/retry-go/options.go | 198 ++ vendor/github.com/avast/retry-go/retry.go | 225 ++ .../bmatcuk/doublestar/v4/.gitignore | 32 + .../bmatcuk/doublestar/v4/.travis.yml | 18 + .../github.com/bmatcuk/doublestar/v4/LICENSE | 22 + .../bmatcuk/doublestar/v4/README.md | 278 ++ .../bmatcuk/doublestar/v4/UPGRADING.md | 63 + .../bmatcuk/doublestar/v4/doublestar.go | 8 + .../github.com/bmatcuk/doublestar/v4/glob.go | 395 +++ .../bmatcuk/doublestar/v4/globwalk.go | 275 ++ .../github.com/bmatcuk/doublestar/v4/go.mod | 3 + .../github.com/bmatcuk/doublestar/v4/match.go | 374 +++ .../github.com/bmatcuk/doublestar/v4/utils.go | 69 + .../bmatcuk/doublestar/v4/validate.go | 82 + .../cpuguy83/go-md2man/v2/md2man/roff.go | 105 +- vendor/github.com/creasty/defaults/.gitignore | 1 + .../github.com/creasty/defaults/.travis.yml | 20 + vendor/github.com/creasty/defaults/LICENSE | 22 + vendor/github.com/creasty/defaults/Makefile | 30 + vendor/github.com/creasty/defaults/README.md | 69 + .../github.com/creasty/defaults/defaults.go | 198 ++ vendor/github.com/creasty/defaults/go.mod | 3 + vendor/github.com/creasty/defaults/setter.go | 12 + vendor/github.com/go-logr/logr/CHANGELOG.md | 6 + .../github.com/go-logr/logr/CONTRIBUTING.md | 17 + vendor/github.com/go-logr/logr/README.md | 204 +- vendor/github.com/go-logr/logr/discard.go | 35 +- vendor/github.com/go-logr/logr/go.mod | 2 +- vendor/github.com/go-logr/logr/logr.go | 513 +++- .../go-ozzo/ozzo-validation/v4/.gitignore | 28 + .../go-ozzo/ozzo-validation/v4/.travis.yml | 17 + .../go-ozzo/ozzo-validation/v4/LICENSE | 17 + .../go-ozzo/ozzo-validation/v4/README.md | 703 +++++ .../go-ozzo/ozzo-validation/v4/UPGRADE.md | 64 + .../go-ozzo/ozzo-validation/v4/absent.go | 67 + .../go-ozzo/ozzo-validation/v4/date.go | 102 + 
.../go-ozzo/ozzo-validation/v4/each.go | 97 + .../go-ozzo/ozzo-validation/v4/error.go | 180 ++ .../go-ozzo/ozzo-validation/v4/go.mod | 8 + .../go-ozzo/ozzo-validation/v4/go.sum | 14 + .../go-ozzo/ozzo-validation/v4/in.go | 57 + .../go-ozzo/ozzo-validation/v4/is/rules.go | 283 ++ .../go-ozzo/ozzo-validation/v4/length.go | 104 + .../go-ozzo/ozzo-validation/v4/map.go | 144 + .../go-ozzo/ozzo-validation/v4/match.go | 56 + .../go-ozzo/ozzo-validation/v4/minmax.go | 195 ++ .../go-ozzo/ozzo-validation/v4/multipleof.go | 65 + .../go-ozzo/ozzo-validation/v4/not_in.go | 51 + .../go-ozzo/ozzo-validation/v4/not_nil.go | 44 + .../go-ozzo/ozzo-validation/v4/required.go | 74 + .../go-ozzo/ozzo-validation/v4/string.go | 64 + .../go-ozzo/ozzo-validation/v4/struct.go | 169 ++ .../go-ozzo/ozzo-validation/v4/util.go | 163 ++ .../go-ozzo/ozzo-validation/v4/validation.go | 272 ++ .../go-ozzo/ozzo-validation/v4/when.go | 47 + .../go-playground/locales/.gitignore | 24 + .../go-playground/locales/.travis.yml | 26 + .../github.com/go-playground/locales/LICENSE | 21 + .../go-playground/locales/README.md | 172 ++ .../locales/currency/currency.go | 311 +++ .../github.com/go-playground/locales/go.mod | 5 + .../github.com/go-playground/locales/go.sum | 3 + .../github.com/go-playground/locales/logo.png | Bin 0 -> 37360 bytes .../github.com/go-playground/locales/rules.go | 293 ++ .../universal-translator/.gitignore | 25 + .../universal-translator/.travis.yml | 27 + .../universal-translator/LICENSE | 21 + .../universal-translator/Makefile | 18 + .../universal-translator/README.md | 89 + .../universal-translator/errors.go | 148 + .../go-playground/universal-translator/go.mod | 5 + .../go-playground/universal-translator/go.sum | 7 + .../universal-translator/import_export.go | 276 ++ .../universal-translator/logo.png | Bin 0 -> 16598 bytes .../universal-translator/translator.go | 420 +++ .../universal_translator.go | 113 + .../go-playground/validator/v10/.gitignore | 30 + .../go-playground/validator/v10/LICENSE | 22 + .../validator/v10/MAINTAINERS.md | 16 + .../go-playground/validator/v10/Makefile | 18 + .../go-playground/validator/v10/README.md | 323 +++ .../go-playground/validator/v10/baked_in.go | 2409 +++++++++++++++++ .../go-playground/validator/v10/cache.go | 322 +++ .../validator/v10/country_codes.go | 1132 ++++++++ .../validator/v10/currency_codes.go | 79 + .../go-playground/validator/v10/doc.go | 1342 +++++++++ .../go-playground/validator/v10/errors.go | 275 ++ .../validator/v10/field_level.go | 120 + .../go-playground/validator/v10/go.mod | 19 + .../go-playground/validator/v10/go.sum | 50 + .../go-playground/validator/v10/logo.png | Bin 0 -> 13443 bytes .../validator/v10/postcode_regexes.go | 173 ++ .../go-playground/validator/v10/regexes.go | 105 + .../validator/v10/struct_level.go | 175 ++ .../validator/v10/translations.go | 11 + .../go-playground/validator/v10/util.go | 288 ++ .../go-playground/validator/v10/validator.go | 477 ++++ .../validator/v10/validator_instance.go | 658 +++++ vendor/github.com/gofrs/uuid/.gitignore | 15 + vendor/github.com/gofrs/uuid/LICENSE | 20 + vendor/github.com/gofrs/uuid/README.md | 109 + vendor/github.com/gofrs/uuid/codec.go | 212 ++ vendor/github.com/gofrs/uuid/fuzz.go | 47 + vendor/github.com/gofrs/uuid/generator.go | 573 ++++ vendor/github.com/gofrs/uuid/sql.go | 109 + vendor/github.com/gofrs/uuid/uuid.go | 292 ++ vendor/github.com/google/gofuzz/.travis.yml | 11 +- .../github.com/google/gofuzz/CONTRIBUTING.md | 2 +- vendor/github.com/google/gofuzz/README.md | 18 + 
.../google/gofuzz/bytesource/bytesource.go | 81 + vendor/github.com/google/gofuzz/fuzz.go | 137 +- .../github.com/hashicorp/go-uuid/.travis.yml | 12 + vendor/github.com/hashicorp/go-uuid/LICENSE | 363 +++ vendor/github.com/hashicorp/go-uuid/README.md | 8 + vendor/github.com/hashicorp/go-uuid/go.mod | 1 + vendor/github.com/hashicorp/go-uuid/uuid.go | 83 + .../hashicorp/go-version/CHANGELOG.md | 32 + .../github.com/hashicorp/go-version/LICENSE | 354 +++ .../github.com/hashicorp/go-version/README.md | 66 + .../hashicorp/go-version/constraint.go | 290 ++ vendor/github.com/hashicorp/go-version/go.mod | 1 + .../hashicorp/go-version/version.go | 390 +++ .../go-version/version_collection.go | 17 + vendor/github.com/jcmturner/aescts/v2/LICENSE | 201 ++ .../github.com/jcmturner/aescts/v2/aescts.go | 186 ++ vendor/github.com/jcmturner/aescts/v2/go.mod | 5 + vendor/github.com/jcmturner/aescts/v2/go.sum | 10 + .../github.com/jcmturner/dnsutils/v2/LICENSE | 201 ++ .../github.com/jcmturner/dnsutils/v2/go.mod | 5 + .../github.com/jcmturner/dnsutils/v2/go.sum | 10 + .../github.com/jcmturner/dnsutils/v2/srv.go | 95 + vendor/github.com/jcmturner/gofork/LICENSE | 27 + .../jcmturner/gofork/encoding/asn1/README.md | 5 + .../jcmturner/gofork/encoding/asn1/asn1.go | 1003 +++++++ .../jcmturner/gofork/encoding/asn1/common.go | 173 ++ .../jcmturner/gofork/encoding/asn1/marshal.go | 659 +++++ .../gofork/x/crypto/pbkdf2/pbkdf2.go | 98 + .../jcmturner/goidentity/v6/LICENSE | 201 ++ .../jcmturner/goidentity/v6/README.md | 7 + .../jcmturner/goidentity/v6/authenticator.go | 6 + .../github.com/jcmturner/goidentity/v6/go.mod | 8 + .../github.com/jcmturner/goidentity/v6/go.sum | 12 + .../jcmturner/goidentity/v6/identity.go | 52 + .../jcmturner/goidentity/v6/user.go | 172 ++ vendor/github.com/jcmturner/gokrb5/v8/LICENSE | 201 ++ .../jcmturner/gokrb5/v8/asn1tools/tools.go | 86 + .../jcmturner/gokrb5/v8/client/ASExchange.go | 182 ++ .../jcmturner/gokrb5/v8/client/TGSExchange.go | 103 + .../jcmturner/gokrb5/v8/client/cache.go | 134 + .../jcmturner/gokrb5/v8/client/client.go | 329 +++ .../jcmturner/gokrb5/v8/client/network.go | 218 ++ .../jcmturner/gokrb5/v8/client/passwd.go | 75 + .../jcmturner/gokrb5/v8/client/session.go | 295 ++ .../jcmturner/gokrb5/v8/client/settings.go | 93 + .../jcmturner/gokrb5/v8/config/error.go | 30 + .../jcmturner/gokrb5/v8/config/hosts.go | 141 + .../jcmturner/gokrb5/v8/config/krb5conf.go | 728 +++++ .../jcmturner/gokrb5/v8/credentials/ccache.go | 333 +++ .../gokrb5/v8/credentials/credentials.go | 405 +++ .../v8/crypto/aes128-cts-hmac-sha1-96.go | 129 + .../v8/crypto/aes128-cts-hmac-sha256-128.go | 132 + .../v8/crypto/aes256-cts-hmac-sha1-96.go | 129 + .../v8/crypto/aes256-cts-hmac-sha384-192.go | 132 + .../gokrb5/v8/crypto/common/common.go | 132 + .../jcmturner/gokrb5/v8/crypto/crypto.go | 175 ++ .../gokrb5/v8/crypto/des3-cbc-sha1-kd.go | 139 + .../jcmturner/gokrb5/v8/crypto/etype/etype.go | 29 + .../jcmturner/gokrb5/v8/crypto/rc4-hmac.go | 133 + .../gokrb5/v8/crypto/rfc3961/encryption.go | 119 + .../gokrb5/v8/crypto/rfc3961/keyDerivation.go | 169 ++ .../gokrb5/v8/crypto/rfc3961/nfold.go | 107 + .../gokrb5/v8/crypto/rfc3962/encryption.go | 89 + .../gokrb5/v8/crypto/rfc3962/keyDerivation.go | 51 + .../gokrb5/v8/crypto/rfc4757/checksum.go | 40 + .../gokrb5/v8/crypto/rfc4757/encryption.go | 80 + .../gokrb5/v8/crypto/rfc4757/keyDerivation.go | 40 + .../gokrb5/v8/crypto/rfc4757/msgtype.go | 20 + .../gokrb5/v8/crypto/rfc8009/encryption.go | 125 + .../gokrb5/v8/crypto/rfc8009/keyDerivation.go | 135 + 
.../jcmturner/gokrb5/v8/gssapi/MICToken.go | 174 ++ .../jcmturner/gokrb5/v8/gssapi/README.md | 20 + .../gokrb5/v8/gssapi/contextFlags.go | 27 + .../jcmturner/gokrb5/v8/gssapi/gssapi.go | 202 ++ .../jcmturner/gokrb5/v8/gssapi/wrapToken.go | 195 ++ .../gokrb5/v8/iana/addrtype/constants.go | 15 + .../gokrb5/v8/iana/adtype/constants.go | 23 + .../gokrb5/v8/iana/asnAppTag/constants.go | 24 + .../gokrb5/v8/iana/chksumtype/constants.go | 32 + .../jcmturner/gokrb5/v8/iana/constants.go | 5 + .../gokrb5/v8/iana/errorcode/constants.go | 155 ++ .../gokrb5/v8/iana/etypeID/constants.go | 101 + .../gokrb5/v8/iana/flags/constants.go | 36 + .../gokrb5/v8/iana/keyusage/constants.go | 42 + .../gokrb5/v8/iana/msgtype/constants.go | 18 + .../gokrb5/v8/iana/nametype/constants.go | 15 + .../gokrb5/v8/iana/patype/constants.go | 77 + .../gokrb5/v8/kadmin/changepasswddata.go | 23 + .../jcmturner/gokrb5/v8/kadmin/message.go | 114 + .../jcmturner/gokrb5/v8/kadmin/passwd.go | 68 + .../jcmturner/gokrb5/v8/keytab/keytab.go | 530 ++++ .../jcmturner/gokrb5/v8/krberror/error.go | 67 + .../jcmturner/gokrb5/v8/messages/APRep.go | 49 + .../jcmturner/gokrb5/v8/messages/APReq.go | 199 ++ .../jcmturner/gokrb5/v8/messages/KDCRep.go | 360 +++ .../jcmturner/gokrb5/v8/messages/KDCReq.go | 432 +++ .../jcmturner/gokrb5/v8/messages/KRBCred.go | 102 + .../jcmturner/gokrb5/v8/messages/KRBError.go | 94 + .../jcmturner/gokrb5/v8/messages/KRBPriv.go | 108 + .../jcmturner/gokrb5/v8/messages/KRBSafe.go | 43 + .../jcmturner/gokrb5/v8/messages/Ticket.go | 262 ++ .../jcmturner/gokrb5/v8/pac/client_claims.go | 34 + .../jcmturner/gokrb5/v8/pac/client_info.go | 31 + .../gokrb5/v8/pac/credentials_info.go | 86 + .../jcmturner/gokrb5/v8/pac/device_claims.go | 34 + .../jcmturner/gokrb5/v8/pac/device_info.go | 32 + .../gokrb5/v8/pac/kerb_validation_info.go | 110 + .../jcmturner/gokrb5/v8/pac/pac_type.go | 251 ++ .../gokrb5/v8/pac/s4u_delegation_info.go | 26 + .../jcmturner/gokrb5/v8/pac/signature_data.go | 67 + .../gokrb5/v8/pac/supplemental_cred.go | 87 + .../jcmturner/gokrb5/v8/pac/upn_dns_info.go | 73 + .../jcmturner/gokrb5/v8/service/APExchange.go | 61 + .../gokrb5/v8/service/authenticator.go | 118 + .../jcmturner/gokrb5/v8/service/cache.go | 128 + .../jcmturner/gokrb5/v8/service/settings.go | 163 ++ .../jcmturner/gokrb5/v8/spnego/http.go | 401 +++ .../jcmturner/gokrb5/v8/spnego/krb5Token.go | 218 ++ .../gokrb5/v8/spnego/negotiationToken.go | 302 +++ .../jcmturner/gokrb5/v8/spnego/spnego.go | 203 ++ .../gokrb5/v8/types/Authenticator.go | 81 + .../gokrb5/v8/types/AuthorizationData.go | 55 + .../jcmturner/gokrb5/v8/types/Cryptosystem.go | 72 + .../jcmturner/gokrb5/v8/types/HostAddress.go | 180 ++ .../gokrb5/v8/types/KerberosFlags.go | 68 + .../jcmturner/gokrb5/v8/types/PAData.go | 155 ++ .../gokrb5/v8/types/PrincipalName.go | 67 + .../jcmturner/gokrb5/v8/types/TypedData.go | 18 + vendor/github.com/jcmturner/rpc/v2/LICENSE | 201 ++ .../jcmturner/rpc/v2/mstypes/claims.go | 152 ++ .../jcmturner/rpc/v2/mstypes/common.go | 12 + .../jcmturner/rpc/v2/mstypes/filetime.go | 52 + .../rpc/v2/mstypes/group_membership.go | 19 + .../rpc/v2/mstypes/kerb_sid_and_attributes.go | 23 + .../jcmturner/rpc/v2/mstypes/reader.go | 109 + .../rpc/v2/mstypes/rpc_unicode_string.go | 13 + .../jcmturner/rpc/v2/mstypes/sid.go | 36 + .../rpc/v2/mstypes/user_session_key.go | 11 + .../github.com/jcmturner/rpc/v2/ndr/arrays.go | 413 +++ .../jcmturner/rpc/v2/ndr/decoder.go | 393 +++ .../github.com/jcmturner/rpc/v2/ndr/error.go | 18 + .../github.com/jcmturner/rpc/v2/ndr/header.go | 116 + 
.../github.com/jcmturner/rpc/v2/ndr/pipe.go | 31 + .../jcmturner/rpc/v2/ndr/primitives.go | 211 ++ .../jcmturner/rpc/v2/ndr/rawbytes.go | 61 + .../jcmturner/rpc/v2/ndr/strings.go | 70 + .../github.com/jcmturner/rpc/v2/ndr/tags.go | 69 + .../github.com/jcmturner/rpc/v2/ndr/union.go | 57 + vendor/github.com/json-iterator/go/README.md | 2 - vendor/github.com/json-iterator/go/go.mod | 2 +- vendor/github.com/json-iterator/go/go.sum | 5 +- vendor/github.com/k0sproject/dig/LICENSE | 201 ++ vendor/github.com/k0sproject/dig/README.md | 61 + vendor/github.com/k0sproject/dig/go.mod | 8 + vendor/github.com/k0sproject/dig/go.sum | 13 + vendor/github.com/k0sproject/dig/mapping.go | 143 + vendor/github.com/k0sproject/k0sctl/LICENSE | 209 ++ .../v1beta1/cluster/flags.go | 124 + .../v1beta1/cluster/hook.go | 12 + .../v1beta1/cluster/host.go | 541 ++++ .../v1beta1/cluster/hosts.go | 136 + .../v1beta1/cluster/k0s.go | 216 ++ .../v1beta1/cluster/spec.go | 82 + .../v1beta1/cluster/uploadfile.go | 213 ++ .../k0sproject/k0sctl/version/version.go | 17 + vendor/github.com/k0sproject/rig/LICENSE | 191 ++ vendor/github.com/k0sproject/rig/README.md | 76 + .../github.com/k0sproject/rig/connection.go | 334 +++ vendor/github.com/k0sproject/rig/exec/exec.go | 285 ++ vendor/github.com/k0sproject/rig/go.mod | 36 + vendor/github.com/k0sproject/rig/go.sum | 91 + vendor/github.com/k0sproject/rig/localhost.go | 194 ++ vendor/github.com/k0sproject/rig/log.go | 12 + vendor/github.com/k0sproject/rig/log/log.go | 48 + .../github.com/k0sproject/rig/os/fileinfo.go | 35 + vendor/github.com/k0sproject/rig/os/host.go | 16 + .../k0sproject/rig/os/initsystem/host.go | 8 + .../k0sproject/rig/os/initsystem/openrc.go | 67 + .../k0sproject/rig/os/initsystem/systemd.go | 73 + vendor/github.com/k0sproject/rig/os/linux.go | 394 +++ .../k0sproject/rig/os/registry/registry.go | 34 + .../github.com/k0sproject/rig/os/windows.go | 320 +++ vendor/github.com/k0sproject/rig/osversion.go | 17 + .../k0sproject/rig/powershell/powershell.go | 123 + vendor/github.com/k0sproject/rig/resolver.go | 136 + vendor/github.com/k0sproject/rig/signals.go | 52 + .../k0sproject/rig/signals_windows.go | 26 + vendor/github.com/k0sproject/rig/ssh.go | 591 ++++ vendor/github.com/k0sproject/rig/winrm.go | 440 +++ vendor/github.com/k0sproject/version/LICENSE | 201 ++ .../github.com/k0sproject/version/README.md | 47 + .../k0sproject/version/collection.go | 31 + vendor/github.com/k0sproject/version/go.mod | 14 + vendor/github.com/k0sproject/version/go.sum | 13 + .../github.com/k0sproject/version/latest.go | 73 + .../github.com/k0sproject/version/version.go | 121 + .../github.com/kballard/go-shellquote/LICENSE | 19 + .../github.com/kballard/go-shellquote/README | 36 + .../github.com/kballard/go-shellquote/doc.go | 3 + .../kballard/go-shellquote/quote.go | 102 + .../kballard/go-shellquote/unquote.go | 156 ++ vendor/github.com/leodido/go-urn/.gitignore | 11 + vendor/github.com/leodido/go-urn/.travis.yml | 16 + vendor/github.com/leodido/go-urn/LICENSE | 21 + vendor/github.com/leodido/go-urn/README.md | 55 + vendor/github.com/leodido/go-urn/go.mod | 5 + vendor/github.com/leodido/go-urn/go.sum | 11 + vendor/github.com/leodido/go-urn/machine.go | 1691 ++++++++++++ .../github.com/leodido/go-urn/machine.go.rl | 159 ++ vendor/github.com/leodido/go-urn/makefile | 39 + vendor/github.com/leodido/go-urn/urn.go | 86 + vendor/github.com/masterzen/simplexml/LICENSE | 202 ++ .../masterzen/simplexml/dom/document.go | 35 + .../masterzen/simplexml/dom/element.go | 200 ++ 
.../masterzen/simplexml/dom/namespace.go | 10 + vendor/github.com/masterzen/winrm/.gitignore | 28 + vendor/github.com/masterzen/winrm/.travis.yml | 29 + vendor/github.com/masterzen/winrm/LICENSE | 202 ++ vendor/github.com/masterzen/winrm/Makefile | 34 + vendor/github.com/masterzen/winrm/README.md | 330 +++ vendor/github.com/masterzen/winrm/auth.go | 123 + vendor/github.com/masterzen/winrm/client.go | 236 ++ vendor/github.com/masterzen/winrm/command.go | 261 ++ vendor/github.com/masterzen/winrm/endpoint.go | 63 + vendor/github.com/masterzen/winrm/error.go | 13 + vendor/github.com/masterzen/winrm/go.mod | 23 + vendor/github.com/masterzen/winrm/go.sum | 88 + vendor/github.com/masterzen/winrm/http.go | 130 + vendor/github.com/masterzen/winrm/kerberos.go | 129 + vendor/github.com/masterzen/winrm/ntlm.go | 45 + .../github.com/masterzen/winrm/parameters.go | 27 + .../github.com/masterzen/winrm/powershell.go | 27 + vendor/github.com/masterzen/winrm/request.go | 161 ++ vendor/github.com/masterzen/winrm/response.go | 128 + vendor/github.com/masterzen/winrm/shell.go | 36 + .../github.com/masterzen/winrm/soap/header.go | 182 ++ .../masterzen/winrm/soap/message.go | 73 + .../masterzen/winrm/soap/namespaces.go | 81 + .../github.com/modern-go/reflect2/.travis.yml | 2 +- .../github.com/modern-go/reflect2/Gopkg.lock | 8 +- .../github.com/modern-go/reflect2/Gopkg.toml | 4 - vendor/github.com/modern-go/reflect2/go.mod | 3 + .../modern-go/reflect2/go_above_118.go | 23 + .../modern-go/reflect2/go_above_17.go | 8 - .../modern-go/reflect2/go_above_19.go | 3 + .../modern-go/reflect2/go_below_118.go | 21 + .../modern-go/reflect2/go_below_17.go | 9 - .../modern-go/reflect2/go_below_19.go | 14 - .../github.com/modern-go/reflect2/reflect2.go | 20 +- vendor/github.com/modern-go/reflect2/test.sh | 12 - .../github.com/modern-go/reflect2/type_map.go | 51 +- .../modern-go/reflect2/unsafe_link.go | 26 +- .../modern-go/reflect2/unsafe_map.go | 8 - .../russross/blackfriday/v2/README.md | 90 +- .../russross/blackfriday/v2/block.go | 30 +- .../github.com/russross/blackfriday/v2/doc.go | 28 + .../russross/blackfriday/v2/entities.go | 2236 +++++++++++++++ .../github.com/russross/blackfriday/v2/esc.go | 42 +- .../russross/blackfriday/v2/html.go | 9 +- .../russross/blackfriday/v2/inline.go | 2 +- .../russross/blackfriday/v2/node.go | 12 +- .../sanitized_anchor_name/.travis.yml | 16 - .../shurcooL/sanitized_anchor_name/README.md | 36 - .../shurcooL/sanitized_anchor_name/go.mod | 1 - .../shurcooL/sanitized_anchor_name/main.go | 29 - vendor/golang.org/x/crypto/md4/md4.go | 122 + vendor/golang.org/x/crypto/md4/md4block.go | 89 + vendor/golang.org/x/crypto/sha3/doc.go | 66 + vendor/golang.org/x/crypto/sha3/hashes.go | 97 + .../x/crypto/sha3/hashes_generic.go | 28 + vendor/golang.org/x/crypto/sha3/keccakf.go | 413 +++ .../golang.org/x/crypto/sha3/keccakf_amd64.go | 14 + .../golang.org/x/crypto/sha3/keccakf_amd64.s | 391 +++ vendor/golang.org/x/crypto/sha3/register.go | 19 + vendor/golang.org/x/crypto/sha3/sha3.go | 193 ++ vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 285 ++ vendor/golang.org/x/crypto/sha3/sha3_s390x.s | 34 + vendor/golang.org/x/crypto/sha3/shake.go | 173 ++ .../golang.org/x/crypto/sha3/shake_generic.go | 20 + vendor/golang.org/x/crypto/sha3/xor.go | 24 + .../golang.org/x/crypto/sha3/xor_generic.go | 28 + .../golang.org/x/crypto/sha3/xor_unaligned.go | 68 + vendor/golang.org/x/oauth2/README.md | 10 +- vendor/golang.org/x/oauth2/go.mod | 7 +- vendor/golang.org/x/oauth2/go.sum | 359 ++- 
.../x/oauth2/internal/client_appengine.go | 1 + vendor/golang.org/x/term/codereview.cfg | 1 + vendor/golang.org/x/term/go.mod | 4 +- vendor/golang.org/x/term/go.sum | 4 +- vendor/golang.org/x/term/term.go | 6 +- vendor/golang.org/x/term/term_solaris.go | 111 - vendor/golang.org/x/term/term_unix.go | 4 +- vendor/golang.org/x/term/term_unix_linux.go | 10 - .../{term_unix_aix.go => term_unix_other.go} | 5 +- vendor/golang.org/x/term/term_unix_zos.go | 10 - .../x/text/internal/language/language.go | 43 +- .../x/text/internal/language/parse.go | 7 + vendor/golang.org/x/text/language/parse.go | 22 + vendor/golang.org/x/time/rate/rate.go | 20 +- vendor/k8s.io/klog/v2/OWNERS | 2 +- vendor/k8s.io/klog/v2/go.mod | 2 +- vendor/k8s.io/klog/v2/go.sum | 6 +- vendor/k8s.io/klog/v2/klog.go | 106 +- vendor/modules.txt | 172 +- .../v4/typed/reconcile_schema.go | 71 +- .../structured-merge-diff/v4/typed/remove.go | 37 +- 520 files changed, 65340 insertions(+), 1083 deletions(-) create mode 100644 pkg/runtime/k0s/common.go create mode 100644 pkg/runtime/k0s/init.go create mode 100644 pkg/runtime/k0s/k0sctl/common.go create mode 100644 pkg/runtime/k0s/k0sctl/k0sctl_config.go create mode 100644 pkg/runtime/k0s/k0sctl/k0sctl_config_test.go create mode 100644 pkg/runtime/k0s/k0sctl/v1beta1/types.go create mode 100644 pkg/runtime/k0s/registry.go create mode 100644 pkg/runtime/k0s/runtime.go create mode 100644 pkg/runtime/k0s/runtime_test.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/.travis.yml create mode 100644 vendor/github.com/Azure/go-ntlmssp/LICENSE create mode 100644 vendor/github.com/Azure/go-ntlmssp/README.md create mode 100644 vendor/github.com/Azure/go-ntlmssp/authenticate_message.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/authheader.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/avids.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/challenge_message.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/messageheader.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/negotiate_message.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/negotiator.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/nlmp.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/unicode.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/varfield.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/version.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/.travis.yml create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/LICENSE create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/README.md create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/coverage.sh create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/goxpath.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/execxp.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil/findUtil.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/boolfns.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/intfns.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/nodesetfns.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/numfns.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/stringfns.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/operators.go create 
mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/paths.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/internal/xsort/xsort.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/lexer/lexer.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/lexer/paths.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/lexer/xmlchars.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/marshal.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/parser/ast.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/parser/parser.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/parser/pathexpr/pathexpr.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/tree/interfaces.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/tree/tree.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/tree/xfn.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder/xmlbuilder.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele/xmlele.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode/xmlnode.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmltree.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/tree/xtypes.go create mode 100644 vendor/github.com/ChrisTrenkamp/goxpath/xconst/xconst.go create mode 100644 vendor/github.com/Masterminds/semver/.travis.yml create mode 100644 vendor/github.com/Masterminds/semver/CHANGELOG.md create mode 100644 vendor/github.com/Masterminds/semver/LICENSE.txt create mode 100644 vendor/github.com/Masterminds/semver/Makefile create mode 100644 vendor/github.com/Masterminds/semver/README.md create mode 100644 vendor/github.com/Masterminds/semver/appveyor.yml create mode 100644 vendor/github.com/Masterminds/semver/collection.go create mode 100644 vendor/github.com/Masterminds/semver/constraints.go create mode 100644 vendor/github.com/Masterminds/semver/doc.go create mode 100644 vendor/github.com/Masterminds/semver/version.go create mode 100644 vendor/github.com/Masterminds/semver/version_fuzz.go rename vendor/github.com/{shurcooL/sanitized_anchor_name => acarl005/stripansi}/LICENSE (96%) create mode 100644 vendor/github.com/acarl005/stripansi/README.md create mode 100644 vendor/github.com/acarl005/stripansi/stripansi.go create mode 100644 vendor/github.com/alessio/shellescape/.gitignore create mode 100644 vendor/github.com/alessio/shellescape/.golangci.yml create mode 100644 vendor/github.com/alessio/shellescape/.goreleaser.yml create mode 100644 vendor/github.com/alessio/shellescape/AUTHORS create mode 100644 vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/alessio/shellescape/LICENSE create mode 100644 vendor/github.com/alessio/shellescape/README.md create mode 100644 vendor/github.com/alessio/shellescape/go.mod create mode 100644 vendor/github.com/alessio/shellescape/shellescape.go create mode 100644 vendor/github.com/asaskevich/govalidator/.gitignore create mode 100644 vendor/github.com/asaskevich/govalidator/.travis.yml create mode 100644 vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md create mode 100644 vendor/github.com/asaskevich/govalidator/LICENSE create mode 100644 vendor/github.com/asaskevich/govalidator/README.md create mode 100644 vendor/github.com/asaskevich/govalidator/arrays.go create mode 100644 vendor/github.com/asaskevich/govalidator/converter.go create mode 100644 
vendor/github.com/asaskevich/govalidator/doc.go create mode 100644 vendor/github.com/asaskevich/govalidator/error.go create mode 100644 vendor/github.com/asaskevich/govalidator/go.mod create mode 100644 vendor/github.com/asaskevich/govalidator/numerics.go create mode 100644 vendor/github.com/asaskevich/govalidator/patterns.go create mode 100644 vendor/github.com/asaskevich/govalidator/types.go create mode 100644 vendor/github.com/asaskevich/govalidator/utils.go create mode 100644 vendor/github.com/asaskevich/govalidator/validator.go create mode 100644 vendor/github.com/asaskevich/govalidator/wercker.yml create mode 100644 vendor/github.com/avast/retry-go/.gitignore create mode 100644 vendor/github.com/avast/retry-go/.godocdown.tmpl create mode 100644 vendor/github.com/avast/retry-go/.travis.yml create mode 100644 vendor/github.com/avast/retry-go/Gopkg.toml create mode 100644 vendor/github.com/avast/retry-go/LICENSE create mode 100644 vendor/github.com/avast/retry-go/Makefile create mode 100644 vendor/github.com/avast/retry-go/README.md create mode 100644 vendor/github.com/avast/retry-go/VERSION create mode 100644 vendor/github.com/avast/retry-go/appveyor.yml create mode 100644 vendor/github.com/avast/retry-go/options.go create mode 100644 vendor/github.com/avast/retry-go/retry.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/.gitignore create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/.travis.yml create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/LICENSE create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/README.md create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/doublestar.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/glob.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/globwalk.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/go.mod create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/match.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/utils.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/validate.go create mode 100644 vendor/github.com/creasty/defaults/.gitignore create mode 100644 vendor/github.com/creasty/defaults/.travis.yml create mode 100644 vendor/github.com/creasty/defaults/LICENSE create mode 100644 vendor/github.com/creasty/defaults/Makefile create mode 100644 vendor/github.com/creasty/defaults/README.md create mode 100644 vendor/github.com/creasty/defaults/defaults.go create mode 100644 vendor/github.com/creasty/defaults/go.mod create mode 100644 vendor/github.com/creasty/defaults/setter.go create mode 100644 vendor/github.com/go-logr/logr/CHANGELOG.md create mode 100644 vendor/github.com/go-logr/logr/CONTRIBUTING.md create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/.gitignore create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/.travis.yml create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/LICENSE create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/README.md create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/UPGRADE.md create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/absent.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/date.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/each.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/error.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/go.mod create mode 100644 
vendor/github.com/go-ozzo/ozzo-validation/v4/go.sum create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/in.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/is/rules.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/length.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/map.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/match.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/minmax.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/multipleof.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/not_in.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/not_nil.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/required.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/string.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/struct.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/util.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/validation.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/v4/when.go create mode 100644 vendor/github.com/go-playground/locales/.gitignore create mode 100644 vendor/github.com/go-playground/locales/.travis.yml create mode 100644 vendor/github.com/go-playground/locales/LICENSE create mode 100644 vendor/github.com/go-playground/locales/README.md create mode 100644 vendor/github.com/go-playground/locales/currency/currency.go create mode 100644 vendor/github.com/go-playground/locales/go.mod create mode 100644 vendor/github.com/go-playground/locales/go.sum create mode 100644 vendor/github.com/go-playground/locales/logo.png create mode 100644 vendor/github.com/go-playground/locales/rules.go create mode 100644 vendor/github.com/go-playground/universal-translator/.gitignore create mode 100644 vendor/github.com/go-playground/universal-translator/.travis.yml create mode 100644 vendor/github.com/go-playground/universal-translator/LICENSE create mode 100644 vendor/github.com/go-playground/universal-translator/Makefile create mode 100644 vendor/github.com/go-playground/universal-translator/README.md create mode 100644 vendor/github.com/go-playground/universal-translator/errors.go create mode 100644 vendor/github.com/go-playground/universal-translator/go.mod create mode 100644 vendor/github.com/go-playground/universal-translator/go.sum create mode 100644 vendor/github.com/go-playground/universal-translator/import_export.go create mode 100644 vendor/github.com/go-playground/universal-translator/logo.png create mode 100644 vendor/github.com/go-playground/universal-translator/translator.go create mode 100644 vendor/github.com/go-playground/universal-translator/universal_translator.go create mode 100644 vendor/github.com/go-playground/validator/v10/.gitignore create mode 100644 vendor/github.com/go-playground/validator/v10/LICENSE create mode 100644 vendor/github.com/go-playground/validator/v10/MAINTAINERS.md create mode 100644 vendor/github.com/go-playground/validator/v10/Makefile create mode 100644 vendor/github.com/go-playground/validator/v10/README.md create mode 100644 vendor/github.com/go-playground/validator/v10/baked_in.go create mode 100644 vendor/github.com/go-playground/validator/v10/cache.go create mode 100644 vendor/github.com/go-playground/validator/v10/country_codes.go create mode 100644 vendor/github.com/go-playground/validator/v10/currency_codes.go create mode 100644 
vendor/github.com/go-playground/validator/v10/doc.go create mode 100644 vendor/github.com/go-playground/validator/v10/errors.go create mode 100644 vendor/github.com/go-playground/validator/v10/field_level.go create mode 100644 vendor/github.com/go-playground/validator/v10/go.mod create mode 100644 vendor/github.com/go-playground/validator/v10/go.sum create mode 100644 vendor/github.com/go-playground/validator/v10/logo.png create mode 100644 vendor/github.com/go-playground/validator/v10/postcode_regexes.go create mode 100644 vendor/github.com/go-playground/validator/v10/regexes.go create mode 100644 vendor/github.com/go-playground/validator/v10/struct_level.go create mode 100644 vendor/github.com/go-playground/validator/v10/translations.go create mode 100644 vendor/github.com/go-playground/validator/v10/util.go create mode 100644 vendor/github.com/go-playground/validator/v10/validator.go create mode 100644 vendor/github.com/go-playground/validator/v10/validator_instance.go create mode 100644 vendor/github.com/gofrs/uuid/.gitignore create mode 100644 vendor/github.com/gofrs/uuid/LICENSE create mode 100644 vendor/github.com/gofrs/uuid/README.md create mode 100644 vendor/github.com/gofrs/uuid/codec.go create mode 100644 vendor/github.com/gofrs/uuid/fuzz.go create mode 100644 vendor/github.com/gofrs/uuid/generator.go create mode 100644 vendor/github.com/gofrs/uuid/sql.go create mode 100644 vendor/github.com/gofrs/uuid/uuid.go create mode 100644 vendor/github.com/google/gofuzz/bytesource/bytesource.go create mode 100644 vendor/github.com/hashicorp/go-uuid/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-uuid/LICENSE create mode 100644 vendor/github.com/hashicorp/go-uuid/README.md create mode 100644 vendor/github.com/hashicorp/go-uuid/go.mod create mode 100644 vendor/github.com/hashicorp/go-uuid/uuid.go create mode 100644 vendor/github.com/hashicorp/go-version/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-version/LICENSE create mode 100644 vendor/github.com/hashicorp/go-version/README.md create mode 100644 vendor/github.com/hashicorp/go-version/constraint.go create mode 100644 vendor/github.com/hashicorp/go-version/go.mod create mode 100644 vendor/github.com/hashicorp/go-version/version.go create mode 100644 vendor/github.com/hashicorp/go-version/version_collection.go create mode 100644 vendor/github.com/jcmturner/aescts/v2/LICENSE create mode 100644 vendor/github.com/jcmturner/aescts/v2/aescts.go create mode 100644 vendor/github.com/jcmturner/aescts/v2/go.mod create mode 100644 vendor/github.com/jcmturner/aescts/v2/go.sum create mode 100644 vendor/github.com/jcmturner/dnsutils/v2/LICENSE create mode 100644 vendor/github.com/jcmturner/dnsutils/v2/go.mod create mode 100644 vendor/github.com/jcmturner/dnsutils/v2/go.sum create mode 100644 vendor/github.com/jcmturner/dnsutils/v2/srv.go create mode 100644 vendor/github.com/jcmturner/gofork/LICENSE create mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/README.md create mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go create mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/common.go create mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go create mode 100644 vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go create mode 100644 vendor/github.com/jcmturner/goidentity/v6/LICENSE create mode 100644 vendor/github.com/jcmturner/goidentity/v6/README.md create mode 100644 vendor/github.com/jcmturner/goidentity/v6/authenticator.go create mode 100644 
vendor/github.com/jcmturner/goidentity/v6/go.mod create mode 100644 vendor/github.com/jcmturner/goidentity/v6/go.sum create mode 100644 vendor/github.com/jcmturner/goidentity/v6/identity.go create mode 100644 vendor/github.com/jcmturner/goidentity/v6/user.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/LICENSE create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/asn1tools/tools.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/ASExchange.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/cache.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/client.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/network.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/passwd.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/session.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/settings.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/config/error.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/config/hosts.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/credentials/credentials.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha1-96.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha256-128.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha1-96.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha384-192.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/common/common.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/crypto.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/des3-cbc-sha1-kd.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rc4-hmac.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/encryption.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/keyDerivation.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/nfold.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/encryption.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/keyDerivation.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/checksum.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/encryption.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/keyDerivation.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/msgtype.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/encryption.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/keyDerivation.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/gssapi/MICToken.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/gssapi/README.md create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/gssapi/contextFlags.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/gssapi/gssapi.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/gssapi/wrapToken.go create mode 100644 
vendor/github.com/jcmturner/gokrb5/v8/iana/addrtype/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/adtype/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/asnAppTag/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/chksumtype/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/errorcode/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/etypeID/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/flags/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/keyusage/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/msgtype/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/nametype/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/patype/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/kadmin/changepasswddata.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/kadmin/message.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/kadmin/passwd.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/krberror/error.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/APRep.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/APReq.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KDCRep.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KDCReq.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KRBCred.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KRBError.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KRBPriv.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KRBSafe.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/Ticket.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/client_claims.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/client_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/credentials_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/device_claims.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/device_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/kerb_validation_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/pac_type.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/s4u_delegation_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/signature_data.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/upn_dns_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/service/APExchange.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/service/authenticator.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/service/cache.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/service/settings.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/spnego/http.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/spnego/krb5Token.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/spnego/negotiationToken.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/spnego/spnego.go create mode 100644 
vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/Cryptosystem.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/HostAddress.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/PAData.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/PrincipalName.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/TypedData.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/LICENSE create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/claims.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/common.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/filetime.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/group_membership.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/kerb_sid_and_attributes.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/reader.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/rpc_unicode_string.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/sid.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/error.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/header.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/strings.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/tags.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/union.go create mode 100644 vendor/github.com/k0sproject/dig/LICENSE create mode 100644 vendor/github.com/k0sproject/dig/README.md create mode 100644 vendor/github.com/k0sproject/dig/go.mod create mode 100644 vendor/github.com/k0sproject/dig/go.sum create mode 100644 vendor/github.com/k0sproject/dig/mapping.go create mode 100644 vendor/github.com/k0sproject/k0sctl/LICENSE create mode 100644 vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags.go create mode 100644 vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/hook.go create mode 100644 vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go create mode 100644 vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/hosts.go create mode 100644 vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go create mode 100644 vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/spec.go create mode 100644 vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/uploadfile.go create mode 100644 vendor/github.com/k0sproject/k0sctl/version/version.go create mode 100644 vendor/github.com/k0sproject/rig/LICENSE create mode 100644 vendor/github.com/k0sproject/rig/README.md create mode 100644 vendor/github.com/k0sproject/rig/connection.go create mode 100644 vendor/github.com/k0sproject/rig/exec/exec.go create mode 100644 
vendor/github.com/k0sproject/rig/go.mod
 create mode 100644 vendor/github.com/k0sproject/rig/go.sum
 create mode 100644 vendor/github.com/k0sproject/rig/localhost.go
 create mode 100644 vendor/github.com/k0sproject/rig/log.go
 create mode 100644 vendor/github.com/k0sproject/rig/log/log.go
 create mode 100644 vendor/github.com/k0sproject/rig/os/fileinfo.go
 create mode 100644 vendor/github.com/k0sproject/rig/os/host.go
 create mode 100644 vendor/github.com/k0sproject/rig/os/initsystem/host.go
 create mode 100644 vendor/github.com/k0sproject/rig/os/initsystem/openrc.go
 create mode 100644 vendor/github.com/k0sproject/rig/os/initsystem/systemd.go
 create mode 100644 vendor/github.com/k0sproject/rig/os/linux.go
 create mode 100644 vendor/github.com/k0sproject/rig/os/registry/registry.go
 create mode 100644 vendor/github.com/k0sproject/rig/os/windows.go
 create mode 100644 vendor/github.com/k0sproject/rig/osversion.go
 create mode 100644 vendor/github.com/k0sproject/rig/powershell/powershell.go
 create mode 100644 vendor/github.com/k0sproject/rig/resolver.go
 create mode 100644 vendor/github.com/k0sproject/rig/signals.go
 create mode 100644 vendor/github.com/k0sproject/rig/signals_windows.go
 create mode 100644 vendor/github.com/k0sproject/rig/ssh.go
 create mode 100644 vendor/github.com/k0sproject/rig/winrm.go
 create mode 100644 vendor/github.com/k0sproject/version/LICENSE
 create mode 100644 vendor/github.com/k0sproject/version/README.md
 create mode 100644 vendor/github.com/k0sproject/version/collection.go
 create mode 100644 vendor/github.com/k0sproject/version/go.mod
 create mode 100644 vendor/github.com/k0sproject/version/go.sum
 create mode 100644 vendor/github.com/k0sproject/version/latest.go
 create mode 100644 vendor/github.com/k0sproject/version/version.go
 create mode 100644 vendor/github.com/kballard/go-shellquote/LICENSE
 create mode 100644 vendor/github.com/kballard/go-shellquote/README
 create mode 100644 vendor/github.com/kballard/go-shellquote/doc.go
 create mode 100644 vendor/github.com/kballard/go-shellquote/quote.go
 create mode 100644 vendor/github.com/kballard/go-shellquote/unquote.go
 create mode 100644 vendor/github.com/leodido/go-urn/.gitignore
 create mode 100644 vendor/github.com/leodido/go-urn/.travis.yml
 create mode 100644 vendor/github.com/leodido/go-urn/LICENSE
 create mode 100644 vendor/github.com/leodido/go-urn/README.md
 create mode 100644 vendor/github.com/leodido/go-urn/go.mod
 create mode 100644 vendor/github.com/leodido/go-urn/go.sum
 create mode 100644 vendor/github.com/leodido/go-urn/machine.go
 create mode 100644 vendor/github.com/leodido/go-urn/machine.go.rl
 create mode 100644 vendor/github.com/leodido/go-urn/makefile
 create mode 100644 vendor/github.com/leodido/go-urn/urn.go
 create mode 100644 vendor/github.com/masterzen/simplexml/LICENSE
 create mode 100644 vendor/github.com/masterzen/simplexml/dom/document.go
 create mode 100644 vendor/github.com/masterzen/simplexml/dom/element.go
 create mode 100644 vendor/github.com/masterzen/simplexml/dom/namespace.go
 create mode 100644 vendor/github.com/masterzen/winrm/.gitignore
 create mode 100644 vendor/github.com/masterzen/winrm/.travis.yml
 create mode 100644 vendor/github.com/masterzen/winrm/LICENSE
 create mode 100644 vendor/github.com/masterzen/winrm/Makefile
 create mode 100644 vendor/github.com/masterzen/winrm/README.md
 create mode 100644 vendor/github.com/masterzen/winrm/auth.go
 create mode 100644 vendor/github.com/masterzen/winrm/client.go
 create mode 100644 vendor/github.com/masterzen/winrm/command.go
 create mode 100644 vendor/github.com/masterzen/winrm/endpoint.go
 create mode 100644 vendor/github.com/masterzen/winrm/error.go
 create mode 100644 vendor/github.com/masterzen/winrm/go.mod
 create mode 100644 vendor/github.com/masterzen/winrm/go.sum
 create mode 100644 vendor/github.com/masterzen/winrm/http.go
 create mode 100644 vendor/github.com/masterzen/winrm/kerberos.go
 create mode 100644 vendor/github.com/masterzen/winrm/ntlm.go
 create mode 100644 vendor/github.com/masterzen/winrm/parameters.go
 create mode 100644 vendor/github.com/masterzen/winrm/powershell.go
 create mode 100644 vendor/github.com/masterzen/winrm/request.go
 create mode 100644 vendor/github.com/masterzen/winrm/response.go
 create mode 100644 vendor/github.com/masterzen/winrm/shell.go
 create mode 100644 vendor/github.com/masterzen/winrm/soap/header.go
 create mode 100644 vendor/github.com/masterzen/winrm/soap/message.go
 create mode 100644 vendor/github.com/masterzen/winrm/soap/namespaces.go
 create mode 100644 vendor/github.com/modern-go/reflect2/go.mod
 create mode 100644 vendor/github.com/modern-go/reflect2/go_above_118.go
 delete mode 100644 vendor/github.com/modern-go/reflect2/go_above_17.go
 create mode 100644 vendor/github.com/modern-go/reflect2/go_below_118.go
 delete mode 100644 vendor/github.com/modern-go/reflect2/go_below_17.go
 delete mode 100644 vendor/github.com/modern-go/reflect2/go_below_19.go
 delete mode 100644 vendor/github.com/modern-go/reflect2/test.sh
 create mode 100644 vendor/github.com/russross/blackfriday/v2/entities.go
 delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml
 delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/README.md
 delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/go.mod
 delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/main.go
 create mode 100644 vendor/golang.org/x/crypto/md4/md4.go
 create mode 100644 vendor/golang.org/x/crypto/md4/md4block.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/doc.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/hashes.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/hashes_generic.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
 create mode 100644 vendor/golang.org/x/crypto/sha3/register.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/sha3.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.s
 create mode 100644 vendor/golang.org/x/crypto/sha3/shake.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/shake_generic.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/xor.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/xor_generic.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/xor_unaligned.go
 create mode 100644 vendor/golang.org/x/term/codereview.cfg
 delete mode 100644 vendor/golang.org/x/term/term_solaris.go
 delete mode 100644 vendor/golang.org/x/term/term_unix_linux.go
 rename vendor/golang.org/x/term/{term_unix_aix.go => term_unix_other.go} (63%)
 delete mode 100644 vendor/golang.org/x/term/term_unix_zos.go

diff --git a/go.mod b/go.mod
index f4896bd5f82..737746e2e62 100644
--- a/go.mod
+++ b/go.mod
@@ -13,8 +13,12 @@ require (
 	github.com/docker/go-connections v0.4.0
 	github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
 	github.com/go-git/go-git/v5 v5.4.2
+	github.com/go-ozzo/ozzo-validation/v4 v4.3.0
 	github.com/google/uuid v1.2.0
 	github.com/imdario/mergo v0.3.12
+	github.com/k0sproject/dig v0.2.0
+	github.com/k0sproject/k0sctl v0.13.1
+	github.com/k0sproject/rig v0.6.4
 	github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible
 	github.com/lestrrat-go/strftime v1.0.6 // indirect
 	github.com/mitchellh/go-homedir v1.1.0
@@ -40,23 +44,32 @@ require (
 	github.com/yusufpapurcu/wmi v1.2.2 // indirect
 	go.etcd.io/etcd/client/v3 v3.5.0
 	go.uber.org/zap v1.17.0
-	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
-	golang.org/x/net v0.0.0-20210510120150-4163338589ed
+	golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd
+	golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
+	golang.org/x/sys v0.0.0-20220209214540-3681064d5158
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
 	gotest.tools v2.2.0+incompatible
 	helm.sh/helm/v3 v3.6.2
 	k8s.io/api v0.21.0
-	k8s.io/apimachinery v0.21.0
+	k8s.io/apimachinery v0.22.1
 	k8s.io/cli-runtime v0.21.0
-	k8s.io/client-go v0.21.0
+	k8s.io/client-go v0.22.1
 	k8s.io/kube-proxy v0.21.0
 	k8s.io/kubelet v0.21.0
-	k8s.io/utils v0.0.0-20210111153108-fddb29f9d009
+	k8s.io/utils v0.0.0-20210820185131-d34e5cb4466e
 	sigs.k8s.io/controller-runtime v0.8.1
 	sigs.k8s.io/yaml v1.2.0
 )
 
-replace github.com/docker/docker => github.com/docker/docker v20.10.3-0.20211208011758-87521affb077+incompatible
+replace (
+	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20211208011758-87521affb077+incompatible
+	golang.org/x/crypto => golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
+	golang.org/x/net => golang.org/x/net v0.0.0-20210510120150-4163338589ed
+	golang.org/x/sys => golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
+	k8s.io/api => k8s.io/api v0.21.0
+	k8s.io/apimachinery => k8s.io/apimachinery v0.21.0
+	k8s.io/client-go => k8s.io/client-go v0.21.0
+	k8s.io/utils => k8s.io/utils v0.0.0-20210111153108-fddb29f9d009
+)
diff --git a/go.sum b/go.sum
index e6ea0b3c42b..f84410e6de5 100644
--- a/go.sum
+++ b/go.sum
@@ -13,17 +13,19 @@ cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK
 cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
 cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
 cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
 cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
 cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
 cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= @@ -35,6 +37,7 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.gitea.io/sdk/gitea v0.12.0/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= @@ -45,6 +48,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0/go.mod h1:0mMDvQFeLbbn1Wy8P2j3hwFhqBq+FKn8OZPno8WLmp8= +github.com/AlecAivazis/survey/v2 v2.3.2/go.mod h1:TH2kPCDU3Kqq7pLbnCWwZXDBjnhZtmsCle5EiYDJ2fg= github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= @@ -67,9 +71,7 @@ github.com/Azure/go-autorest v14.1.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= @@ -77,7 +79,6 @@ github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod 
h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= @@ -89,7 +90,6 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= @@ -101,9 +101,13 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e h1:ZU22z/2YRFLyf/P4ZwUYSdNCWsMEI0VeyrFoI2rAhJQ= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6 h1:w0E0fgc1YafGEh5cROhlROMWXiNoZqApk2PDN0M1+Ns= +github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= @@ -148,6 +152,7 @@ github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= @@ -164,8 +169,12 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX github.com/Shopify/toxiproxy 
v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/a8m/envsubst v1.2.0/go.mod h1:PpvLvNWa+Rvu/10qXmFbFiGICIU5hZvFJNPCCkUaObg= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= +github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= @@ -175,6 +184,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/aliyun/alibaba-cloud-sdk-go v1.61.985 h1:ETObK47vrphrw9wC+2/SinHL59YQtle2eBtSRucLTRQ= github.com/aliyun/alibaba-cloud-sdk-go v1.61.985/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= @@ -198,7 +209,11 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= +github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= @@ -227,6 +242,8 @@ github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAw 
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmatcuk/doublestar/v4 v4.0.2 h1:X0krlUVAVmtr2cRoTqR8aDMrDqnB36ht8wpWTiQ3jsA= +github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bombsimon/wsl/v2 v2.0.0/go.mod h1:mf25kr/SqFEPhhcxW1+7pxzGlW+hIl/hYTKY95VwV8U= github.com/bombsimon/wsl/v2 v2.2.0/go.mod h1:Azh8c3XGEJl9LyX0/sFC+CKMc7Ssgua0g+6abzXN4Pg= @@ -390,12 +407,15 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creasty/defaults v1.5.2 h1:/VfB6uxpyp6h0fr7SPp7n8WJBoV8jfxQXPCnkVSjyls= +github.com/creasty/defaults v1.5.2/go.mod h1:FPZ+Y0WNrbqOVw+c6av63eyHUAl6pMHZwqLPvXUZGfY= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= @@ -408,6 +428,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/deislabs/oras v0.11.1/go.mod h1:39lCtf8Q6WDC7ul9cnyWXONNzKvabEKk+AX+L0ImnQk= +github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI= github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= @@ -449,7 +470,6 @@ github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f/go.mod h github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust 
v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= @@ -458,7 +478,6 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -495,6 +514,8 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/gammazero/deque v0.1.0/go.mod h1:KQw7vFau1hHuM8xmI9RbgKFbAsQFWmBpqQ2KenFLk6M= +github.com/gammazero/workerpool v1.1.2/go.mod h1:UelbXcO0zCIGFcufcirHhq2/xtLXJdQ29qZNlXG9OjQ= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -531,8 +552,10 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.0.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.1.0 h1:nAbevmWlS2Ic4m4+/An5NXkaGqlqpbBgdcuThZxnZyI= +github.com/go-logr/logr v1.1.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= @@ -586,6 +609,16 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= 
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-ozzo/ozzo-validation/v4 v4.3.0 h1:byhDUpfEwjsVQb1vBunvIjh2BHQ9ead57VkAEY4V+Es= +github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.9.0 h1:NgTtmN58D0m8+UuxtYmGztBJB7VnPgjj221I1QHci2A= +github.com/go-playground/validator/v10 v10.9.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -624,6 +657,8 @@ github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQq github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= @@ -653,6 +688,7 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -722,16 +758,19 @@ github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwG github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= @@ -768,6 +807,10 @@ github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -813,7 +856,11 @@ github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerX github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= +github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod 
h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -827,6 +874,7 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c/go.mod h1:fHzc09UnyJyqyW+bFuq864eh+wC7dj65aXmXLRe5to0= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -849,6 +897,18 @@ github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea/go.mod h1:QMdK4dGB3 github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s= @@ -874,14 +934,25 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod 
h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/k0sproject/dig v0.2.0 h1:cNxEIl96g9kqSMfPSZLhpnZ0P8bWXKv08nxvsMHop5w= +github.com/k0sproject/dig v0.2.0/go.mod h1:rBcqaQlJpcKdt2x/OE/lPvhGU50u/e95CSm5g/r4s78= +github.com/k0sproject/k0sctl v0.13.1 h1:QJyzYrO3TG5VHHEl6c1toxJSJsSxyMqUTNtVQ/6ORMg= +github.com/k0sproject/k0sctl v0.13.1/go.mod h1:E803MsDkStpLdtssuZTFo8192IOM+vOs7Mc2b6cuQvM= +github.com/k0sproject/rig v0.6.4 h1:f0I0ruc4b7VjLDEVjojJjih/89z/oC5iDdsAlz/oEPA= +github.com/k0sproject/rig v0.6.4/go.mod h1:7x0Qph3bTFKLkR5OiGt0h4kF4Se/B6CepTYJDEVkp6c= +github.com/k0sproject/version v0.3.0 h1:6HAn8C29+WVksGCzbQvQ9feEJpUZ0iHD8GebIQMiftQ= +github.com/k0sproject/version v0.3.0/go.mod h1:oEjuz2ItQQtAnGyRgwEV9m5R6/9rjoFC6EiEEzbkFdI= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -903,10 +974,12 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -915,6 +988,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= 
+github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8= github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is= github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible h1:Y6sqxHMyB1D2YSzWkLibYKgg+SwmyFU9dF2hn6MdTj4= @@ -932,6 +1007,7 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= @@ -946,6 +1022,10 @@ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg= +github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= +github.com/masterzen/winrm v0.0.0-20211231115050-232efb40349e h1:au+BndCo30p6G49xKTj1ZigvPn/ekiO2Gt+V+pbujfQ= +github.com/masterzen/winrm v0.0.0-20211231115050-232efb40349e/go.mod h1:Iju3u6NzoTAvjuhsGCZc+7fReNnr/Bd6DsWj3WTokIU= github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= @@ -954,6 +1034,7 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= @@ -963,6 +1044,7 @@ github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-oci8 v0.0.7/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -981,6 +1063,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= @@ -1031,8 +1114,9 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= @@ -1151,6 +1235,7 @@ github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7 github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1224,13 +1309,17 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= 
github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.0.4/go.mod h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0fVb9d5fSEaLhoE= github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -1246,10 +1335,13 @@ github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvW github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83/go.mod h1:vvbZ2Ae7AzSq3/kywjUDxSNq2SJ27RxCz2un0H3ePqE= github.com/securego/gosec v0.0.0-20200401082031-e946c8c39989/go.mod h1:i9l/TNj+yDFh9SZXUTvspXTjbFXgZGP/UvhU1S65A4A= github.com/securego/gosec/v2 v2.3.0/go.mod h1:UzeVyUXbxukhLeHKV3VVqo7HdoQR9MrRfFmZYotn8ME= +github.com/segmentio/analytics-go v3.1.0+incompatible/go.mod h1:C7CYBtQWk4vRk2RyLu0qOcbHJ18E3F1HV2C/8JvKN48= +github.com/segmentio/backo-go v0.0.0-20200129164019-23eae7c10bd3/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= +github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg= github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -1258,7 +1350,6 @@ github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXY github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -1322,6 +1413,7 @@ github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1370,6 +1462,7 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= @@ -1409,6 +1502,7 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1446,6 +1540,7 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/contrib v0.21.0/go.mod h1:EH4yDYeNoaTqn/8yCWQmfNB78VHfGX2Jt2bvnvzBlGM= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0/go.mod h1:Vm5u/mtkj1OMhtao0v+BGo2LUoLCgHYXvRmj0jWITlE= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.21.0/go.mod h1:a9cocRplhIBkUAJmak+BPDx+LVL7cTmqUPB0uBcTA4k= @@ -1485,37 +1580,6 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go4.org 
v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI= golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto 
v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1555,62 +1619,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210510120150-4163338589ed h1:p9UgmWI9wKpfYmgaV/IZKGdXc5qEK45tDwwwDyjS26I= golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1622,8 +1630,9 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1637,111 +1646,13 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200917073148-efd3b9a0ff20/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201013081832-0aaa2718063a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210313202042-bd2e13477e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1750,16 +1661,18 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1834,9 +1747,13 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200502202811-ed308ab3e770/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= @@ -1874,6 +1791,9 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= 
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1881,8 +1801,9 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1916,8 +1837,13 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -1941,6 +1867,7 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= @@ -2024,26 +1951,13 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.0.0-20180904230853-4e7be11eab3f/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= -k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= -k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y= k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= k8s.io/apiextensions-apiserver v0.21.0 h1:Nd4uBuweg6ImzbxkC1W7xUNZcCV/8Vt10iTdTIVF3hw= k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc= -k8s.io/apimachinery v0.0.0-20180904193909-def12e63c512/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= -k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA= k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I= @@ -2053,13 +1967,6 @@ k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= k8s.io/cli-runtime v0.21.0 h1:/V2Kkxtf6x5NI2z+Sd/mIrq4FQyQ8jzZAUD6N5RnN7Y= k8s.io/cli-runtime v0.21.0/go.mod h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo= -k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/client-go v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc= -k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE= -k8s.io/client-go v0.20.4/go.mod 
h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= k8s.io/cloud-provider v0.17.4/go.mod h1:XEjKDzfD+b9MTLXQFlDGkk6Ho8SGMpaU8Uugx/KNK9U= @@ -2085,17 +1992,16 @@ k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.20.0 h1:tlyxlSvd63k7axjhuchckaRJm+a92z5GSOrTOQY5sHw= +k8s.io/klog/v2 v2.20.0/go.mod h1:Gm8eSIfQN6457haJuPaMxZw4wyP5k+ykPFlrhQDvhvw= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= @@ -2108,9 +2014,6 @@ k8s.io/kubernetes v1.11.10/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js= k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ= -k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= @@ -2139,11 +2042,11 @@ sigs.k8s.io/kustomize/kyaml v0.10.15/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4 sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod 
h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/pkg/runtime/k0s/common.go b/pkg/runtime/k0s/common.go new file mode 100644 index 00000000000..b4c5a2d2ba7 --- /dev/null +++ b/pkg/runtime/k0s/common.go @@ -0,0 +1,22 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k0s + +const ( + GetK0sVersionCMD = "k0s version" + + SSHKeyGenCMD = "ssh-keygen -f /root/.ssh/id_rsa -P \"\"" + SSHCopyIDPrefix = "ssh-copy-id -f " +) diff --git a/pkg/runtime/k0s/init.go b/pkg/runtime/k0s/init.go new file mode 100644 index 00000000000..9aa2200ecd9 --- /dev/null +++ b/pkg/runtime/k0s/init.go @@ -0,0 +1,81 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k0s + +import ( + "fmt" +) + +func (k *Runtime) init() error { + pipeline := []func() error{ + k.MergeConfigOnMaster0, + k.SSHKeyGenAndCopyIDToHosts, + // TODO: move all these registry operation to the specific registry packages. 
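	// Note (editorial, for readers of this diff): the two steps below prepare the local image
	// registry. GenerateCert creates a self-signed CA/cert for the registry domain and copies it
	// to master0, and ApplyRegistryOnMaster0 initializes the registry itself and wires up
	// /etc/hosts entries and (optionally) credentials on master0.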
+ k.GenerateCert, + k.ApplyRegistryOnMaster0, + } + + for _, f := range pipeline { + if err := f(); err != nil { + return fmt.Errorf("failed to prepare Master0 env: %v", err) + } + } + + return nil +} + +// MergeConfigOnMaster0 convert the cluster file spec to kubectl.yaml (k0sctl config file) to lead cluster run +func (k *Runtime) MergeConfigOnMaster0() error { + if err := k.K0sConfig.ConvertTok0sConfig(k.cluster); err != nil { + return fmt.Errorf("failed to convert to k0s config from clusterfile: %v", err) + } + ssh, err := k.getHostSSHClient(k.cluster.GetMaster0IP()) + if err != nil { + return fmt.Errorf("failed to get ssh client: %v", err) + } + output, err := ssh.Cmd(k.cluster.GetMaster0IP(), GetK0sVersionCMD) + if err != nil { + return err + } + + k.K0sConfig.DefineConfigFork0s(string(output), k.RegConfig.Domain, k.RegConfig.Port, k.cluster.Name) + + //write k0sctl.yaml to master0 rootfs + if err := k.K0sConfig.WriteConfigToMaster0(k.getRootfs()); err != nil { + return err + } + return nil +} + +// SSHKeyGenAndCopyIDToHosts use ssh-copy-id to prepare a no password ssh-client for k0sctl install environment. +func (k *Runtime) SSHKeyGenAndCopyIDToHosts() error { + return k.sshKeyGenAndCopyIDToHosts() +} + +// GenerateCert generate the containerd CA for registry TLS. +func (k *Runtime) GenerateCert() error { + if err := k.GenerateRegistryCert(); err != nil { + return err + } + return k.SendRegistryCert(k.cluster.GetMasterIPList()[:1]) +} + +// sshKeyGenAndCopyIDToHosts use ssh-key-gen and ssh-copy-id to prepare an env without password for k0sctl. +func (k *Runtime) sshKeyGenAndCopyIDToHosts() error { + if err := k.sshKeyGen(); err != nil { + return err + } + return k.sshCopyIDToEveryHost() +} diff --git a/pkg/runtime/k0s/k0sctl/common.go b/pkg/runtime/k0s/k0sctl/common.go new file mode 100644 index 00000000000..a15eaf6e8c8 --- /dev/null +++ b/pkg/runtime/k0s/k0sctl/common.go @@ -0,0 +1,28 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k0sctl + +const ( + K0sUploadBinaryPath = "/usr/bin/k0s" + + ClusterFileRoleMaster = "master" + ClusterFileRoleWorker = "worker" + + K0sController = "controller" + K0sWorker = "worker" + K0sControllerAndWorker = "controller+worker" + + K0sDefaultUser = "root" +) diff --git a/pkg/runtime/k0s/k0sctl/k0sctl_config.go b/pkg/runtime/k0s/k0sctl/k0sctl_config.go new file mode 100644 index 00000000000..e79a4addc1b --- /dev/null +++ b/pkg/runtime/k0s/k0sctl/k0sctl_config.go @@ -0,0 +1,138 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package k0sctl + +import ( + "fmt" + "strconv" + + "github.com/sealerio/sealer/pkg/runtime/k0s/k0sctl/v1beta1" + v1 "github.com/sealerio/sealer/types/api/v1" + v2 "github.com/sealerio/sealer/types/api/v2" + utilsnet "github.com/sealerio/sealer/utils/net" + osi "github.com/sealerio/sealer/utils/os" + + "github.com/k0sproject/dig" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" + "github.com/k0sproject/rig" + yaml2 "gopkg.in/yaml.v2" +) + +type K0sConfig struct { + *v1beta1.Cluster +} + +// ConvertTok0sConfig convert cluster file spec host to k0sctl spec hosts. +func (c *K0sConfig) ConvertTok0sConfig(clusterFile *v2.Cluster) error { + return c.convertIPVSToAddress(clusterFile) +} + +func (c *K0sConfig) convertIPVSToAddress(clusterFile *v2.Cluster) error { + masterIPList := utilsnet.IPsToIPStrs(clusterFile.GetIPSByRole(ClusterFileRoleMaster)) + if err := c.joinK0sHosts(masterIPList, clusterFile.Spec.Hosts, clusterFile.Spec.SSH, K0sController); err != nil { + return err + } + + nodeIPList := utilsnet.IPsToIPStrs(clusterFile.GetIPSByRole(ClusterFileRoleWorker)) + if err := c.joinK0sHosts(nodeIPList, clusterFile.Spec.Hosts, clusterFile.Spec.SSH, K0sWorker); err != nil { + return err + } + // TODO: Get the controller+worker role node. + // because sealer's cluster file do not support master and worker roles, + // so we can't generate a controller and worker role in k0s config. + return nil +} + +func (c *K0sConfig) joinK0sHosts(ipList []string, hosts []v2.Host, ssh v1.SSH, role string) error { + for _, ip := range ipList { + port, err := c.parseSSHPortByIP(ip, hosts, ssh) + if err != nil { + return err + } + //Now sealer do not support rootless, so default user is always 'root'. + c.addHostField(ip, port, role, K0sDefaultUser) + } + return nil +} + +// addHostField generate k0s config spec Host and append it to config file. +func (c *K0sConfig) addHostField(ipAddr string, port int, role, user string) { + host := cluster.Host{ + Connection: rig.Connection{ + SSH: &rig.SSH{ + Address: ipAddr, + Port: port, + User: user, + }, + }, + Role: role, + UploadBinary: true, + K0sBinaryPath: K0sUploadBinaryPath, + } + c.Spec.Hosts = append(c.Spec.Hosts, &host) +} + +// parseSSHPortByIP parse cluster file ssh port to k0s ssh port. 
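// A host-level ssh port on the matching hosts entry takes precedence over the cluster-wide
// spec.ssh port. For example (the values used by the tests that follow): with spec.ssh.port
// "2222" and a master entry that sets ssh.port "22", the controller host is rendered with
// port 22, while workers that set no port of their own fall back to 2222.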
+func (c *K0sConfig) parseSSHPortByIP(ipAddr string, hosts []v2.Host, ssh v1.SSH) (int, error) { + hostsMap := c.getHostsMap(hosts) + host := hostsMap[ipAddr] + if host.SSH.Port != "" { + return strconv.Atoi(host.SSH.Port) + } + return strconv.Atoi(ssh.Port) +} + +// getHostsMap convert hosts to map[string]v1.SSH with ssh arg +func (c *K0sConfig) getHostsMap(hosts []v2.Host) map[string]v2.Host { + hostsMap := make(map[string]v2.Host) + for _, host := range hosts { + ips := utilsnet.IPsToIPStrs(host.IPS) + for _, ip := range ips { + hostsMap[ip] = host + } + } + return hostsMap +} + +// DefineConfigFork0s define the private registry for image repo such as: sea.hub:5000 +func (c *K0sConfig) DefineConfigFork0s(version, domain, port, name string) { + c.Spec.K0s = &cluster.K0s{ + Version: version, + Config: dig.Mapping{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "metadata": dig.Mapping{ + "name": name, + }, + "spec": dig.Mapping{ + "images": dig.Mapping{ + "repository": "\"" + domain + ":" + port + "\"", + }, + }, + }, + } +} + +// WriteConfigToMaster0 write k0sctl config to rootfs +func (c *K0sConfig) WriteConfigToMaster0(rootfs string) error { + k0sConfig := c.Cluster + marshal, err := yaml2.Marshal(k0sConfig) + if err != nil { + return err + } + if err := osi.NewAtomicWriter(rootfs + "/k0sctl.yaml").WriteFile(marshal); err != nil { + return fmt.Errorf("failed to write k0sctl config: %v ", err) + } + return nil +} diff --git a/pkg/runtime/k0s/k0sctl/k0sctl_config_test.go b/pkg/runtime/k0s/k0sctl/k0sctl_config_test.go new file mode 100644 index 00000000000..8ed47d7e4dd --- /dev/null +++ b/pkg/runtime/k0s/k0sctl/k0sctl_config_test.go @@ -0,0 +1,424 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
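// Editorial sketch (not part of this patch): for a single controller at 192.168.0.2 and the
// registry at sea.hub:5000, the k0sctl.yaml produced by ConvertTok0sConfig + DefineConfigFork0s +
// WriteConfigToMaster0 looks roughly like the following; the exact keys follow the k0sctl
// v1beta1 types, so treat them as an approximation:
//
//	spec:
//	  hosts:
//	  - ssh:
//	      address: 192.168.0.2
//	      user: root
//	      port: 22
//	    role: controller
//	  k0s:
//	    version: <output of "k0s version" on master0>
//	    config:
//	      apiVersion: k0s.k0sproject.io/v1beta1
//	      metadata:
//	        name: my-k0s-cluster
//	      spec:
//	        images:
//	          repository: '"sea.hub:5000"'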
+ +package k0sctl + +import ( + "fmt" + "net" + "os" + "syscall" + "testing" + + "github.com/k0sproject/dig" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" + "github.com/k0sproject/rig" + "github.com/sealerio/sealer/pkg/runtime/k0s/k0sctl/v1beta1" + v1 "github.com/sealerio/sealer/types/api/v1" + v2 "github.com/sealerio/sealer/types/api/v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var mockClusterFile = v2.Cluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "sealer.cloud/v2", + Kind: "Cluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-k0s-cluster", + }, + Spec: v2.ClusterSpec{ + Image: "k0s:v1.24.1-rc", + SSH: v1.SSH{ + Passwd: "test123", + Port: "2222", + }, + Hosts: []v2.Host{ + { + IPS: []net.IP{net.IP("192.168.0.2")}, + Roles: []string{"master"}, + SSH: v1.SSH{ + Passwd: "yyy", + Port: "22", + }, + }, + { + IPS: []net.IP{net.IP("192.168.0.3"), net.IP("192.168.0.4")}, + Roles: []string{"worker"}, + }, + { + IPS: []net.IP{net.IP("192.168.0.5")}, + Roles: []string{"worker"}, + }, + }, + }, +} + +var mockK0sctlConfig = v1beta1.Cluster{ + APIVersion: "k0sctl.k0sproject.io/v1beta1", + Kind: "Cluster", + Metadata: &v1beta1.ClusterMetadata{ + Name: "my-k0s-cluster", + }, + Spec: &cluster.Spec{ + Hosts: cluster.Hosts{ + { + Role: "controller", + InstallFlags: []string{"--debug"}, + Connection: rig.Connection{ + SSH: &rig.SSH{ + Address: "172.16.161.64", + User: "root", + Port: 22, + KeyPath: "~/.ssh/id_rsa", + }, + }, + UploadBinary: true, + K0sBinaryPath: K0sUploadBinaryPath, + }, + { + Role: "worker", + InstallFlags: []string{"--debug"}, + Connection: rig.Connection{ + SSH: &rig.SSH{ + Address: "172.16.161.65", + User: "root", + Port: 22, + KeyPath: "~/.ssh/id_rsa", + }, + }, + UploadBinary: true, + K0sBinaryPath: K0sUploadBinaryPath, + }, + }, + K0s: &cluster.K0s{ + Version: "v1.24.2+k0s.0", + Config: dig.Mapping{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "spec": dig.Mapping{ + "images": dig.Mapping{ + //"repository": "sea.hub:5000", + }, + }, + }, + }, + }, +} + +func TestK0sConfig_parseSSHPortByIP(t *testing.T) { + type fields struct { + Cluster *v1beta1.Cluster + } + type args struct { + ipAddr string + hosts []v2.Host + ssh v1.SSH + } + tests := []struct { + name string + fields fields + args args + want int + wantErr bool + }{ + { + name: "test ip overwrite", + fields: fields{Cluster: &v1beta1.Cluster{}}, + args: args{ + ipAddr: "192.168.0.2", + hosts: []v2.Host{ + { + IPS: []net.IP{net.ParseIP("192.168.0.2")}, + Roles: []string{"master"}, + SSH: v1.SSH{ + Passwd: "yyy", + Port: "22", + }, + }, + { + IPS: []net.IP{net.ParseIP("192.168.0.3"), net.ParseIP("192.168.0.4")}, + Roles: []string{"worker"}, + }, + { + IPS: []net.IP{net.ParseIP("192.168.0.5")}, + Roles: []string{"worker"}, + }, + }, + ssh: v1.SSH{ + Passwd: "test123", + Port: "2222", + }, + }, + want: 22, + wantErr: false, + }, + { + name: "test ip global cover", + fields: fields{Cluster: &v1beta1.Cluster{}}, + args: args{ + ipAddr: "192.168.0.3", + hosts: []v2.Host{ + { + IPS: []net.IP{net.ParseIP("192.168.0.2")}, + Roles: []string{"master"}, + SSH: v1.SSH{ + Passwd: "yyy", + Port: "22", + }, + }, + { + IPS: []net.IP{net.ParseIP("192.168.0.3"), net.ParseIP("192.168.0.4")}, + Roles: []string{"worker"}, + }, + { + IPS: []net.IP{net.ParseIP("192.168.0.5")}, + Roles: []string{"worker"}, + }, + }, + ssh: v1.SSH{ + Passwd: "test123", + Port: "2222", + }, + }, + want: 2222, + wantErr: false, + }, + { + name: "test incorrect with port parse error", + fields: fields{Cluster: 
&v1beta1.Cluster{}}, + args: args{ + ipAddr: "192.168.0.2", + hosts: []v2.Host{ + { + IPS: []net.IP{net.ParseIP("192.168.0.2")}, + Roles: []string{"master"}, + SSH: v1.SSH{ + Passwd: "yyy", + Port: "error", + }, + }, + { + IPS: []net.IP{net.ParseIP("192.168.0.3"), net.ParseIP("192.168.0.4")}, + Roles: []string{"worker"}, + }, + { + IPS: []net.IP{net.ParseIP("192.168.0.5")}, + Roles: []string{"worker"}, + }, + }, + ssh: v1.SSH{ + Passwd: "test123", + Port: "2222", + }, + }, + want: 0, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &K0sConfig{ + Cluster: tt.fields.Cluster, + } + got, err := c.parseSSHPortByIP(tt.args.ipAddr, tt.args.hosts, tt.args.ssh) + if (err != nil) != tt.wantErr { + t.Errorf("parseSSHPortByIP() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("parseSSHPortByIP() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestK0sConfig_addHostField(t *testing.T) { + type fields struct { + Cluster *v1beta1.Cluster + } + type args struct { + ipsAddr string + port int + role string + user string + } + tests := []struct { + name string + fields fields + args args + }{ + { + name: "test add Node", + fields: fields{Cluster: &mockK0sctlConfig}, + args: args{ + ipsAddr: "10.0.0.2", + port: 22, + role: "worker", + user: "root", + }, + }, + { + name: "test add Master", + fields: fields{Cluster: &mockK0sctlConfig}, + args: args{ + ipsAddr: "10.0.0.4", + port: 22, + role: "controller", + user: "root", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &K0sConfig{ + Cluster: tt.fields.Cluster, + } + c.addHostField(tt.args.ipsAddr, tt.args.port, tt.args.role, tt.args.user) + fmt.Println(c.Cluster.Spec.Hosts) + if err := c.Validate(); err != nil { + t.Errorf("add field wrong: %v", err) + } + }) + } +} + +func TestK0sConfig_DefineConfigFork0s(t *testing.T) { + type fields struct { + Cluster *v1beta1.Cluster + } + type args struct { + version string + domain string + port string + name string + } + tests := []struct { + name string + fields fields + args args + }{ + { + name: "test set repo field", + fields: fields{ + Cluster: &mockK0sctlConfig, + }, + args: args{ + version: "v1.24.2+k0s.0", + domain: "sea.hub", + port: "5000", + name: "my-k0s-cluster", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &K0sConfig{ + Cluster: tt.fields.Cluster, + } + c.DefineConfigFork0s(tt.args.version, tt.args.domain, tt.args.port, tt.args.name) + fmt.Println(c.Spec.K0s.Config.Dig("spec", "images", "repository")) + if err := c.Validate(); err != nil { + t.Errorf("add config error: %v", err) + } + }) + } +} + +func TestK0sConfig_convertIPVSToAddress(t *testing.T) { + type fields struct { + Cluster *v1beta1.Cluster + } + type args struct { + clusterFile *v2.Cluster + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "cluster file to k0s config", + fields: fields{ + Cluster: &mockK0sctlConfig, + }, + args: args{ + clusterFile: &mockClusterFile, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &K0sConfig{ + Cluster: tt.fields.Cluster, + } + if err := c.convertIPVSToAddress(tt.args.clusterFile); (err != nil) != tt.wantErr { + t.Errorf("convertIPVSToAddress() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestK0sConfig_WriteConfigToMaster0(t *testing.T) { + type fields struct { + Cluster *v1beta1.Cluster + } + type args 
struct { + rootfs string + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "test write rootfs/k0sctl.yaml", + fields: fields{ + Cluster: &mockK0sctlConfig, + }, + args: args{ + rootfs: "", + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + //prevent the read permission denied. + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + dir, err := os.MkdirTemp("", "test-rootfs-metadata-tmp") + if err != nil { + t.Errorf("Make temp dir %s error = %s, wantErr %v", dir, err, tt.wantErr) + } + defer func() { + err = os.RemoveAll(dir) + if err != nil { + t.Errorf("Remove temp dir %s error = %v, wantErr %v", dir, err, tt.wantErr) + } + }() + tt.args.rootfs = dir + fmt.Println(tt.args.rootfs) + c := &K0sConfig{ + Cluster: tt.fields.Cluster, + } + if err := c.WriteConfigToMaster0(tt.args.rootfs); (err != nil) != tt.wantErr { + t.Errorf("WriteConfigToMaster0() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/pkg/runtime/k0s/k0sctl/v1beta1/types.go b/pkg/runtime/k0s/k0sctl/v1beta1/types.go new file mode 100644 index 00000000000..197ab7c3f0a --- /dev/null +++ b/pkg/runtime/k0s/k0sctl/v1beta1/types.go @@ -0,0 +1,63 @@ +/* +Copyright 2022 k0s authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + validation "github.com/go-ozzo/ozzo-validation/v4" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" +) + +// APIVersion is the current api version +const APIVersion = "k0sctl.k0sproject.io/v1beta1" + +// ClusterMetadata defines cluster metadata +type ClusterMetadata struct { + Name string `yaml:"name" validate:"required" default:"k0s-cluster"` + Kubeconfig string `yaml:"-"` +} + +// Cluster describes launchpad.yaml configuration +type Cluster struct { + APIVersion string `yaml:"apiVersion"` + Kind string `yaml:"kind"` + Metadata *ClusterMetadata `yaml:"metadata"` + Spec *cluster.Spec `yaml:"spec"` +} + +// UnmarshalYAML sets in some sane defaults when unmarshaling the data from yaml +func (c *Cluster) UnmarshalYAML(unmarshal func(interface{}) error) error { + c.Metadata = &ClusterMetadata{ + Name: "k0s-cluster", + } + c.Spec = &cluster.Spec{} + + type clusterConfig Cluster + yc := (*clusterConfig)(c) + + if err := unmarshal(yc); err != nil { + return err + } + + return nil +} + +// Validate performs a configuration sanity check +func (c *Cluster) Validate() error { + validation.ErrorTag = "yaml" + return validation.ValidateStruct(c, + validation.Field(&c.APIVersion, validation.Required, validation.In(APIVersion).Error("must equal "+APIVersion)), + validation.Field(&c.Kind, validation.Required, validation.In("cluster", "Cluster").Error("must equal Cluster")), + validation.Field(&c.Spec), + ) +} diff --git a/pkg/runtime/k0s/registry.go b/pkg/runtime/k0s/registry.go new file mode 100644 index 00000000000..e9836f0ac7a --- /dev/null +++ b/pkg/runtime/k0s/registry.go @@ -0,0 +1,124 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k0s + +import ( + "fmt" + "net" + "path/filepath" + + "github.com/sealerio/sealer/common" + "github.com/sealerio/sealer/pkg/cert" +) + +const ( + SeaHub = "sea.hub" + RemoteAddEtcHosts = "cat /etc/hosts |grep '%s' || echo '%s' >> /etc/hosts" + ContainerdLoginCommand = "nerdctl login -u %s -p %s %s" + DefaultRegistryHtPasswdFile = "registry_htpasswd" + DockerCertDir = "/etc/docker/certs.d" +) + +// sendRegistryCertAndKey send registry cert to Master0 host. path like: /var/lib/sealer/data/my-k0s-cluster/certs +func (k *Runtime) sendRegistryCertAndKey() error { + return k.sendFileToHosts(k.cluster.GetMasterIPList()[:1], k.getCertsDir(), filepath.Join(k.getRootfs(), "certs")) +} + +// sendRegistryCert send registry cert to host. +func (k *Runtime) sendRegistryCert(host []net.IP) error { + cf := k.RegConfig + err := k.sendFileToHosts(host, fmt.Sprintf("%s/%s.crt", k.getCertsDir(), cf.Domain), fmt.Sprintf("%s/%s:%s/%s.crt", DockerCertDir, cf.Domain, cf.Port, cf.Domain)) + if err != nil { + return err + } + return k.sendFileToHosts(host, fmt.Sprintf("%s/%s.crt", k.getCertsDir(), cf.Domain), fmt.Sprintf("%s/%s:%s/%s.crt", DockerCertDir, SeaHub, cf.Port, cf.Domain)) +} + +func (k *Runtime) addRegistryDomainToHosts() (host string) { + content := fmt.Sprintf("%s %s", k.RegConfig.IP.String(), k.RegConfig.Domain) + return fmt.Sprintf(RemoteAddEtcHosts, content, content) +} + +// ApplyRegistryOnMaster0 Only use this for init, due to the initiation operations. 
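// It writes the registry htpasswd file when credentials are configured, runs
// rootfs/scripts/init-registry.sh on the registry host, appends the registry domain (and the
// fixed sea.hub alias) to /etc/hosts on master0, and finally logs containerd into the registry
// when credentials are set. The hosts entry is added idempotently, e.g. with illustrative values:
//
//	cat /etc/hosts |grep '192.168.0.2 sea.hub' || echo '192.168.0.2 sea.hub' >> /etc/hosts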
+func (k *Runtime) ApplyRegistryOnMaster0() error { + ssh, err := k.getHostSSHClient(k.RegConfig.IP) + if err != nil { + return fmt.Errorf("failed to get registry ssh client: %v", err) + } + + if k.RegConfig.Username != "" && k.RegConfig.Password != "" { + htpasswd, err := k.RegConfig.GenerateHTTPBasicAuth() + if err != nil { + return err + } + err = ssh.CmdAsync(k.RegConfig.IP, fmt.Sprintf("echo '%s' > %s", htpasswd, filepath.Join(k.getRootfs(), "etc", DefaultRegistryHtPasswdFile))) + if err != nil { + return err + } + } + initRegistry := fmt.Sprintf("cd %s/scripts && ./init-registry.sh %s %s %s", k.getRootfs(), k.RegConfig.Port, fmt.Sprintf("%s/registry", k.getRootfs()), k.RegConfig.Domain) + addRegistryHosts := k.addRegistryDomainToHosts() + if k.RegConfig.Domain != SeaHub { + addSeaHubHosts := fmt.Sprintf(RemoteAddEtcHosts, k.RegConfig.IP.String()+" "+SeaHub, k.RegConfig.IP.String()+" "+SeaHub) + addRegistryHosts = fmt.Sprintf("%s && %s", addRegistryHosts, addSeaHubHosts) + } + if err = ssh.CmdAsync(k.RegConfig.IP, initRegistry); err != nil { + return err + } + if err = ssh.CmdAsync(k.cluster.GetMaster0IP(), addRegistryHosts); err != nil { + return err + } + if k.RegConfig.Username == "" || k.RegConfig.Password == "" { + return nil + } + return ssh.CmdAsync(k.cluster.GetMaster0IP(), k.GenLoginCommand()) +} + +func (k *Runtime) GenLoginCommand() string { + return fmt.Sprintf("%s && %s", + fmt.Sprintf(ContainerdLoginCommand, k.RegConfig.Username, k.RegConfig.Password, k.RegConfig.Domain+":"+k.RegConfig.Port), + fmt.Sprintf(ContainerdLoginCommand, k.RegConfig.Username, k.RegConfig.Password, SeaHub+":"+k.RegConfig.Port)) +} + +func (k *Runtime) GenerateRegistryCert() error { + return GenerateRegistryCert(k.getCertsDir(), k.RegConfig.Domain) +} + +func GenerateRegistryCert(registryCertPath string, BaseName string) error { + regCertConfig := cert.Config{ + Path: registryCertPath, + BaseName: BaseName, + CommonName: BaseName, + DNSNames: []string{BaseName}, + Organization: []string{common.ExecBinaryFileName}, + Year: 100, + } + if BaseName != SeaHub { + regCertConfig.DNSNames = append(regCertConfig.DNSNames, SeaHub) + } + crt, key, err := cert.NewCaCertAndKey(regCertConfig) + if err != nil { + return err + } + return cert.WriteCertAndKey(regCertConfig.Path, regCertConfig.BaseName, crt, key) +} + +func (k *Runtime) SendRegistryCert(host []net.IP) error { + err := k.sendRegistryCertAndKey() + if err != nil { + return err + } + return k.sendRegistryCert(host) +} diff --git a/pkg/runtime/k0s/runtime.go b/pkg/runtime/k0s/runtime.go new file mode 100644 index 00000000000..3b3c88f395c --- /dev/null +++ b/pkg/runtime/k0s/runtime.go @@ -0,0 +1,194 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
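// Editorial note on GenLoginCommand in registry.go above (illustrative values, not part of this
// patch): with username "admin", password "pw" and a registry at registry.example.com:5000 it
// expands to
//
//	nerdctl login -u admin -p pw registry.example.com:5000 && nerdctl login -u admin -p pw sea.hub:5000
//
// i.e. both the configured domain and the fixed sea.hub alias get logged in, so images can be
// referenced under either name.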
+ +package k0s + +import ( + "context" + "fmt" + "net" + + "github.com/sealerio/sealer/common" + "github.com/sealerio/sealer/pkg/registry" + "github.com/sealerio/sealer/pkg/runtime" + "github.com/sealerio/sealer/pkg/runtime/k0s/k0sctl" + v2 "github.com/sealerio/sealer/types/api/v2" + utilsnet "github.com/sealerio/sealer/utils/net" + "github.com/sealerio/sealer/utils/platform" + "github.com/sealerio/sealer/utils/ssh" + + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +// Runtime struct is the runtime interface for k0s +type Runtime struct { + // cluster is sealer clusterFile + cluster *v2.Cluster + Vlog int + RegConfig *registry.Config + // clusterFileK0sConfig is k0sctl cluster config file + K0sConfig *k0sctl.K0sConfig +} + +func (k *Runtime) Init() error { + return k.init() +} + +func (k *Runtime) Upgrade() error { + //TODO implement me + panic("implement me") +} + +func (k *Runtime) Reset() error { + //TODO implement me + panic("implement me") +} + +func (k *Runtime) JoinMasters(newMastersIPList []net.IP) error { + //TODO implement me + panic("implement me") +} + +func (k *Runtime) JoinNodes(newNodesIPList []net.IP) error { + //TODO implement me + panic("implement me") +} + +func (k *Runtime) DeleteMasters(mastersIPList []net.IP) error { + //TODO implement me + panic("implement me") +} + +func (k *Runtime) DeleteNodes(nodesIPList []net.IP) error { + //TODO implement me + panic("implement me") +} + +func (k *Runtime) GetClusterMetadata() (*runtime.Metadata, error) { + //TODO implement me + panic("implement me") +} + +func (k *Runtime) UpdateCert(certs []string) error { + //TODO implement me + panic("implement me") +} + +// NewK0sRuntime arg "clusterConfig" is the k0s config file under etc/${ant_name.yaml}, runtime need read k0s config from it +// Mount image is required before new Runtime. +func NewK0sRuntime(cluster *v2.Cluster) (runtime.Interface, error) { + return newK0sRuntime(cluster) +} + +func newK0sRuntime(cluster *v2.Cluster) (runtime.Interface, error) { + k := &Runtime{ + cluster: cluster, + } + + k.RegConfig = registry.GetConfig(k.getImageMountDir(), k.cluster.GetMaster0IP()) + + if err := k.checkList(); err != nil { + return nil, err + } + + setDebugLevel(k) + return k, nil +} + +func setDebugLevel(k *Runtime) { + if logrus.GetLevel() == logrus.DebugLevel { + k.Vlog = 6 + } +} + +// checkList do a simple check for cluster spec Hosts which can not be empty. +func (k *Runtime) checkList() error { + if len(k.cluster.Spec.Hosts) == 0 { + return fmt.Errorf("hosts spec cannot be empty") + } + if k.cluster.GetMaster0IP() == nil { + return fmt.Errorf("master0 IP cannot be empty") + } + return nil +} + +// getImageMountDir return a path for mount dir, eg: /var/lib/sealer/data/my-k0s-cluster/mount +func (k *Runtime) getImageMountDir() string { + return platform.DefaultMountClusterImageDir(k.cluster.Name) +} + +// getHostSSHClient return ssh client with destination machine. 
+func (k *Runtime) getHostSSHClient(hostIP net.IP) (ssh.Interface, error) {
+    return ssh.NewStdoutSSHClient(hostIP, k.cluster)
+}
+
+// getRootfs returns the rootfs dir, e.g. /var/lib/sealer/data/my-k0s-cluster/rootfs
+func (k *Runtime) getRootfs() string {
+    return common.DefaultTheClusterRootfsDir(k.cluster.Name)
+}
+
+// sshKeyGen generates an SSH public/private key pair on master0
+func (k *Runtime) sshKeyGen() error {
+    master0IP := k.cluster.GetMaster0IP()
+    sshClient, err := k.getHostSSHClient(master0IP)
+    if err != nil {
+        return fmt.Errorf("failed to get ssh client: %v", err)
+    }
+    if _, err := sshClient.Cmd(master0IP, SSHKeyGenCMD); err != nil {
+        return err
+    }
+    return nil
+}
+
+// sshCopyIDToEveryHost uses ssh-copy-id to distribute master0's public key to every host in the cluster file
+func (k *Runtime) sshCopyIDToEveryHost() error {
+    master0IP := k.cluster.GetMaster0IP()
+    sshClient, err := k.getHostSSHClient(master0IP)
+    if err != nil {
+        return fmt.Errorf("failed to get ssh client: %v", err)
+    }
+    hostIPList := utilsnet.IPsToIPStrs(k.cluster.GetAllIPList())
+    for _, ip := range hostIPList {
+        cmd := SSHCopyIDPrefix + ip
+        if _, err := sshClient.Cmd(master0IP, cmd); err != nil {
+            return fmt.Errorf("failed to exec ssh-copy-id to destination machine: %v", err)
+        }
+    }
+    return nil
+}
+
+// getCertsDir returns the certs dir, e.g. /var/lib/sealer/data/my-k0s-cluster/certs
+func (k *Runtime) getCertsDir() string {
+    return common.TheDefaultClusterCertDir(k.cluster.Name)
+}
+
+// sendFileToHosts sends src to dst on every given host concurrently
+func (k *Runtime) sendFileToHosts(Hosts []net.IP, src, dst string) error {
+    eg, _ := errgroup.WithContext(context.Background())
+    for _, node := range Hosts {
+        node := node
+        eg.Go(func() error {
+            sshClient, err := k.getHostSSHClient(node)
+            if err != nil {
+                return fmt.Errorf("failed to send file: %v", err)
+            }
+            if err := sshClient.Copy(node, src, dst); err != nil {
+                return fmt.Errorf("failed to send file: %v", err)
+            }
+            return nil
+        })
+    }
+    return eg.Wait()
+}
diff --git a/pkg/runtime/k0s/runtime_test.go b/pkg/runtime/k0s/runtime_test.go
new file mode 100644
index 00000000000..dd7b0103ccd
--- /dev/null
+++ b/pkg/runtime/k0s/runtime_test.go
@@ -0,0 +1,115 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k0s
+
+import (
+    "testing"
+
+    "github.com/sirupsen/logrus"
+    "k8s.io/apimachinery/pkg/util/yaml"
+
+    "github.com/sealerio/sealer/pkg/runtime/k0s/k0sctl/v1beta1"
+    v2 "github.com/sealerio/sealer/types/api/v2"
+)
+
+const (
+    mockClusterFile = `
+apiVersion: sealer.cloud/v2
+kind: Cluster
+metadata:
+  name: my-k0s-cluster
+spec:
+  image: k0s:v1.24.1-rc
+  ssh:
+    passwd: test123
+    port: "2222"
+  hosts:
+    - ips: [ 192.168.0.2 ] # this master's ssh port differs from the others.
+      roles: [ master ]
+      ssh:
+        passwd: yyy
+        port: "22"
+    - ips: [ 192.168.0.3,192.168.0.4 ]
+      roles: [ worker ]
+    - ips: [ 192.168.0.5 ]
+      roles: [ worker ]
+`
+    mockK0sctlYaml = `
+apiVersion: k0sctl.k0sproject.io/v1beta1
+kind: Cluster
+metadata:
+  name: my-k0s-cluster
+spec:
+  hosts:
+  - role: controller
+    installFlags:
+    - --debug
+    ssh:
+      address: 10.0.0.1
+      user: root
+      port: 22
+      keyPath: ~/.ssh/id_rsa
+  - role: worker
+    installFlags:
+    - --debug
+    ssh:
+      address: 10.0.0.2
+  k0s:
+    version: 0.10.0
+    config:
+      apiVersion: k0s.k0sproject.io/v1beta1
+      kind: Cluster
+      metadata:
+        name: my-k0s-cluster
+      spec:
+        images:
+          calico:
+            cni:
+              image: calico/cni
+              version: v3.16.2
+`
+)
+
+func TestNewK0sRuntime(t *testing.T) {
+    tests := []struct {
+        name    string
+        wantErr bool
+    }{
+        {
+            name:    "test newK0sRuntime",
+            wantErr: false,
+        },
+        {
+            name:    "test newK0sRuntime",
+            wantErr: false,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            testClusterFile := v2.Cluster{}
+            testClusterConfigYAML := v1beta1.Cluster{}
+            if err := yaml.Unmarshal([]byte(mockClusterFile), &testClusterFile); err != nil {
+                logrus.Errorf("decode to clusterfile yaml failed: %s", err)
+            }
+            if err := yaml.Unmarshal([]byte(mockK0sctlYaml), &testClusterConfigYAML); err != nil {
+                logrus.Errorf("decode to k0sctl.yaml failed: %s", err)
+            }
+            _, err := NewK0sRuntime(&testClusterFile)
+            if err != nil {
+                t.Errorf("NewK0sRuntime err: %s", err)
+            }
+        })
+    }
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/.travis.yml b/vendor/github.com/Azure/go-ntlmssp/.travis.yml
new file mode 100644
index 00000000000..23c95fe951b
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/.travis.yml
@@ -0,0 +1,17 @@
+sudo: false
+
+language: go
+
+before_script:
+  - go get -u golang.org/x/lint/golint
+
+go:
+  - 1.10.x
+  - master
+
+script:
+  - test -z "$(gofmt -s -l . | tee /dev/stderr)"
+  - test -z "$(golint ./... | tee /dev/stderr)"
+  - go vet ./...
+  - go build -v ./...
+  - go test -v ./...
diff --git a/vendor/github.com/Azure/go-ntlmssp/LICENSE b/vendor/github.com/Azure/go-ntlmssp/LICENSE
new file mode 100644
index 00000000000..dc1cf39d135
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/Azure/go-ntlmssp/README.md b/vendor/github.com/Azure/go-ntlmssp/README.md new file mode 100644 index 00000000000..55cdcefab70 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/README.md @@ -0,0 +1,29 @@ +# go-ntlmssp +Golang package that provides NTLM/Negotiate authentication over HTTP + +[![GoDoc](https://godoc.org/github.com/Azure/go-ntlmssp?status.svg)](https://godoc.org/github.com/Azure/go-ntlmssp) [![Build Status](https://travis-ci.org/Azure/go-ntlmssp.svg?branch=dev)](https://travis-ci.org/Azure/go-ntlmssp) + +Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx +Implementation hints from http://davenport.sourceforge.net/ntlm.html + +This package only implements authentication, no key exchange or encryption. It +only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding. +This package implements NTLMv2. + +# Usage + +``` +url, user, password := "http://www.example.com/secrets", "robpike", "pw123" +client := &http.Client{ + Transport: ntlmssp.Negotiator{ + RoundTripper:&http.Transport{}, + }, +} + +req, _ := http.NewRequest("GET", url, nil) +req.SetBasicAuth(user, password) +res, _ := client.Do(req) +``` + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go new file mode 100644 index 00000000000..1b0fe7dd22b --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go @@ -0,0 +1,183 @@ +package ntlmssp + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "encoding/hex" + "errors" + "strings" + "time" +) + +type authenicateMessage struct { + LmChallengeResponse []byte + NtChallengeResponse []byte + + TargetName string + UserName string + + // only set if negotiateFlag_NTLMSSP_NEGOTIATE_KEY_EXCH + EncryptedRandomSessionKey []byte + + NegotiateFlags negotiateFlags + + MIC []byte +} + +type authenticateMessageFields struct { + messageHeader + LmChallengeResponse varField + NtChallengeResponse varField + TargetName varField + UserName varField + Workstation varField + _ [8]byte + NegotiateFlags negotiateFlags +} + +func (m authenicateMessage) MarshalBinary() ([]byte, error) { + if !m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE) { + return nil, errors.New("Only unicode is supported") + } + + target, user := toUnicode(m.TargetName), toUnicode(m.UserName) + workstation := toUnicode("") + + ptr := binary.Size(&authenticateMessageFields{}) + f := authenticateMessageFields{ + messageHeader: newMessageHeader(3), + NegotiateFlags: m.NegotiateFlags, + LmChallengeResponse: newVarField(&ptr, len(m.LmChallengeResponse)), + NtChallengeResponse: newVarField(&ptr, len(m.NtChallengeResponse)), + TargetName: newVarField(&ptr, len(target)), + UserName: newVarField(&ptr, len(user)), + Workstation: newVarField(&ptr, len(workstation)), + } + + f.NegotiateFlags.Unset(negotiateFlagNTLMSSPNEGOTIATEVERSION) + + b := bytes.Buffer{} + if err := binary.Write(&b, binary.LittleEndian, &f); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &m.LmChallengeResponse); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, 
&m.NtChallengeResponse); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &target); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &user); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &workstation); err != nil { + return nil, err + } + + return b.Bytes(), nil +} + +//ProcessChallenge crafts an AUTHENTICATE message in response to the CHALLENGE message +//that was received from the server +func ProcessChallenge(challengeMessageData []byte, user, password string) ([]byte, error) { + if user == "" && password == "" { + return nil, errors.New("Anonymous authentication not supported") + } + + var cm challengeMessage + if err := cm.UnmarshalBinary(challengeMessageData); err != nil { + return nil, err + } + + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) { + return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)") + } + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) { + return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)") + } + + am := authenicateMessage{ + UserName: user, + TargetName: cm.TargetName, + NegotiateFlags: cm.NegotiateFlags, + } + + timestamp := cm.TargetInfo[avIDMsvAvTimestamp] + if timestamp == nil { // no time sent, take current time + ft := uint64(time.Now().UnixNano()) / 100 + ft += 116444736000000000 // add time between unix & windows offset + timestamp = make([]byte, 8) + binary.LittleEndian.PutUint64(timestamp, ft) + } + + clientChallenge := make([]byte, 8) + rand.Reader.Read(clientChallenge) + + ntlmV2Hash := getNtlmV2Hash(password, user, cm.TargetName) + + am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw) + + if cm.TargetInfoRaw == nil { + am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge) + } + return am.MarshalBinary() +} + +func ProcessChallengeWithHash(challengeMessageData []byte, user, hash string) ([]byte, error) { + if user == "" && hash == "" { + return nil, errors.New("Anonymous authentication not supported") + } + + var cm challengeMessage + if err := cm.UnmarshalBinary(challengeMessageData); err != nil { + return nil, err + } + + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) { + return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)") + } + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) { + return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)") + } + + am := authenicateMessage{ + UserName: user, + TargetName: cm.TargetName, + NegotiateFlags: cm.NegotiateFlags, + } + + timestamp := cm.TargetInfo[avIDMsvAvTimestamp] + if timestamp == nil { // no time sent, take current time + ft := uint64(time.Now().UnixNano()) / 100 + ft += 116444736000000000 // add time between unix & windows offset + timestamp = make([]byte, 8) + binary.LittleEndian.PutUint64(timestamp, ft) + } + + clientChallenge := make([]byte, 8) + rand.Reader.Read(clientChallenge) + + hashParts := strings.Split(hash, ":") + if len(hashParts) > 1 { + hash = hashParts[1] + } + hashBytes, err := hex.DecodeString(hash) + if err != nil { + return nil, err + } + ntlmV2Hash := hmacMd5(hashBytes, toUnicode(strings.ToUpper(user)+cm.TargetName)) + + am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], 
clientChallenge, timestamp, cm.TargetInfoRaw) + + if cm.TargetInfoRaw == nil { + am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge) + } + return am.MarshalBinary() +} diff --git a/vendor/github.com/Azure/go-ntlmssp/authheader.go b/vendor/github.com/Azure/go-ntlmssp/authheader.go new file mode 100644 index 00000000000..c9d30d32421 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/authheader.go @@ -0,0 +1,66 @@ +package ntlmssp + +import ( + "encoding/base64" + "strings" +) + +type authheader []string + +func (h authheader) IsBasic() bool { + for _, s := range h { + if strings.HasPrefix(string(s), "Basic ") { + return true + } + } + return false +} + +func (h authheader) Basic() string { + for _, s := range h { + if strings.HasPrefix(string(s), "Basic ") { + return s + } + } + return "" +} + +func (h authheader) IsNegotiate() bool { + for _, s := range h { + if strings.HasPrefix(string(s), "Negotiate") { + return true + } + } + return false +} + +func (h authheader) IsNTLM() bool { + for _, s := range h { + if strings.HasPrefix(string(s), "NTLM") { + return true + } + } + return false +} + +func (h authheader) GetData() ([]byte, error) { + for _, s := range h { + if strings.HasPrefix(string(s), "NTLM") || strings.HasPrefix(string(s), "Negotiate") || strings.HasPrefix(string(s), "Basic ") { + p := strings.Split(string(s), " ") + if len(p) < 2 { + return nil, nil + } + return base64.StdEncoding.DecodeString(string(p[1])) + } + } + return nil, nil +} + +func (h authheader) GetBasicCreds() (username, password string, err error) { + d, err := h.GetData() + if err != nil { + return "", "", err + } + parts := strings.SplitN(string(d), ":", 2) + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/Azure/go-ntlmssp/avids.go b/vendor/github.com/Azure/go-ntlmssp/avids.go new file mode 100644 index 00000000000..196b5f13163 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/avids.go @@ -0,0 +1,17 @@ +package ntlmssp + +type avID uint16 + +const ( + avIDMsvAvEOL avID = iota + avIDMsvAvNbComputerName + avIDMsvAvNbDomainName + avIDMsvAvDNSComputerName + avIDMsvAvDNSDomainName + avIDMsvAvDNSTreeName + avIDMsvAvFlags + avIDMsvAvTimestamp + avIDMsvAvSingleHost + avIDMsvAvTargetName + avIDMsvChannelBindings +) diff --git a/vendor/github.com/Azure/go-ntlmssp/challenge_message.go b/vendor/github.com/Azure/go-ntlmssp/challenge_message.go new file mode 100644 index 00000000000..053b55e4adf --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/challenge_message.go @@ -0,0 +1,82 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "fmt" +) + +type challengeMessageFields struct { + messageHeader + TargetName varField + NegotiateFlags negotiateFlags + ServerChallenge [8]byte + _ [8]byte + TargetInfo varField +} + +func (m challengeMessageFields) IsValid() bool { + return m.messageHeader.IsValid() && m.MessageType == 2 +} + +type challengeMessage struct { + challengeMessageFields + TargetName string + TargetInfo map[avID][]byte + TargetInfoRaw []byte +} + +func (m *challengeMessage) UnmarshalBinary(data []byte) error { + r := bytes.NewReader(data) + err := binary.Read(r, binary.LittleEndian, &m.challengeMessageFields) + if err != nil { + return err + } + if !m.challengeMessageFields.IsValid() { + return fmt.Errorf("Message is not a valid challenge message: %+v", m.challengeMessageFields.messageHeader) + } + + if m.challengeMessageFields.TargetName.Len > 0 { + m.TargetName, err = m.challengeMessageFields.TargetName.ReadStringFrom(data, 
m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE)) + if err != nil { + return err + } + } + + if m.challengeMessageFields.TargetInfo.Len > 0 { + d, err := m.challengeMessageFields.TargetInfo.ReadFrom(data) + m.TargetInfoRaw = d + if err != nil { + return err + } + m.TargetInfo = make(map[avID][]byte) + r := bytes.NewReader(d) + for { + var id avID + var l uint16 + err = binary.Read(r, binary.LittleEndian, &id) + if err != nil { + return err + } + if id == avIDMsvAvEOL { + break + } + + err = binary.Read(r, binary.LittleEndian, &l) + if err != nil { + return err + } + value := make([]byte, l) + n, err := r.Read(value) + if err != nil { + return err + } + if n != int(l) { + return fmt.Errorf("Expected to read %d bytes, got only %d", l, n) + } + m.TargetInfo[id] = value + } + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ntlmssp/messageheader.go b/vendor/github.com/Azure/go-ntlmssp/messageheader.go new file mode 100644 index 00000000000..247e284652c --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/messageheader.go @@ -0,0 +1,21 @@ +package ntlmssp + +import ( + "bytes" +) + +var signature = [8]byte{'N', 'T', 'L', 'M', 'S', 'S', 'P', 0} + +type messageHeader struct { + Signature [8]byte + MessageType uint32 +} + +func (h messageHeader) IsValid() bool { + return bytes.Equal(h.Signature[:], signature[:]) && + h.MessageType > 0 && h.MessageType < 4 +} + +func newMessageHeader(messageType uint32) messageHeader { + return messageHeader{signature, messageType} +} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go new file mode 100644 index 00000000000..5905c023d69 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go @@ -0,0 +1,52 @@ +package ntlmssp + +type negotiateFlags uint32 + +const ( + /*A*/ negotiateFlagNTLMSSPNEGOTIATEUNICODE negotiateFlags = 1 << 0 + /*B*/ negotiateFlagNTLMNEGOTIATEOEM = 1 << 1 + /*C*/ negotiateFlagNTLMSSPREQUESTTARGET = 1 << 2 + + /*D*/ + negotiateFlagNTLMSSPNEGOTIATESIGN = 1 << 4 + /*E*/ negotiateFlagNTLMSSPNEGOTIATESEAL = 1 << 5 + /*F*/ negotiateFlagNTLMSSPNEGOTIATEDATAGRAM = 1 << 6 + /*G*/ negotiateFlagNTLMSSPNEGOTIATELMKEY = 1 << 7 + + /*H*/ + negotiateFlagNTLMSSPNEGOTIATENTLM = 1 << 9 + + /*J*/ + negotiateFlagANONYMOUS = 1 << 11 + /*K*/ negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED = 1 << 12 + /*L*/ negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED = 1 << 13 + + /*M*/ + negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN = 1 << 15 + /*N*/ negotiateFlagNTLMSSPTARGETTYPEDOMAIN = 1 << 16 + /*O*/ negotiateFlagNTLMSSPTARGETTYPESERVER = 1 << 17 + + /*P*/ + negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY = 1 << 19 + /*Q*/ negotiateFlagNTLMSSPNEGOTIATEIDENTIFY = 1 << 20 + + /*R*/ + negotiateFlagNTLMSSPREQUESTNONNTSESSIONKEY = 1 << 22 + /*S*/ negotiateFlagNTLMSSPNEGOTIATETARGETINFO = 1 << 23 + + /*T*/ + negotiateFlagNTLMSSPNEGOTIATEVERSION = 1 << 25 + + /*U*/ + negotiateFlagNTLMSSPNEGOTIATE128 = 1 << 29 + /*V*/ negotiateFlagNTLMSSPNEGOTIATEKEYEXCH = 1 << 30 + /*W*/ negotiateFlagNTLMSSPNEGOTIATE56 = 1 << 31 +) + +func (field negotiateFlags) Has(flags negotiateFlags) bool { + return field&flags == flags +} + +func (field *negotiateFlags) Unset(flags negotiateFlags) { + *field = *field ^ (*field & flags) +} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go new file mode 100644 index 00000000000..e466a9861d8 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go @@ 
-0,0 +1,64 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "errors" + "strings" +) + +const expMsgBodyLen = 40 + +type negotiateMessageFields struct { + messageHeader + NegotiateFlags negotiateFlags + + Domain varField + Workstation varField + + Version +} + +var defaultFlags = negotiateFlagNTLMSSPNEGOTIATETARGETINFO | + negotiateFlagNTLMSSPNEGOTIATE56 | + negotiateFlagNTLMSSPNEGOTIATE128 | + negotiateFlagNTLMSSPNEGOTIATEUNICODE | + negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY + +//NewNegotiateMessage creates a new NEGOTIATE message with the +//flags that this package supports. +func NewNegotiateMessage(domainName, workstationName string) ([]byte, error) { + payloadOffset := expMsgBodyLen + flags := defaultFlags + + if domainName != "" { + flags |= negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED + } + + if workstationName != "" { + flags |= negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED + } + + msg := negotiateMessageFields{ + messageHeader: newMessageHeader(1), + NegotiateFlags: flags, + Domain: newVarField(&payloadOffset, len(domainName)), + Workstation: newVarField(&payloadOffset, len(workstationName)), + Version: DefaultVersion(), + } + + b := bytes.Buffer{} + if err := binary.Write(&b, binary.LittleEndian, &msg); err != nil { + return nil, err + } + if b.Len() != expMsgBodyLen { + return nil, errors.New("incorrect body length") + } + + payload := strings.ToUpper(domainName + workstationName) + if _, err := b.WriteString(payload); err != nil { + return nil, err + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiator.go b/vendor/github.com/Azure/go-ntlmssp/negotiator.go new file mode 100644 index 00000000000..a5a5f5b7500 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/negotiator.go @@ -0,0 +1,144 @@ +package ntlmssp + +import ( + "bytes" + "encoding/base64" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// GetDomain : parse domain name from based on slashes in the input +func GetDomain(user string) (string, string) { + domain := "" + + if strings.Contains(user, "\\") { + ucomponents := strings.SplitN(user, "\\", 2) + domain = ucomponents[0] + user = ucomponents[1] + } + return user, domain +} + +//Negotiator is a http.Roundtripper decorator that automatically +//converts basic authentication to NTLM/Negotiate authentication when appropriate. +type Negotiator struct{ http.RoundTripper } + +//RoundTrip sends the request to the server, handling any authentication +//re-sends as needed. 
+func (l Negotiator) RoundTrip(req *http.Request) (res *http.Response, err error) { + // Use default round tripper if not provided + rt := l.RoundTripper + if rt == nil { + rt = http.DefaultTransport + } + // If it is not basic auth, just round trip the request as usual + reqauth := authheader(req.Header.Values("Authorization")) + if !reqauth.IsBasic() { + return rt.RoundTrip(req) + } + reqauthBasic := reqauth.Basic() + // Save request body + body := bytes.Buffer{} + if req.Body != nil { + _, err = body.ReadFrom(req.Body) + if err != nil { + return nil, err + } + + req.Body.Close() + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + } + // first try anonymous, in case the server still finds us + // authenticated from previous traffic + req.Header.Del("Authorization") + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusUnauthorized { + return res, err + } + resauth := authheader(res.Header.Values("Www-Authenticate")) + if !resauth.IsNegotiate() && !resauth.IsNTLM() { + // Unauthorized, Negotiate not requested, let's try with basic auth + req.Header.Set("Authorization", string(reqauthBasic)) + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusUnauthorized { + return res, err + } + resauth = authheader(res.Header.Values("Www-Authenticate")) + } + + if resauth.IsNegotiate() || resauth.IsNTLM() { + // 401 with request:Basic and response:Negotiate + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + // recycle credentials + u, p, err := reqauth.GetBasicCreds() + if err != nil { + return nil, err + } + + // get domain from username + domain := "" + u, domain = GetDomain(u) + + // send negotiate + negotiateMessage, err := NewNegotiateMessage(domain, "") + if err != nil { + return nil, err + } + if resauth.IsNTLM() { + req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(negotiateMessage)) + } else { + req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(negotiateMessage)) + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + + // receive challenge? 
+ resauth = authheader(res.Header.Values("Www-Authenticate")) + challengeMessage, err := resauth.GetData() + if err != nil { + return nil, err + } + if !(resauth.IsNegotiate() || resauth.IsNTLM()) || len(challengeMessage) == 0 { + // Negotiation failed, let client deal with response + return res, nil + } + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + // send authenticate + authenticateMessage, err := ProcessChallenge(challengeMessage, u, p) + if err != nil { + return nil, err + } + if resauth.IsNTLM() { + req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(authenticateMessage)) + } else { + req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(authenticateMessage)) + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + return rt.RoundTrip(req) + } + + return res, err +} diff --git a/vendor/github.com/Azure/go-ntlmssp/nlmp.go b/vendor/github.com/Azure/go-ntlmssp/nlmp.go new file mode 100644 index 00000000000..1e65abe8b53 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/nlmp.go @@ -0,0 +1,51 @@ +// Package ntlmssp provides NTLM/Negotiate authentication over HTTP +// +// Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx, +// implementation hints from http://davenport.sourceforge.net/ntlm.html . +// This package only implements authentication, no key exchange or encryption. It +// only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding. +// This package implements NTLMv2. +package ntlmssp + +import ( + "crypto/hmac" + "crypto/md5" + "golang.org/x/crypto/md4" + "strings" +) + +func getNtlmV2Hash(password, username, target string) []byte { + return hmacMd5(getNtlmHash(password), toUnicode(strings.ToUpper(username)+target)) +} + +func getNtlmHash(password string) []byte { + hash := md4.New() + hash.Write(toUnicode(password)) + return hash.Sum(nil) +} + +func computeNtlmV2Response(ntlmV2Hash, serverChallenge, clientChallenge, + timestamp, targetInfo []byte) []byte { + + temp := []byte{1, 1, 0, 0, 0, 0, 0, 0} + temp = append(temp, timestamp...) + temp = append(temp, clientChallenge...) + temp = append(temp, 0, 0, 0, 0) + temp = append(temp, targetInfo...) + temp = append(temp, 0, 0, 0, 0) + + NTProofStr := hmacMd5(ntlmV2Hash, serverChallenge, temp) + return append(NTProofStr, temp...) +} + +func computeLmV2Response(ntlmV2Hash, serverChallenge, clientChallenge []byte) []byte { + return append(hmacMd5(ntlmV2Hash, serverChallenge, clientChallenge), clientChallenge...) 
+} + +func hmacMd5(key []byte, data ...[]byte) []byte { + mac := hmac.New(md5.New, key) + for _, d := range data { + mac.Write(d) + } + return mac.Sum(nil) +} diff --git a/vendor/github.com/Azure/go-ntlmssp/unicode.go b/vendor/github.com/Azure/go-ntlmssp/unicode.go new file mode 100644 index 00000000000..7b4f47163d0 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/unicode.go @@ -0,0 +1,29 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "errors" + "unicode/utf16" +) + +// helper func's for dealing with Windows Unicode (UTF16LE) + +func fromUnicode(d []byte) (string, error) { + if len(d)%2 > 0 { + return "", errors.New("Unicode (UTF 16 LE) specified, but uneven data length") + } + s := make([]uint16, len(d)/2) + err := binary.Read(bytes.NewReader(d), binary.LittleEndian, &s) + if err != nil { + return "", err + } + return string(utf16.Decode(s)), nil +} + +func toUnicode(s string) []byte { + uints := utf16.Encode([]rune(s)) + b := bytes.Buffer{} + binary.Write(&b, binary.LittleEndian, &uints) + return b.Bytes() +} diff --git a/vendor/github.com/Azure/go-ntlmssp/varfield.go b/vendor/github.com/Azure/go-ntlmssp/varfield.go new file mode 100644 index 00000000000..15f9aa113d8 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/varfield.go @@ -0,0 +1,40 @@ +package ntlmssp + +import ( + "errors" +) + +type varField struct { + Len uint16 + MaxLen uint16 + BufferOffset uint32 +} + +func (f varField) ReadFrom(buffer []byte) ([]byte, error) { + if len(buffer) < int(f.BufferOffset+uint32(f.Len)) { + return nil, errors.New("Error reading data, varField extends beyond buffer") + } + return buffer[f.BufferOffset : f.BufferOffset+uint32(f.Len)], nil +} + +func (f varField) ReadStringFrom(buffer []byte, unicode bool) (string, error) { + d, err := f.ReadFrom(buffer) + if err != nil { + return "", err + } + if unicode { // UTF-16LE encoding scheme + return fromUnicode(d) + } + // OEM encoding, close enough to ASCII, since no code page is specified + return string(d), err +} + +func newVarField(ptr *int, fieldsize int) varField { + f := varField{ + Len: uint16(fieldsize), + MaxLen: uint16(fieldsize), + BufferOffset: uint32(*ptr), + } + *ptr += fieldsize + return f +} diff --git a/vendor/github.com/Azure/go-ntlmssp/version.go b/vendor/github.com/Azure/go-ntlmssp/version.go new file mode 100644 index 00000000000..6d848921244 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/version.go @@ -0,0 +1,20 @@ +package ntlmssp + +// Version is a struct representing https://msdn.microsoft.com/en-us/library/cc236654.aspx +type Version struct { + ProductMajorVersion uint8 + ProductMinorVersion uint8 + ProductBuild uint16 + _ [3]byte + NTLMRevisionCurrent uint8 +} + +// DefaultVersion returns a Version with "sensible" defaults (Windows 7) +func DefaultVersion() Version { + return Version{ + ProductMajorVersion: 6, + ProductMinorVersion: 1, + ProductBuild: 7601, + NTLMRevisionCurrent: 15, + } +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/.travis.yml b/vendor/github.com/ChrisTrenkamp/goxpath/.travis.yml new file mode 100644 index 00000000000..53c84e840de --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.6 + +before_install: + - go get -u github.com/ChrisTrenkamp/goxpath + - go get -u github.com/ChrisTrenkamp/goxpath/cmd/goxpath + - go get -u github.com/wadey/gocovmerge + +script: + - go list -f '{{if gt (len .TestGoFiles) 0}}"go test -covermode count -coverprofile {{.Name}}.coverprofile -coverpkg ./... 
{{.ImportPath}}"{{end}}' ./... | xargs -I {} bash -c {} + - gocovmerge `ls *.coverprofile` > coverage.txt + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/LICENSE b/vendor/github.com/ChrisTrenkamp/goxpath/LICENSE new file mode 100644 index 00000000000..2afe88a6155 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 ChrisTrenkamp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/README.md b/vendor/github.com/ChrisTrenkamp/goxpath/README.md new file mode 100644 index 00000000000..49407dd70d8 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/README.md @@ -0,0 +1 @@ +# Deprecated. Use [xsel](https://github.com/ChrisTrenkamp/xsel) instead. diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/coverage.sh b/vendor/github.com/ChrisTrenkamp/goxpath/coverage.sh new file mode 100644 index 00000000000..0e35d7de751 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/coverage.sh @@ -0,0 +1,17 @@ +#!/bin/bash +cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +go get github.com/ChrisTrenkamp/goxpath/cmd/goxpath +if [ $? != 0 ]; then + exit 1 +fi +go test >/dev/null 2>&1 +if [ $? != 0 ]; then + go test + exit 1 +fi +gometalinter --deadline=1m ./... +go list -f '{{if gt (len .TestGoFiles) 0}}"go test -covermode count -coverprofile {{.Name}}.coverprofile -coverpkg ./... {{.ImportPath}}"{{end}} >/dev/null' ./... | xargs -I {} bash -c {} 2>/dev/null +gocovmerge `ls *.coverprofile` > coverage.txt +go tool cover -html=coverage.txt -o coverage.html +firefox coverage.html +rm coverage.html coverage.txt *.coverprofile diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/goxpath.go b/vendor/github.com/ChrisTrenkamp/goxpath/goxpath.go new file mode 100644 index 00000000000..189ebb9c2fa --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/goxpath.go @@ -0,0 +1,117 @@ +package goxpath + +import ( + "encoding/xml" + "fmt" + + "github.com/ChrisTrenkamp/goxpath/internal/execxp" + "github.com/ChrisTrenkamp/goxpath/parser" + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//Opts defines namespace mappings and custom functions for XPath expressions. +type Opts struct { + NS map[string]string + Funcs map[xml.Name]tree.Wrap + Vars map[string]tree.Result +} + +//FuncOpts is a function wrapper for Opts. 
+type FuncOpts func(*Opts) + +//XPathExec is the XPath executor, compiled from an XPath string +type XPathExec struct { + n *parser.Node +} + +//Parse parses the XPath expression, xp, returning an XPath executor. +func Parse(xp string) (XPathExec, error) { + n, err := parser.Parse(xp) + return XPathExec{n: n}, err +} + +//MustParse is like Parse, but panics instead of returning an error. +func MustParse(xp string) XPathExec { + ret, err := Parse(xp) + if err != nil { + panic(err) + } + return ret +} + +//Exec executes the XPath expression, xp, against the tree, t, with the +//namespace mappings, ns, and returns the result as a stringer. +func (xp XPathExec) Exec(t tree.Node, opts ...FuncOpts) (tree.Result, error) { + o := &Opts{ + NS: make(map[string]string), + Funcs: make(map[xml.Name]tree.Wrap), + Vars: make(map[string]tree.Result), + } + for _, i := range opts { + i(o) + } + return execxp.Exec(xp.n, t, o.NS, o.Funcs, o.Vars) +} + +//ExecBool is like Exec, except it will attempt to convert the result to its boolean value. +func (xp XPathExec) ExecBool(t tree.Node, opts ...FuncOpts) (bool, error) { + res, err := xp.Exec(t, opts...) + if err != nil { + return false, err + } + + b, ok := res.(tree.IsBool) + if !ok { + return false, fmt.Errorf("Cannot convert result to a boolean") + } + + return bool(b.Bool()), nil +} + +//ExecNum is like Exec, except it will attempt to convert the result to its number value. +func (xp XPathExec) ExecNum(t tree.Node, opts ...FuncOpts) (float64, error) { + res, err := xp.Exec(t, opts...) + if err != nil { + return 0, err + } + + n, ok := res.(tree.IsNum) + if !ok { + return 0, fmt.Errorf("Cannot convert result to a number") + } + + return float64(n.Num()), nil +} + +//ExecNode is like Exec, except it will attempt to return the result as a node-set. +func (xp XPathExec) ExecNode(t tree.Node, opts ...FuncOpts) (tree.NodeSet, error) { + res, err := xp.Exec(t, opts...) + if err != nil { + return nil, err + } + + n, ok := res.(tree.NodeSet) + if !ok { + return nil, fmt.Errorf("Cannot convert result to a node-set") + } + + return n, nil +} + +//MustExec is like Exec, but panics instead of returning an error. +func (xp XPathExec) MustExec(t tree.Node, opts ...FuncOpts) tree.Result { + res, err := xp.Exec(t, opts...) + if err != nil { + panic(err) + } + return res +} + +//ParseExec parses the XPath string, xpstr, and runs Exec. +func ParseExec(xpstr string, t tree.Node, opts ...FuncOpts) (tree.Result, error) { + xp, err := Parse(xpstr) + if err != nil { + return nil, err + } + return xp.Exec(t, opts...) +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/execxp.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/execxp.go new file mode 100644 index 00000000000..7b32515683e --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/execxp.go @@ -0,0 +1,27 @@ +package execxp + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/parser" + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//Exec executes the XPath expression, xp, against the tree, t, with the +//namespace mappings, ns. 
+func Exec(n *parser.Node, t tree.Node, ns map[string]string, fns map[xml.Name]tree.Wrap, v map[string]tree.Result) (tree.Result, error) { + f := xpFilt{ + t: t, + ns: ns, + ctx: tree.NodeSet{t}, + fns: fns, + variables: v, + } + + return exec(&f, n) +} + +func exec(f *xpFilt, n *parser.Node) (tree.Result, error) { + err := xfExec(f, n) + return f.ctx, err +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil/findUtil.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil/findUtil.go new file mode 100644 index 00000000000..058f79a0bd6 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil/findUtil.go @@ -0,0 +1,307 @@ +package findutil + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/parser/pathexpr" + "github.com/ChrisTrenkamp/goxpath/tree" + "github.com/ChrisTrenkamp/goxpath/xconst" +) + +const ( + wildcard = "*" +) + +type findFunc func(tree.Node, *pathexpr.PathExpr, *[]tree.Node) + +var findMap = map[string]findFunc{ + xconst.AxisAncestor: findAncestor, + xconst.AxisAncestorOrSelf: findAncestorOrSelf, + xconst.AxisAttribute: findAttribute, + xconst.AxisChild: findChild, + xconst.AxisDescendent: findDescendent, + xconst.AxisDescendentOrSelf: findDescendentOrSelf, + xconst.AxisFollowing: findFollowing, + xconst.AxisFollowingSibling: findFollowingSibling, + xconst.AxisNamespace: findNamespace, + xconst.AxisParent: findParent, + xconst.AxisPreceding: findPreceding, + xconst.AxisPrecedingSibling: findPrecedingSibling, + xconst.AxisSelf: findSelf, +} + +//Find finds nodes based on the pathexpr.PathExpr +func Find(x tree.Node, p pathexpr.PathExpr) []tree.Node { + ret := []tree.Node{} + + if p.Axis == "" { + findChild(x, &p, &ret) + return ret + } + + f := findMap[p.Axis] + f(x, &p, &ret) + + return ret +} + +func findAncestor(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() == tree.NtRoot { + return + } + + addNode(x.GetParent(), p, ret) + findAncestor(x.GetParent(), p, ret) +} + +func findAncestorOrSelf(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + findSelf(x, p, ret) + findAncestor(x, p, ret) +} + +func findAttribute(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if ele, ok := x.(tree.Elem); ok { + for _, i := range ele.GetAttrs() { + addNode(i, p, ret) + } + } +} + +func findChild(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if ele, ok := x.(tree.Elem); ok { + ch := ele.GetChildren() + for i := range ch { + addNode(ch[i], p, ret) + } + } +} + +func findDescendent(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if ele, ok := x.(tree.Elem); ok { + ch := ele.GetChildren() + for i := range ch { + addNode(ch[i], p, ret) + findDescendent(ch[i], p, ret) + } + } +} + +func findDescendentOrSelf(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + findSelf(x, p, ret) + findDescendent(x, p, ret) +} + +func findFollowing(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() == tree.NtRoot { + return + } + par := x.GetParent() + ch := par.GetChildren() + i := 0 + for x != ch[i] { + i++ + } + i++ + for i < len(ch) { + findDescendentOrSelf(ch[i], p, ret) + i++ + } + findFollowing(par, p, ret) +} + +func findFollowingSibling(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() == tree.NtRoot { + return + } + par := x.GetParent() + ch := par.GetChildren() + i := 0 + for x != ch[i] { + i++ + } + i++ + for i < len(ch) { + findSelf(ch[i], p, ret) + i++ + } +} + +func findNamespace(x tree.Node, p 
*pathexpr.PathExpr, ret *[]tree.Node) { + if ele, ok := x.(tree.NSElem); ok { + ns := tree.BuildNS(ele) + for _, i := range ns { + addNode(i, p, ret) + } + } +} + +func findParent(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() != tree.NtRoot { + addNode(x.GetParent(), p, ret) + } +} + +func findPreceding(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() == tree.NtRoot { + return + } + par := x.GetParent() + ch := par.GetChildren() + i := len(ch) - 1 + for x != ch[i] { + i-- + } + i-- + for i >= 0 { + findDescendentOrSelf(ch[i], p, ret) + i-- + } + findPreceding(par, p, ret) +} + +func findPrecedingSibling(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() == tree.NtRoot { + return + } + par := x.GetParent() + ch := par.GetChildren() + i := len(ch) - 1 + for x != ch[i] { + i-- + } + i-- + for i >= 0 { + findSelf(ch[i], p, ret) + i-- + } +} + +func findSelf(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + addNode(x, p, ret) +} + +func addNode(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + add := false + tok := x.GetToken() + + switch x.GetNodeType() { + case tree.NtAttr: + add = evalAttr(p, tok.(xml.Attr)) + case tree.NtChd: + add = evalChd(p) + case tree.NtComm: + add = evalComm(p) + case tree.NtElem, tree.NtRoot: + add = evalEle(p, tok.(xml.StartElement)) + case tree.NtNs: + add = evalNS(p, tok.(xml.Attr)) + case tree.NtPi: + add = evalPI(p) + } + + if add { + *ret = append(*ret, x) + } +} + +func evalAttr(p *pathexpr.PathExpr, a xml.Attr) bool { + if p.NodeType == "" { + if p.Name.Space != wildcard { + if a.Name.Space != p.NS[p.Name.Space] { + return false + } + } + + if p.Name.Local == wildcard && p.Axis == xconst.AxisAttribute { + return true + } + + if p.Name.Local == a.Name.Local { + return true + } + } else { + if p.NodeType == xconst.NodeTypeNode { + return true + } + } + + return false +} + +func evalChd(p *pathexpr.PathExpr) bool { + if p.NodeType == xconst.NodeTypeText || p.NodeType == xconst.NodeTypeNode { + return true + } + + return false +} + +func evalComm(p *pathexpr.PathExpr) bool { + if p.NodeType == xconst.NodeTypeComment || p.NodeType == xconst.NodeTypeNode { + return true + } + + return false +} + +func evalEle(p *pathexpr.PathExpr, ele xml.StartElement) bool { + if p.NodeType == "" { + return checkNameAndSpace(p, ele) + } + + if p.NodeType == xconst.NodeTypeNode { + return true + } + + return false +} + +func checkNameAndSpace(p *pathexpr.PathExpr, ele xml.StartElement) bool { + if p.Name.Local == wildcard && p.Name.Space == "" { + return true + } + + if p.Name.Space != wildcard && ele.Name.Space != p.NS[p.Name.Space] { + return false + } + + if p.Name.Local == wildcard && p.Axis != xconst.AxisAttribute && p.Axis != xconst.AxisNamespace { + return true + } + + if p.Name.Local == ele.Name.Local { + return true + } + + return false +} + +func evalNS(p *pathexpr.PathExpr, ns xml.Attr) bool { + if p.NodeType == "" { + if p.Name.Space != "" && p.Name.Space != wildcard { + return false + } + + if p.Name.Local == wildcard && p.Axis == xconst.AxisNamespace { + return true + } + + if p.Name.Local == ns.Name.Local { + return true + } + } else { + if p.NodeType == xconst.NodeTypeNode { + return true + } + } + + return false +} + +func evalPI(p *pathexpr.PathExpr) bool { + if p.NodeType == xconst.NodeTypeProcInst || p.NodeType == xconst.NodeTypeNode { + return true + } + + return false +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/boolfns.go 
b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/boolfns.go new file mode 100644 index 00000000000..5480ebcc4e9 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/boolfns.go @@ -0,0 +1,74 @@ +package intfns + +import ( + "fmt" + + "github.com/ChrisTrenkamp/goxpath/tree" + "golang.org/x/text/language" +) + +func boolean(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + if b, ok := args[0].(tree.IsBool); ok { + return b.Bool(), nil + } + + return nil, fmt.Errorf("Cannot convert object to a boolean") +} + +func not(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + b, ok := args[0].(tree.IsBool) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a boolean") + } + return !b.Bool(), nil +} + +func _true(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Bool(true), nil +} + +func _false(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Bool(false), nil +} + +func lang(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + lStr := args[0].String() + + var n tree.Elem + + for _, i := range c.NodeSet { + if i.GetNodeType() == tree.NtElem { + n = i.(tree.Elem) + } else { + n = i.GetParent() + } + + for n.GetNodeType() != tree.NtRoot { + if attr, ok := tree.GetAttribute(n, "lang", tree.XMLSpace); ok { + return checkLang(lStr, attr.Value), nil + } + n = n.GetParent() + } + } + + return tree.Bool(false), nil +} + +func checkLang(srcStr, targStr string) tree.Bool { + srcLang := language.Make(srcStr) + srcRegion, srcRegionConf := srcLang.Region() + + targLang := language.Make(targStr) + targRegion, targRegionConf := targLang.Region() + + if srcRegionConf == language.Exact && targRegionConf != language.Exact { + return tree.Bool(false) + } + + if srcRegion != targRegion && srcRegionConf == language.Exact && targRegionConf == language.Exact { + return tree.Bool(false) + } + + _, _, conf := language.NewMatcher([]language.Tag{srcLang}).Match(targLang) + return tree.Bool(conf >= language.High) +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/intfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/intfns.go new file mode 100644 index 00000000000..8b09fa99f5a --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/intfns.go @@ -0,0 +1,41 @@ +package intfns + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//BuiltIn contains the list of built-in XPath functions +var BuiltIn = map[xml.Name]tree.Wrap{ + //String functions + {Local: "string"}: {Fn: _string, NArgs: 1, LastArgOpt: tree.Optional}, + {Local: "concat"}: {Fn: concat, NArgs: 3, LastArgOpt: tree.Variadic}, + {Local: "starts-with"}: {Fn: startsWith, NArgs: 2}, + {Local: "contains"}: {Fn: contains, NArgs: 2}, + {Local: "substring-before"}: {Fn: substringBefore, NArgs: 2}, + {Local: "substring-after"}: {Fn: substringAfter, NArgs: 2}, + {Local: "substring"}: {Fn: substring, NArgs: 3, LastArgOpt: tree.Optional}, + {Local: "string-length"}: {Fn: stringLength, NArgs: 1, LastArgOpt: tree.Optional}, + {Local: "normalize-space"}: {Fn: normalizeSpace, NArgs: 1, LastArgOpt: tree.Optional}, + {Local: "translate"}: {Fn: translate, NArgs: 3}, + //Node set functions + {Local: "last"}: {Fn: last}, + {Local: "position"}: {Fn: position}, + {Local: "count"}: {Fn: count, NArgs: 1}, + {Local: "local-name"}: {Fn: localName, NArgs: 1, LastArgOpt: tree.Optional}, + {Local: "namespace-uri"}: {Fn: namespaceURI, NArgs: 1, LastArgOpt: tree.Optional}, + {Local: 
"name"}: {Fn: name, NArgs: 1, LastArgOpt: tree.Optional}, + //boolean functions + {Local: "boolean"}: {Fn: boolean, NArgs: 1}, + {Local: "not"}: {Fn: not, NArgs: 1}, + {Local: "true"}: {Fn: _true}, + {Local: "false"}: {Fn: _false}, + {Local: "lang"}: {Fn: lang, NArgs: 1}, + //number functions + {Local: "number"}: {Fn: number, NArgs: 1, LastArgOpt: tree.Optional}, + {Local: "sum"}: {Fn: sum, NArgs: 1}, + {Local: "floor"}: {Fn: floor, NArgs: 1}, + {Local: "ceiling"}: {Fn: ceiling, NArgs: 1}, + {Local: "round"}: {Fn: round, NArgs: 1}, +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/nodesetfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/nodesetfns.go new file mode 100644 index 00000000000..7286a304259 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/nodesetfns.go @@ -0,0 +1,131 @@ +package intfns + +import ( + "encoding/xml" + "fmt" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +func last(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Num(c.Size), nil +} + +func position(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Num(c.Pos), nil +} + +func count(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + n, ok := args[0].(tree.NodeSet) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a node-set") + } + + return tree.Num(len(n)), nil +} + +func localName(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + var n tree.NodeSet + ok := true + if len(args) == 1 { + n, ok = args[0].(tree.NodeSet) + } else { + n = c.NodeSet + } + if !ok { + return nil, fmt.Errorf("Cannot convert object to a node-set") + } + + ret := "" + if len(n) == 0 { + return tree.String(ret), nil + } + node := n[0] + + tok := node.GetToken() + + switch node.GetNodeType() { + case tree.NtElem: + ret = tok.(xml.StartElement).Name.Local + case tree.NtAttr: + ret = tok.(xml.Attr).Name.Local + case tree.NtPi: + ret = tok.(xml.ProcInst).Target + } + + return tree.String(ret), nil +} + +func namespaceURI(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + var n tree.NodeSet + ok := true + if len(args) == 1 { + n, ok = args[0].(tree.NodeSet) + } else { + n = c.NodeSet + } + if !ok { + return nil, fmt.Errorf("Cannot convert object to a node-set") + } + + ret := "" + if len(n) == 0 { + return tree.String(ret), nil + } + node := n[0] + + tok := node.GetToken() + + switch node.GetNodeType() { + case tree.NtElem: + ret = tok.(xml.StartElement).Name.Space + case tree.NtAttr: + ret = tok.(xml.Attr).Name.Space + } + + return tree.String(ret), nil +} + +func name(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + var n tree.NodeSet + ok := true + if len(args) == 1 { + n, ok = args[0].(tree.NodeSet) + } else { + n = c.NodeSet + } + if !ok { + return nil, fmt.Errorf("Cannot convert object to a node-set") + } + + ret := "" + if len(n) == 0 { + return tree.String(ret), nil + } + node := n[0] + + switch node.GetNodeType() { + case tree.NtElem: + t := node.GetToken().(xml.StartElement) + space := "" + + if t.Name.Space != "" { + space = fmt.Sprintf("{%s}", t.Name.Space) + } + + ret = fmt.Sprintf("%s%s", space, t.Name.Local) + case tree.NtAttr: + t := node.GetToken().(xml.Attr) + space := "" + + if t.Name.Space != "" { + space = fmt.Sprintf("{%s}", t.Name.Space) + } + + ret = fmt.Sprintf("%s%s", space, t.Name.Local) + case tree.NtPi: + ret = fmt.Sprintf("%s", node.GetToken().(xml.ProcInst).Target) + } + + return tree.String(ret), nil +} diff --git 
a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/numfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/numfns.go new file mode 100644 index 00000000000..464403f96b9 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/numfns.go @@ -0,0 +1,71 @@ +package intfns + +import ( + "fmt" + "math" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +func number(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + if b, ok := args[0].(tree.IsNum); ok { + return b.Num(), nil + } + + return nil, fmt.Errorf("Cannot convert object to a number") +} + +func sum(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + n, ok := args[0].(tree.NodeSet) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a node-set") + } + + ret := 0.0 + for _, i := range n { + ret += float64(tree.GetNodeNum(i)) + } + + return tree.Num(ret), nil +} + +func floor(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + n, ok := args[0].(tree.IsNum) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a number") + } + + return tree.Num(math.Floor(float64(n.Num()))), nil +} + +func ceiling(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + n, ok := args[0].(tree.IsNum) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a number") + } + + return tree.Num(math.Ceil(float64(n.Num()))), nil +} + +func round(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + isn, ok := args[0].(tree.IsNum) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a number") + } + + n := isn.Num() + + if math.IsNaN(float64(n)) || math.IsInf(float64(n), 0) { + return n, nil + } + + if n < -0.5 { + n = tree.Num(int(n - 0.5)) + } else if n > 0.5 { + n = tree.Num(int(n + 0.5)) + } else { + n = 0 + } + + return n, nil +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/stringfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/stringfns.go new file mode 100644 index 00000000000..4e6a73a0998 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/stringfns.go @@ -0,0 +1,141 @@ +package intfns + +import ( + "math" + "regexp" + "strings" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +func _string(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + if len(args) == 1 { + return tree.String(args[0].String()), nil + } + + return tree.String(c.NodeSet.String()), nil +} + +func concat(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + ret := "" + + for _, i := range args { + ret += i.String() + } + + return tree.String(ret), nil +} + +func startsWith(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Bool(strings.Index(args[0].String(), args[1].String()) == 0), nil +} + +func contains(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Bool(strings.Contains(args[0].String(), args[1].String())), nil +} + +func substringBefore(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + ind := strings.Index(args[0].String(), args[1].String()) + if ind == -1 { + return tree.String(""), nil + } + + return tree.String(args[0].String()[:ind]), nil +} + +func substringAfter(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + ind := strings.Index(args[0].String(), args[1].String()) + if ind == -1 { + return tree.String(""), nil + } + + return tree.String(args[0].String()[ind+len(args[1].String()):]), nil +} + +func substring(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + str := args[0].String() + + bNum, bErr := 
round(c, args[1]) + if bErr != nil { + return nil, bErr + } + + b := bNum.(tree.Num).Num() + + if float64(b-1) >= float64(len(str)) || math.IsNaN(float64(b)) { + return tree.String(""), nil + } + + if len(args) == 2 { + if b <= 1 { + b = 1 + } + + return tree.String(str[int(b)-1:]), nil + } + + eNum, eErr := round(c, args[2]) + if eErr != nil { + return nil, eErr + } + + e := eNum.(tree.Num).Num() + + if e <= 0 || math.IsNaN(float64(e)) || (math.IsInf(float64(b), 0) && math.IsInf(float64(e), 0)) { + return tree.String(""), nil + } + + if b <= 1 { + e = b + e - 1 + b = 1 + } + + if float64(b+e-1) >= float64(len(str)) { + e = tree.Num(len(str)) - b + 1 + } + + return tree.String(str[int(b)-1 : int(b+e)-1]), nil +} + +func stringLength(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + var str string + if len(args) == 1 { + str = args[0].String() + } else { + str = c.NodeSet.String() + } + + return tree.Num(len(str)), nil +} + +var spaceTrim = regexp.MustCompile(`\s+`) + +func normalizeSpace(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + var str string + if len(args) == 1 { + str = args[0].String() + } else { + str = c.NodeSet.String() + } + + str = strings.TrimSpace(str) + + return tree.String(spaceTrim.ReplaceAllString(str, " ")), nil +} + +func translate(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + ret := args[0].String() + src := args[1].String() + repl := args[2].String() + + for i := range src { + r := "" + if i < len(repl) { + r = string(repl[i]) + } + + ret = strings.Replace(ret, string(src[i]), r, -1) + } + + return tree.String(ret), nil +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/operators.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/operators.go new file mode 100644 index 00000000000..d41ba7fcc7b --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/operators.go @@ -0,0 +1,212 @@ +package execxp + +import ( + "fmt" + "math" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +func bothNodeOperator(left tree.NodeSet, right tree.NodeSet, f *xpFilt, op string) error { + var err error + for _, l := range left { + for _, r := range right { + lStr := l.ResValue() + rStr := r.ResValue() + + if eqOps[op] { + err = equalsOperator(tree.String(lStr), tree.String(rStr), f, op) + if err == nil && f.ctx.String() == tree.True { + return nil + } + } else { + err = numberOperator(tree.String(lStr), tree.String(rStr), f, op) + if err == nil && f.ctx.String() == tree.True { + return nil + } + } + } + } + + f.ctx = tree.Bool(false) + + return nil +} + +func leftNodeOperator(left tree.NodeSet, right tree.Result, f *xpFilt, op string) error { + var err error + for _, l := range left { + lStr := l.ResValue() + + if eqOps[op] { + err = equalsOperator(tree.String(lStr), right, f, op) + if err == nil && f.ctx.String() == tree.True { + return nil + } + } else { + err = numberOperator(tree.String(lStr), right, f, op) + if err == nil && f.ctx.String() == tree.True { + return nil + } + } + } + + f.ctx = tree.Bool(false) + + return nil +} + +func rightNodeOperator(left tree.Result, right tree.NodeSet, f *xpFilt, op string) error { + var err error + for _, r := range right { + rStr := r.ResValue() + + if eqOps[op] { + err = equalsOperator(left, tree.String(rStr), f, op) + if err == nil && f.ctx.String() == "true" { + return nil + } + } else { + err = numberOperator(left, tree.String(rStr), f, op) + if err == nil && f.ctx.String() == "true" { + return nil + } + } + } + + f.ctx = tree.Bool(false) + + return nil +} + +func 
equalsOperator(left, right tree.Result, f *xpFilt, op string) error { + _, lOK := left.(tree.Bool) + _, rOK := right.(tree.Bool) + + if lOK || rOK { + lTest, lt := left.(tree.IsBool) + rTest, rt := right.(tree.IsBool) + if !lt || !rt { + return fmt.Errorf("Cannot convert argument to boolean") + } + + if op == "=" { + f.ctx = tree.Bool(lTest.Bool() == rTest.Bool()) + } else { + f.ctx = tree.Bool(lTest.Bool() != rTest.Bool()) + } + + return nil + } + + _, lOK = left.(tree.Num) + _, rOK = right.(tree.Num) + if lOK || rOK { + return numberOperator(left, right, f, op) + } + + lStr := left.String() + rStr := right.String() + + if op == "=" { + f.ctx = tree.Bool(lStr == rStr) + } else { + f.ctx = tree.Bool(lStr != rStr) + } + + return nil +} + +func numberOperator(left, right tree.Result, f *xpFilt, op string) error { + lt, lOK := left.(tree.IsNum) + rt, rOK := right.(tree.IsNum) + if !lOK || !rOK { + return fmt.Errorf("Cannot convert data type to number") + } + + ln, rn := lt.Num(), rt.Num() + + switch op { + case "*": + f.ctx = ln * rn + case "div": + if rn != 0 { + f.ctx = ln / rn + } else { + if ln == 0 { + f.ctx = tree.Num(math.NaN()) + } else { + if math.Signbit(float64(ln)) == math.Signbit(float64(rn)) { + f.ctx = tree.Num(math.Inf(1)) + } else { + f.ctx = tree.Num(math.Inf(-1)) + } + } + } + case "mod": + f.ctx = tree.Num(int(ln) % int(rn)) + case "+": + f.ctx = ln + rn + case "-": + f.ctx = ln - rn + case "=": + f.ctx = tree.Bool(ln == rn) + case "!=": + f.ctx = tree.Bool(ln != rn) + case "<": + f.ctx = tree.Bool(ln < rn) + case "<=": + f.ctx = tree.Bool(ln <= rn) + case ">": + f.ctx = tree.Bool(ln > rn) + case ">=": + f.ctx = tree.Bool(ln >= rn) + } + + return nil +} + +func andOrOperator(left, right tree.Result, f *xpFilt, op string) error { + lt, lOK := left.(tree.IsBool) + rt, rOK := right.(tree.IsBool) + + if !lOK || !rOK { + return fmt.Errorf("Cannot convert argument to boolean") + } + + l, r := lt.Bool(), rt.Bool() + + if op == "and" { + f.ctx = l && r + } else { + f.ctx = l || r + } + + return nil +} + +func unionOperator(left, right tree.Result, f *xpFilt, op string) error { + lNode, lOK := left.(tree.NodeSet) + rNode, rOK := right.(tree.NodeSet) + + if !lOK || !rOK { + return fmt.Errorf("Cannot convert data type to node-set") + } + + uniq := make(map[int]tree.Node) + for _, i := range lNode { + uniq[i.Pos()] = i + } + for _, i := range rNode { + uniq[i.Pos()] = i + } + + res := make(tree.NodeSet, 0, len(uniq)) + for _, v := range uniq { + res = append(res, v) + } + + f.ctx = res + + return nil +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/paths.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/paths.go new file mode 100644 index 00000000000..5a78138eef4 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/paths.go @@ -0,0 +1,396 @@ +package execxp + +import ( + "encoding/xml" + "fmt" + "strconv" + "strings" + + "github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil" + "github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns" + "github.com/ChrisTrenkamp/goxpath/internal/xsort" + "github.com/ChrisTrenkamp/goxpath/lexer" + "github.com/ChrisTrenkamp/goxpath/parser" + "github.com/ChrisTrenkamp/goxpath/parser/pathexpr" + "github.com/ChrisTrenkamp/goxpath/tree" + "github.com/ChrisTrenkamp/goxpath/xconst" +) + +type xpFilt struct { + t tree.Node + ctx tree.Result + expr pathexpr.PathExpr + ns map[string]string + ctxPos int + ctxSize int + proxPos map[int]int + fns map[xml.Name]tree.Wrap + variables map[string]tree.Result 
+} + +type xpExecFn func(*xpFilt, string) + +var xpFns = map[lexer.XItemType]xpExecFn{ + lexer.XItemAbsLocPath: xfAbsLocPath, + lexer.XItemAbbrAbsLocPath: xfAbbrAbsLocPath, + lexer.XItemRelLocPath: xfRelLocPath, + lexer.XItemAbbrRelLocPath: xfAbbrRelLocPath, + lexer.XItemAxis: xfAxis, + lexer.XItemAbbrAxis: xfAbbrAxis, + lexer.XItemNCName: xfNCName, + lexer.XItemQName: xfQName, + lexer.XItemNodeType: xfNodeType, + lexer.XItemProcLit: xfProcInstLit, + lexer.XItemStrLit: xfStrLit, + lexer.XItemNumLit: xfNumLit, +} + +func xfExec(f *xpFilt, n *parser.Node) (err error) { + for n != nil { + if fn, ok := xpFns[n.Val.Typ]; ok { + fn(f, n.Val.Val) + n = n.Left + } else if n.Val.Typ == lexer.XItemPredicate { + if err = xfPredicate(f, n.Left); err != nil { + return + } + + n = n.Right + } else if n.Val.Typ == lexer.XItemFunction { + if err = xfFunction(f, n); err != nil { + return + } + + n = n.Right + } else if n.Val.Typ == lexer.XItemOperator { + lf := xpFilt{ + t: f.t, + ns: f.ns, + ctx: f.ctx, + ctxPos: f.ctxPos, + ctxSize: f.ctxSize, + proxPos: f.proxPos, + fns: f.fns, + variables: f.variables, + } + left, err := exec(&lf, n.Left) + if err != nil { + return err + } + + rf := xpFilt{ + t: f.t, + ns: f.ns, + ctx: f.ctx, + fns: f.fns, + variables: f.variables, + } + right, err := exec(&rf, n.Right) + if err != nil { + return err + } + + return xfOperator(left, right, f, n.Val.Val) + } else if n.Val.Typ == lexer.XItemVariable { + if res, ok := f.variables[n.Val.Val]; ok { + f.ctx = res + return nil + } + return fmt.Errorf("Invalid variable '%s'", n.Val.Val) + } else if string(n.Val.Typ) == "" { + n = n.Left + //} else { + // return fmt.Errorf("Cannot process " + string(n.Val.Typ)) + } + } + + return +} + +func xfPredicate(f *xpFilt, n *parser.Node) (err error) { + res := f.ctx.(tree.NodeSet) + newRes := make(tree.NodeSet, 0, len(res)) + + for i := range res { + pf := xpFilt{ + t: f.t, + ns: f.ns, + ctxPos: i, + ctxSize: f.ctxSize, + ctx: tree.NodeSet{res[i]}, + fns: f.fns, + variables: f.variables, + } + + predRes, err := exec(&pf, n) + if err != nil { + return err + } + + ok, err := checkPredRes(predRes, f, res[i]) + if err != nil { + return err + } + + if ok { + newRes = append(newRes, res[i]) + } + } + + f.proxPos = make(map[int]int) + for pos, j := range newRes { + f.proxPos[j.Pos()] = pos + 1 + } + + f.ctx = newRes + f.ctxSize = len(newRes) + + return +} + +func checkPredRes(ret tree.Result, f *xpFilt, node tree.Node) (bool, error) { + if num, ok := ret.(tree.Num); ok { + if float64(f.proxPos[node.Pos()]) == float64(num) { + return true, nil + } + return false, nil + } + + if b, ok := ret.(tree.IsBool); ok { + return bool(b.Bool()), nil + } + + return false, fmt.Errorf("Cannot convert argument to boolean") +} + +func xfFunction(f *xpFilt, n *parser.Node) error { + spl := strings.Split(n.Val.Val, ":") + var name xml.Name + if len(spl) == 1 { + name.Local = spl[0] + } else { + name.Space = f.ns[spl[0]] + name.Local = spl[1] + } + fn, ok := intfns.BuiltIn[name] + if !ok { + fn, ok = f.fns[name] + } + + if ok { + args := []tree.Result{} + param := n.Left + + for param != nil { + pf := xpFilt{ + t: f.t, + ctx: f.ctx, + ns: f.ns, + ctxPos: f.ctxPos, + ctxSize: f.ctxSize, + fns: f.fns, + variables: f.variables, + } + res, err := exec(&pf, param.Left) + if err != nil { + return err + } + + args = append(args, res) + param = param.Right + } + + filt, err := fn.Call(tree.Ctx{NodeSet: f.ctx.(tree.NodeSet), Size: f.ctxSize, Pos: f.ctxPos + 1}, args...) 
+ f.ctx = filt + return err + } + + return fmt.Errorf("Unknown function: %s", n.Val.Val) +} + +var eqOps = map[string]bool{ + "=": true, + "!=": true, +} + +var booleanOps = map[string]bool{ + "=": true, + "!=": true, + "<": true, + "<=": true, + ">": true, + ">=": true, +} + +var numOps = map[string]bool{ + "*": true, + "div": true, + "mod": true, + "+": true, + "-": true, + "=": true, + "!=": true, + "<": true, + "<=": true, + ">": true, + ">=": true, +} + +var andOrOps = map[string]bool{ + "and": true, + "or": true, +} + +func xfOperator(left, right tree.Result, f *xpFilt, op string) error { + if booleanOps[op] { + lNode, lOK := left.(tree.NodeSet) + rNode, rOK := right.(tree.NodeSet) + if lOK && rOK { + return bothNodeOperator(lNode, rNode, f, op) + } + + if lOK { + return leftNodeOperator(lNode, right, f, op) + } + + if rOK { + return rightNodeOperator(left, rNode, f, op) + } + + if eqOps[op] { + return equalsOperator(left, right, f, op) + } + } + + if numOps[op] { + return numberOperator(left, right, f, op) + } + + if andOrOps[op] { + return andOrOperator(left, right, f, op) + } + + //if op == "|" { + return unionOperator(left, right, f, op) + //} + + //return fmt.Errorf("Unknown operator " + op) +} + +func xfAbsLocPath(f *xpFilt, val string) { + i := f.t + for i.GetNodeType() != tree.NtRoot { + i = i.GetParent() + } + f.ctx = tree.NodeSet{i} +} + +func xfAbbrAbsLocPath(f *xpFilt, val string) { + i := f.t + for i.GetNodeType() != tree.NtRoot { + i = i.GetParent() + } + f.ctx = tree.NodeSet{i} + f.expr = abbrPathExpr() + find(f) +} + +func xfRelLocPath(f *xpFilt, val string) { +} + +func xfAbbrRelLocPath(f *xpFilt, val string) { + f.expr = abbrPathExpr() + find(f) +} + +func xfAxis(f *xpFilt, val string) { + f.expr.Axis = val +} + +func xfAbbrAxis(f *xpFilt, val string) { + f.expr.Axis = xconst.AxisAttribute +} + +func xfNCName(f *xpFilt, val string) { + f.expr.Name.Space = val +} + +func xfQName(f *xpFilt, val string) { + f.expr.Name.Local = val + find(f) +} + +func xfNodeType(f *xpFilt, val string) { + f.expr.NodeType = val + find(f) +} + +func xfProcInstLit(f *xpFilt, val string) { + filt := tree.NodeSet{} + for _, i := range f.ctx.(tree.NodeSet) { + if i.GetToken().(xml.ProcInst).Target == val { + filt = append(filt, i) + } + } + f.ctx = filt +} + +func xfStrLit(f *xpFilt, val string) { + f.ctx = tree.String(val) +} + +func xfNumLit(f *xpFilt, val string) { + num, _ := strconv.ParseFloat(val, 64) + f.ctx = tree.Num(num) +} + +func abbrPathExpr() pathexpr.PathExpr { + return pathexpr.PathExpr{ + Name: xml.Name{}, + Axis: xconst.AxisDescendentOrSelf, + NodeType: xconst.NodeTypeNode, + } +} + +func find(f *xpFilt) { + dupFilt := make(map[int]tree.Node) + f.proxPos = make(map[int]int) + + if f.expr.Axis == "" && f.expr.NodeType == "" && f.expr.Name.Space == "" { + if f.expr.Name.Local == "." { + f.expr = pathexpr.PathExpr{ + Name: xml.Name{}, + Axis: xconst.AxisSelf, + NodeType: xconst.NodeTypeNode, + } + } + + if f.expr.Name.Local == ".." 
{ + f.expr = pathexpr.PathExpr{ + Name: xml.Name{}, + Axis: xconst.AxisParent, + NodeType: xconst.NodeTypeNode, + } + } + } + + f.expr.NS = f.ns + + for _, i := range f.ctx.(tree.NodeSet) { + for pos, j := range findutil.Find(i, f.expr) { + dupFilt[j.Pos()] = j + f.proxPos[j.Pos()] = pos + 1 + } + } + + res := make(tree.NodeSet, 0, len(dupFilt)) + for _, i := range dupFilt { + res = append(res, i) + } + + xsort.SortNodes(res) + + f.expr = pathexpr.PathExpr{} + f.ctxSize = len(res) + f.ctx = res +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/xsort/xsort.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/xsort/xsort.go new file mode 100644 index 00000000000..95f57c24950 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/xsort/xsort.go @@ -0,0 +1,20 @@ +package xsort + +import ( + "sort" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +type nodeSort []tree.Node + +func (ns nodeSort) Len() int { return len(ns) } +func (ns nodeSort) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } +func (ns nodeSort) Less(i, j int) bool { + return ns[i].Pos() < ns[j].Pos() +} + +//SortNodes sorts the array by the node document order +func SortNodes(res []tree.Node) { + sort.Sort(nodeSort(res)) +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/lexer/lexer.go b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/lexer.go new file mode 100644 index 00000000000..29f53c42abb --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/lexer.go @@ -0,0 +1,419 @@ +package lexer + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +const ( + //XItemError is an error with the parser input + XItemError XItemType = "Error" + //XItemAbsLocPath is an absolute path + XItemAbsLocPath = "Absolute path" + //XItemAbbrAbsLocPath represents an abbreviated absolute path + XItemAbbrAbsLocPath = "Abbreviated absolute path" + //XItemAbbrRelLocPath marks the start of a path expression + XItemAbbrRelLocPath = "Abbreviated relative path" + //XItemRelLocPath represents a relative location path + XItemRelLocPath = "Relative path" + //XItemEndPath marks the end of a path + XItemEndPath = "End path instruction" + //XItemAxis marks an axis specifier of a path + XItemAxis = "Axis" + //XItemAbbrAxis marks an abbreviated axis specifier (just @ at this point) + XItemAbbrAxis = "Abbreviated attribute axis" + //XItemNCName marks a namespace name in a node test + XItemNCName = "Namespace" + //XItemQName marks the local name in an a node test + XItemQName = "Local name" + //XItemNodeType marks a node type in a node test + XItemNodeType = "Node type" + //XItemProcLit marks a processing-instruction literal + XItemProcLit = "processing-instruction" + //XItemFunction marks a function call + XItemFunction = "function" + //XItemArgument marks a function argument + XItemArgument = "function argument" + //XItemEndFunction marks the end of a function + XItemEndFunction = "end of function" + //XItemPredicate marks a predicate in an axis + XItemPredicate = "predicate" + //XItemEndPredicate marks a predicate in an axis + XItemEndPredicate = "end of predicate" + //XItemStrLit marks a string literal + XItemStrLit = "string literal" + //XItemNumLit marks a numeric literal + XItemNumLit = "numeric literal" + //XItemOperator marks an operator + XItemOperator = "operator" + //XItemVariable marks a variable reference + XItemVariable = "variable" +) + +const ( + eof = -(iota + 1) +) + +//XItemType is the parser token types +type XItemType string + +//XItem is the token emitted from the parser +type XItem struct { + 
Typ XItemType + Val string +} + +type stateFn func(*Lexer) stateFn + +//Lexer lexes out XPath expressions +type Lexer struct { + input string + start int + pos int + width int + items chan XItem +} + +//Lex an XPath expresion on the io.Reader +func Lex(xpath string) chan XItem { + l := &Lexer{ + input: xpath, + items: make(chan XItem), + } + go l.run() + return l.items +} + +func (l *Lexer) run() { + for state := startState; state != nil; { + state = state(l) + } + + if l.peek() != eof { + l.errorf("Malformed XPath expression") + } + + close(l.items) +} + +func (l *Lexer) emit(t XItemType) { + l.items <- XItem{t, l.input[l.start:l.pos]} + l.start = l.pos +} + +func (l *Lexer) emitVal(t XItemType, val string) { + l.items <- XItem{t, val} + l.start = l.pos +} + +func (l *Lexer) next() (r rune) { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + + l.pos += l.width + + return r +} + +func (l *Lexer) ignore() { + l.start = l.pos +} + +func (l *Lexer) backup() { + l.pos -= l.width +} + +func (l *Lexer) peek() rune { + r := l.next() + + l.backup() + return r +} + +func (l *Lexer) peekAt(n int) rune { + if n <= 1 { + return l.peek() + } + + width := 0 + var ret rune + + for count := 0; count < n; count++ { + r, s := utf8.DecodeRuneInString(l.input[l.pos+width:]) + width += s + + if l.pos+width > len(l.input) { + return eof + } + + ret = r + } + + return ret +} + +func (l *Lexer) accept(valid string) bool { + if strings.ContainsRune(valid, l.next()) { + return true + } + + l.backup() + return false +} + +func (l *Lexer) acceptRun(valid string) { + for strings.ContainsRune(valid, l.next()) { + } + l.backup() +} + +func (l *Lexer) skip(num int) { + for i := 0; i < num; i++ { + l.next() + } + l.ignore() +} + +func (l *Lexer) skipWS(ig bool) { + for { + n := l.next() + + if n == eof || !unicode.IsSpace(n) { + break + } + } + + l.backup() + + if ig { + l.ignore() + } +} + +func (l *Lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- XItem{ + XItemError, + fmt.Sprintf(format, args...), + } + + return nil +} + +func isElemChar(r rune) bool { + return string(r) != ":" && string(r) != "/" && + (unicode.Is(first, r) || unicode.Is(second, r) || string(r) == "*") && + r != eof +} + +func startState(l *Lexer) stateFn { + l.skipWS(true) + + if string(l.peek()) == "/" { + l.next() + l.ignore() + + if string(l.next()) == "/" { + l.ignore() + return abbrAbsLocPathState + } + + l.backup() + return absLocPathState + } else if string(l.peek()) == `'` || string(l.peek()) == `"` { + if err := getStrLit(l, XItemStrLit); err != nil { + return l.errorf(err.Error()) + } + + if l.peek() != eof { + return startState + } + } else if getNumLit(l) { + l.skipWS(true) + if l.peek() != eof { + return startState + } + } else if string(l.peek()) == "$" { + l.next() + l.ignore() + r := l.peek() + for unicode.Is(first, r) || unicode.Is(second, r) { + l.next() + r = l.peek() + } + tok := l.input[l.start:l.pos] + if len(tok) == 0 { + return l.errorf("Empty variable name") + } + l.emit(XItemVariable) + l.skipWS(true) + if l.peek() != eof { + return startState + } + } else if st := findOperatorState(l); st != nil { + return st + } else { + if isElemChar(l.peek()) { + colons := 0 + + for { + if isElemChar(l.peek()) { + l.next() + } else if string(l.peek()) == ":" { + l.next() + colons++ + } else { + break + } + } + + if string(l.peek()) == "(" && colons <= 1 { + tok := l.input[l.start:l.pos] + err := procFunc(l, tok) + if err != nil { + return 
l.errorf(err.Error()) + } + + l.skipWS(true) + + if string(l.peek()) == "/" { + l.next() + l.ignore() + + if string(l.next()) == "/" { + l.ignore() + return abbrRelLocPathState + } + + l.backup() + return relLocPathState + } + + return startState + } + + l.pos = l.start + return relLocPathState + } else if string(l.peek()) == "@" { + return relLocPathState + } + } + + return nil +} + +func strPeek(str string, l *Lexer) bool { + for i := 0; i < len(str); i++ { + if string(l.peekAt(i+1)) != string(str[i]) { + return false + } + } + return true +} + +func findOperatorState(l *Lexer) stateFn { + l.skipWS(true) + + switch string(l.peek()) { + case ">", "<", "!": + l.next() + if string(l.peek()) == "=" { + l.next() + } + l.emit(XItemOperator) + return startState + case "|", "+", "-", "*", "=": + l.next() + l.emit(XItemOperator) + return startState + case "(": + l.next() + l.emit(XItemOperator) + for state := startState; state != nil; { + state = state(l) + } + l.skipWS(true) + if string(l.next()) != ")" { + return l.errorf("Missing end )") + } + l.emit(XItemOperator) + return startState + } + + if strPeek("and", l) { + l.next() + l.next() + l.next() + l.emit(XItemOperator) + return startState + } + + if strPeek("or", l) { + l.next() + l.next() + l.emit(XItemOperator) + return startState + } + + if strPeek("mod", l) { + l.next() + l.next() + l.next() + l.emit(XItemOperator) + return startState + } + + if strPeek("div", l) { + l.next() + l.next() + l.next() + l.emit(XItemOperator) + return startState + } + + return nil +} + +func getStrLit(l *Lexer, tok XItemType) error { + q := l.next() + var r rune + + l.ignore() + + for r != q { + r = l.next() + if r == eof { + return fmt.Errorf("Unexpected end of string literal.") + } + } + + l.backup() + l.emit(tok) + l.next() + l.ignore() + + return nil +} + +func getNumLit(l *Lexer) bool { + const dig = "0123456789" + l.accept("-") + start := l.pos + l.acceptRun(dig) + + if l.pos == start { + return false + } + + if l.accept(".") { + l.acceptRun(dig) + } + + l.emit(XItemNumLit) + return true +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/lexer/paths.go b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/paths.go new file mode 100644 index 00000000000..dac30d6b653 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/paths.go @@ -0,0 +1,219 @@ +package lexer + +import ( + "fmt" + + "github.com/ChrisTrenkamp/goxpath/xconst" +) + +func absLocPathState(l *Lexer) stateFn { + l.emit(XItemAbsLocPath) + return stepState +} + +func abbrAbsLocPathState(l *Lexer) stateFn { + l.emit(XItemAbbrAbsLocPath) + return stepState +} + +func relLocPathState(l *Lexer) stateFn { + l.emit(XItemRelLocPath) + return stepState +} + +func abbrRelLocPathState(l *Lexer) stateFn { + l.emit(XItemAbbrRelLocPath) + return stepState +} + +func stepState(l *Lexer) stateFn { + l.skipWS(true) + r := l.next() + + for isElemChar(r) { + r = l.next() + } + + l.backup() + tok := l.input[l.start:l.pos] + + state, err := parseSeparators(l, tok) + if err != nil { + return l.errorf(err.Error()) + } + + return getNextPathState(l, state) +} + +func parseSeparators(l *Lexer, tok string) (XItemType, error) { + l.skipWS(false) + state := XItemType(XItemQName) + r := l.peek() + + if string(r) == ":" && string(l.peekAt(2)) == ":" { + var err error + if state, err = getAxis(l, tok); err != nil { + return state, fmt.Errorf(err.Error()) + } + } else if string(r) == ":" { + state = XItemNCName + l.emitVal(state, tok) + l.skip(1) + l.skipWS(true) + } else if string(r) == "@" { + state = 
XItemAbbrAxis + l.emitVal(state, tok) + l.skip(1) + l.skipWS(true) + } else if string(r) == "(" { + var err error + if state, err = getNT(l, tok); err != nil { + return state, fmt.Errorf(err.Error()) + } + } else if len(tok) > 0 { + l.emitVal(state, tok) + } + + return state, nil +} + +func getAxis(l *Lexer, tok string) (XItemType, error) { + var state XItemType + for i := range xconst.AxisNames { + if tok == xconst.AxisNames[i] { + state = XItemAxis + } + } + if state != XItemAxis { + return state, fmt.Errorf("Invalid Axis specifier, %s", tok) + } + l.emitVal(state, tok) + l.skip(2) + l.skipWS(true) + return state, nil +} + +func getNT(l *Lexer, tok string) (XItemType, error) { + isNT := false + for _, i := range xconst.NodeTypes { + if tok == i { + isNT = true + break + } + } + + if isNT { + return procNT(l, tok) + } + + return XItemError, fmt.Errorf("Invalid node-type " + tok) +} + +func procNT(l *Lexer, tok string) (XItemType, error) { + state := XItemType(XItemNodeType) + l.emitVal(state, tok) + l.skip(1) + l.skipWS(true) + n := l.peek() + if tok == xconst.NodeTypeProcInst && (string(n) == `"` || string(n) == `'`) { + if err := getStrLit(l, XItemProcLit); err != nil { + return state, fmt.Errorf(err.Error()) + } + l.skipWS(true) + n = l.next() + } + + if string(n) != ")" { + return state, fmt.Errorf("Missing ) at end of NodeType declaration.") + } + + l.skip(1) + return state, nil +} + +func procFunc(l *Lexer, tok string) error { + state := XItemType(XItemFunction) + l.emitVal(state, tok) + l.skip(1) + l.skipWS(true) + if string(l.peek()) != ")" { + l.emit(XItemArgument) + for { + for state := startState; state != nil; { + state = state(l) + } + l.skipWS(true) + + if string(l.peek()) == "," { + l.emit(XItemArgument) + l.skip(1) + } else if string(l.peek()) == ")" { + l.emit(XItemEndFunction) + l.skip(1) + break + } else if l.peek() == eof { + return fmt.Errorf("Missing ) at end of function declaration.") + } + } + } else { + l.emit(XItemEndFunction) + l.skip(1) + } + + return nil +} + +func getNextPathState(l *Lexer, state XItemType) stateFn { + isMultiPart := state == XItemAxis || state == XItemAbbrAxis || state == XItemNCName + + l.skipWS(true) + + for string(l.peek()) == "[" { + if err := getPred(l); err != nil { + return l.errorf(err.Error()) + } + } + + if string(l.peek()) == "/" && !isMultiPart { + l.skip(1) + if string(l.peek()) == "/" { + l.skip(1) + return abbrRelLocPathState + } + l.skipWS(true) + return relLocPathState + } else if isMultiPart && isElemChar(l.peek()) { + return stepState + } + + if isMultiPart { + return l.errorf("Step is not complete") + } + + l.emit(XItemEndPath) + return findOperatorState +} + +func getPred(l *Lexer) error { + l.emit(XItemPredicate) + l.skip(1) + l.skipWS(true) + + if string(l.peek()) == "]" { + return fmt.Errorf("Missing content in predicate.") + } + + for state := startState; state != nil; { + state = state(l) + } + + l.skipWS(true) + if string(l.peek()) != "]" { + return fmt.Errorf("Missing ] at end of predicate.") + } + l.skip(1) + l.emit(XItemEndPredicate) + l.skipWS(true) + + return nil +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/lexer/xmlchars.go b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/xmlchars.go new file mode 100644 index 00000000000..4e6bc8f6790 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/xmlchars.go @@ -0,0 +1,316 @@ +package lexer + +import "unicode" + +//first and second was copied from src/encoding/xml/xml.go +var first = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x003A, 
0x003A, 1}, + {0x0041, 0x005A, 1}, + {0x005F, 0x005F, 1}, + {0x0061, 0x007A, 1}, + {0x00C0, 0x00D6, 1}, + {0x00D8, 0x00F6, 1}, + {0x00F8, 0x00FF, 1}, + {0x0100, 0x0131, 1}, + {0x0134, 0x013E, 1}, + {0x0141, 0x0148, 1}, + {0x014A, 0x017E, 1}, + {0x0180, 0x01C3, 1}, + {0x01CD, 0x01F0, 1}, + {0x01F4, 0x01F5, 1}, + {0x01FA, 0x0217, 1}, + {0x0250, 0x02A8, 1}, + {0x02BB, 0x02C1, 1}, + {0x0386, 0x0386, 1}, + {0x0388, 0x038A, 1}, + {0x038C, 0x038C, 1}, + {0x038E, 0x03A1, 1}, + {0x03A3, 0x03CE, 1}, + {0x03D0, 0x03D6, 1}, + {0x03DA, 0x03E0, 2}, + {0x03E2, 0x03F3, 1}, + {0x0401, 0x040C, 1}, + {0x040E, 0x044F, 1}, + {0x0451, 0x045C, 1}, + {0x045E, 0x0481, 1}, + {0x0490, 0x04C4, 1}, + {0x04C7, 0x04C8, 1}, + {0x04CB, 0x04CC, 1}, + {0x04D0, 0x04EB, 1}, + {0x04EE, 0x04F5, 1}, + {0x04F8, 0x04F9, 1}, + {0x0531, 0x0556, 1}, + {0x0559, 0x0559, 1}, + {0x0561, 0x0586, 1}, + {0x05D0, 0x05EA, 1}, + {0x05F0, 0x05F2, 1}, + {0x0621, 0x063A, 1}, + {0x0641, 0x064A, 1}, + {0x0671, 0x06B7, 1}, + {0x06BA, 0x06BE, 1}, + {0x06C0, 0x06CE, 1}, + {0x06D0, 0x06D3, 1}, + {0x06D5, 0x06D5, 1}, + {0x06E5, 0x06E6, 1}, + {0x0905, 0x0939, 1}, + {0x093D, 0x093D, 1}, + {0x0958, 0x0961, 1}, + {0x0985, 0x098C, 1}, + {0x098F, 0x0990, 1}, + {0x0993, 0x09A8, 1}, + {0x09AA, 0x09B0, 1}, + {0x09B2, 0x09B2, 1}, + {0x09B6, 0x09B9, 1}, + {0x09DC, 0x09DD, 1}, + {0x09DF, 0x09E1, 1}, + {0x09F0, 0x09F1, 1}, + {0x0A05, 0x0A0A, 1}, + {0x0A0F, 0x0A10, 1}, + {0x0A13, 0x0A28, 1}, + {0x0A2A, 0x0A30, 1}, + {0x0A32, 0x0A33, 1}, + {0x0A35, 0x0A36, 1}, + {0x0A38, 0x0A39, 1}, + {0x0A59, 0x0A5C, 1}, + {0x0A5E, 0x0A5E, 1}, + {0x0A72, 0x0A74, 1}, + {0x0A85, 0x0A8B, 1}, + {0x0A8D, 0x0A8D, 1}, + {0x0A8F, 0x0A91, 1}, + {0x0A93, 0x0AA8, 1}, + {0x0AAA, 0x0AB0, 1}, + {0x0AB2, 0x0AB3, 1}, + {0x0AB5, 0x0AB9, 1}, + {0x0ABD, 0x0AE0, 0x23}, + {0x0B05, 0x0B0C, 1}, + {0x0B0F, 0x0B10, 1}, + {0x0B13, 0x0B28, 1}, + {0x0B2A, 0x0B30, 1}, + {0x0B32, 0x0B33, 1}, + {0x0B36, 0x0B39, 1}, + {0x0B3D, 0x0B3D, 1}, + {0x0B5C, 0x0B5D, 1}, + {0x0B5F, 0x0B61, 1}, + {0x0B85, 0x0B8A, 1}, + {0x0B8E, 0x0B90, 1}, + {0x0B92, 0x0B95, 1}, + {0x0B99, 0x0B9A, 1}, + {0x0B9C, 0x0B9C, 1}, + {0x0B9E, 0x0B9F, 1}, + {0x0BA3, 0x0BA4, 1}, + {0x0BA8, 0x0BAA, 1}, + {0x0BAE, 0x0BB5, 1}, + {0x0BB7, 0x0BB9, 1}, + {0x0C05, 0x0C0C, 1}, + {0x0C0E, 0x0C10, 1}, + {0x0C12, 0x0C28, 1}, + {0x0C2A, 0x0C33, 1}, + {0x0C35, 0x0C39, 1}, + {0x0C60, 0x0C61, 1}, + {0x0C85, 0x0C8C, 1}, + {0x0C8E, 0x0C90, 1}, + {0x0C92, 0x0CA8, 1}, + {0x0CAA, 0x0CB3, 1}, + {0x0CB5, 0x0CB9, 1}, + {0x0CDE, 0x0CDE, 1}, + {0x0CE0, 0x0CE1, 1}, + {0x0D05, 0x0D0C, 1}, + {0x0D0E, 0x0D10, 1}, + {0x0D12, 0x0D28, 1}, + {0x0D2A, 0x0D39, 1}, + {0x0D60, 0x0D61, 1}, + {0x0E01, 0x0E2E, 1}, + {0x0E30, 0x0E30, 1}, + {0x0E32, 0x0E33, 1}, + {0x0E40, 0x0E45, 1}, + {0x0E81, 0x0E82, 1}, + {0x0E84, 0x0E84, 1}, + {0x0E87, 0x0E88, 1}, + {0x0E8A, 0x0E8D, 3}, + {0x0E94, 0x0E97, 1}, + {0x0E99, 0x0E9F, 1}, + {0x0EA1, 0x0EA3, 1}, + {0x0EA5, 0x0EA7, 2}, + {0x0EAA, 0x0EAB, 1}, + {0x0EAD, 0x0EAE, 1}, + {0x0EB0, 0x0EB0, 1}, + {0x0EB2, 0x0EB3, 1}, + {0x0EBD, 0x0EBD, 1}, + {0x0EC0, 0x0EC4, 1}, + {0x0F40, 0x0F47, 1}, + {0x0F49, 0x0F69, 1}, + {0x10A0, 0x10C5, 1}, + {0x10D0, 0x10F6, 1}, + {0x1100, 0x1100, 1}, + {0x1102, 0x1103, 1}, + {0x1105, 0x1107, 1}, + {0x1109, 0x1109, 1}, + {0x110B, 0x110C, 1}, + {0x110E, 0x1112, 1}, + {0x113C, 0x1140, 2}, + {0x114C, 0x1150, 2}, + {0x1154, 0x1155, 1}, + {0x1159, 0x1159, 1}, + {0x115F, 0x1161, 1}, + {0x1163, 0x1169, 2}, + {0x116D, 0x116E, 1}, + {0x1172, 0x1173, 1}, + {0x1175, 0x119E, 0x119E - 0x1175}, + {0x11A8, 0x11AB, 0x11AB - 0x11A8}, + {0x11AE, 
0x11AF, 1}, + {0x11B7, 0x11B8, 1}, + {0x11BA, 0x11BA, 1}, + {0x11BC, 0x11C2, 1}, + {0x11EB, 0x11F0, 0x11F0 - 0x11EB}, + {0x11F9, 0x11F9, 1}, + {0x1E00, 0x1E9B, 1}, + {0x1EA0, 0x1EF9, 1}, + {0x1F00, 0x1F15, 1}, + {0x1F18, 0x1F1D, 1}, + {0x1F20, 0x1F45, 1}, + {0x1F48, 0x1F4D, 1}, + {0x1F50, 0x1F57, 1}, + {0x1F59, 0x1F5B, 0x1F5B - 0x1F59}, + {0x1F5D, 0x1F5D, 1}, + {0x1F5F, 0x1F7D, 1}, + {0x1F80, 0x1FB4, 1}, + {0x1FB6, 0x1FBC, 1}, + {0x1FBE, 0x1FBE, 1}, + {0x1FC2, 0x1FC4, 1}, + {0x1FC6, 0x1FCC, 1}, + {0x1FD0, 0x1FD3, 1}, + {0x1FD6, 0x1FDB, 1}, + {0x1FE0, 0x1FEC, 1}, + {0x1FF2, 0x1FF4, 1}, + {0x1FF6, 0x1FFC, 1}, + {0x2126, 0x2126, 1}, + {0x212A, 0x212B, 1}, + {0x212E, 0x212E, 1}, + {0x2180, 0x2182, 1}, + {0x3007, 0x3007, 1}, + {0x3021, 0x3029, 1}, + {0x3041, 0x3094, 1}, + {0x30A1, 0x30FA, 1}, + {0x3105, 0x312C, 1}, + {0x4E00, 0x9FA5, 1}, + {0xAC00, 0xD7A3, 1}, + }, +} + +var second = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x002D, 0x002E, 1}, + {0x0030, 0x0039, 1}, + {0x00B7, 0x00B7, 1}, + {0x02D0, 0x02D1, 1}, + {0x0300, 0x0345, 1}, + {0x0360, 0x0361, 1}, + {0x0387, 0x0387, 1}, + {0x0483, 0x0486, 1}, + {0x0591, 0x05A1, 1}, + {0x05A3, 0x05B9, 1}, + {0x05BB, 0x05BD, 1}, + {0x05BF, 0x05BF, 1}, + {0x05C1, 0x05C2, 1}, + {0x05C4, 0x0640, 0x0640 - 0x05C4}, + {0x064B, 0x0652, 1}, + {0x0660, 0x0669, 1}, + {0x0670, 0x0670, 1}, + {0x06D6, 0x06DC, 1}, + {0x06DD, 0x06DF, 1}, + {0x06E0, 0x06E4, 1}, + {0x06E7, 0x06E8, 1}, + {0x06EA, 0x06ED, 1}, + {0x06F0, 0x06F9, 1}, + {0x0901, 0x0903, 1}, + {0x093C, 0x093C, 1}, + {0x093E, 0x094C, 1}, + {0x094D, 0x094D, 1}, + {0x0951, 0x0954, 1}, + {0x0962, 0x0963, 1}, + {0x0966, 0x096F, 1}, + {0x0981, 0x0983, 1}, + {0x09BC, 0x09BC, 1}, + {0x09BE, 0x09BF, 1}, + {0x09C0, 0x09C4, 1}, + {0x09C7, 0x09C8, 1}, + {0x09CB, 0x09CD, 1}, + {0x09D7, 0x09D7, 1}, + {0x09E2, 0x09E3, 1}, + {0x09E6, 0x09EF, 1}, + {0x0A02, 0x0A3C, 0x3A}, + {0x0A3E, 0x0A3F, 1}, + {0x0A40, 0x0A42, 1}, + {0x0A47, 0x0A48, 1}, + {0x0A4B, 0x0A4D, 1}, + {0x0A66, 0x0A6F, 1}, + {0x0A70, 0x0A71, 1}, + {0x0A81, 0x0A83, 1}, + {0x0ABC, 0x0ABC, 1}, + {0x0ABE, 0x0AC5, 1}, + {0x0AC7, 0x0AC9, 1}, + {0x0ACB, 0x0ACD, 1}, + {0x0AE6, 0x0AEF, 1}, + {0x0B01, 0x0B03, 1}, + {0x0B3C, 0x0B3C, 1}, + {0x0B3E, 0x0B43, 1}, + {0x0B47, 0x0B48, 1}, + {0x0B4B, 0x0B4D, 1}, + {0x0B56, 0x0B57, 1}, + {0x0B66, 0x0B6F, 1}, + {0x0B82, 0x0B83, 1}, + {0x0BBE, 0x0BC2, 1}, + {0x0BC6, 0x0BC8, 1}, + {0x0BCA, 0x0BCD, 1}, + {0x0BD7, 0x0BD7, 1}, + {0x0BE7, 0x0BEF, 1}, + {0x0C01, 0x0C03, 1}, + {0x0C3E, 0x0C44, 1}, + {0x0C46, 0x0C48, 1}, + {0x0C4A, 0x0C4D, 1}, + {0x0C55, 0x0C56, 1}, + {0x0C66, 0x0C6F, 1}, + {0x0C82, 0x0C83, 1}, + {0x0CBE, 0x0CC4, 1}, + {0x0CC6, 0x0CC8, 1}, + {0x0CCA, 0x0CCD, 1}, + {0x0CD5, 0x0CD6, 1}, + {0x0CE6, 0x0CEF, 1}, + {0x0D02, 0x0D03, 1}, + {0x0D3E, 0x0D43, 1}, + {0x0D46, 0x0D48, 1}, + {0x0D4A, 0x0D4D, 1}, + {0x0D57, 0x0D57, 1}, + {0x0D66, 0x0D6F, 1}, + {0x0E31, 0x0E31, 1}, + {0x0E34, 0x0E3A, 1}, + {0x0E46, 0x0E46, 1}, + {0x0E47, 0x0E4E, 1}, + {0x0E50, 0x0E59, 1}, + {0x0EB1, 0x0EB1, 1}, + {0x0EB4, 0x0EB9, 1}, + {0x0EBB, 0x0EBC, 1}, + {0x0EC6, 0x0EC6, 1}, + {0x0EC8, 0x0ECD, 1}, + {0x0ED0, 0x0ED9, 1}, + {0x0F18, 0x0F19, 1}, + {0x0F20, 0x0F29, 1}, + {0x0F35, 0x0F39, 2}, + {0x0F3E, 0x0F3F, 1}, + {0x0F71, 0x0F84, 1}, + {0x0F86, 0x0F8B, 1}, + {0x0F90, 0x0F95, 1}, + {0x0F97, 0x0F97, 1}, + {0x0F99, 0x0FAD, 1}, + {0x0FB1, 0x0FB7, 1}, + {0x0FB9, 0x0FB9, 1}, + {0x20D0, 0x20DC, 1}, + {0x20E1, 0x3005, 0x3005 - 0x20E1}, + {0x302A, 0x302F, 1}, + {0x3031, 0x3035, 1}, + {0x3099, 0x309A, 1}, + {0x309D, 0x309E, 1}, + {0x30FC, 0x30FE, 1}, + }, +} 
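The hunks above vendor the goxpath lexer (lexer.go, paths.go, xmlchars.go); the hunks below vendor the marshalling helpers, parser, and tree packages. A minimal sketch of how a consumer could wire these packages together, assuming only the exported APIs visible in this diff (lexer.Lex, xmltree.ParseXML, goxpath.MarshalStr) and an invented sample document and expression:

package main

import (
	"bytes"
	"fmt"

	"github.com/ChrisTrenkamp/goxpath"
	"github.com/ChrisTrenkamp/goxpath/lexer"
	"github.com/ChrisTrenkamp/goxpath/tree/xmltree"
)

func main() {
	// lexer.Lex streams XItem tokens on a channel that is closed when
	// lexing finishes (lexer.go above); print each token type and value.
	for tok := range lexer.Lex(`//book`) {
		fmt.Println(tok.Typ, tok.Val)
	}

	// Parse an XML document into the goxpath node tree (xmltree.go below)
	// and serialize it back out with MarshalStr (marshal.go below).
	doc := `<root><book id="1">Go</book></root>` // invented sample input
	n, err := xmltree.ParseXML(bytes.NewBufferString(doc))
	if err != nil {
		panic(err)
	}
	out, err := goxpath.MarshalStr(n)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}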
diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/marshal.go b/vendor/github.com/ChrisTrenkamp/goxpath/marshal.go new file mode 100644 index 00000000000..e1045f20eed --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/marshal.go @@ -0,0 +1,105 @@ +package goxpath + +import ( + "bytes" + "encoding/xml" + "io" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//Marshal prints the result tree, r, in XML form to w. +func Marshal(n tree.Node, w io.Writer) error { + return marshal(n, w) +} + +//MarshalStr is like Marhal, but returns a string. +func MarshalStr(n tree.Node) (string, error) { + ret := bytes.NewBufferString("") + err := marshal(n, ret) + + return ret.String(), err +} + +func marshal(n tree.Node, w io.Writer) error { + e := xml.NewEncoder(w) + err := encTok(n, e) + if err != nil { + return err + } + + return e.Flush() +} + +func encTok(n tree.Node, e *xml.Encoder) error { + switch n.GetNodeType() { + case tree.NtAttr: + return encAttr(n.GetToken().(xml.Attr), e) + case tree.NtElem: + return encEle(n.(tree.Elem), e) + case tree.NtNs: + return encNS(n.GetToken().(xml.Attr), e) + case tree.NtRoot: + for _, i := range n.(tree.Elem).GetChildren() { + err := encTok(i, e) + if err != nil { + return err + } + } + return nil + } + + //case tree.NtChd, tree.NtComm, tree.NtPi: + return e.EncodeToken(n.GetToken()) +} + +func encAttr(a xml.Attr, e *xml.Encoder) error { + str := a.Name.Local + `="` + a.Value + `"` + + if a.Name.Space != "" { + str += ` xmlns="` + a.Name.Space + `"` + } + + pi := xml.ProcInst{ + Target: "attribute", + Inst: ([]byte)(str), + } + + return e.EncodeToken(pi) +} + +func encNS(ns xml.Attr, e *xml.Encoder) error { + pi := xml.ProcInst{ + Target: "namespace", + Inst: ([]byte)(ns.Value), + } + return e.EncodeToken(pi) +} + +func encEle(n tree.Elem, e *xml.Encoder) error { + ele := xml.StartElement{ + Name: n.GetToken().(xml.StartElement).Name, + } + + attrs := n.GetAttrs() + ele.Attr = make([]xml.Attr, len(attrs)) + for i := range attrs { + ele.Attr[i] = attrs[i].GetToken().(xml.Attr) + } + + err := e.EncodeToken(ele) + if err != nil { + return err + } + + if x, ok := n.(tree.Elem); ok { + for _, i := range x.GetChildren() { + err := encTok(i, e) + if err != nil { + return err + } + } + } + + return e.EncodeToken(ele.End()) +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/parser/ast.go b/vendor/github.com/ChrisTrenkamp/goxpath/parser/ast.go new file mode 100644 index 00000000000..08196c62eee --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/parser/ast.go @@ -0,0 +1,113 @@ +package parser + +import "github.com/ChrisTrenkamp/goxpath/lexer" + +//NodeType enumerations +const ( + Empty lexer.XItemType = "" +) + +//Node builds an AST tree for operating on XPath expressions +type Node struct { + Val lexer.XItem + Left *Node + Right *Node + Parent *Node + next *Node +} + +var beginPathType = map[lexer.XItemType]bool{ + lexer.XItemAbsLocPath: true, + lexer.XItemAbbrAbsLocPath: true, + lexer.XItemAbbrRelLocPath: true, + lexer.XItemRelLocPath: true, + lexer.XItemFunction: true, +} + +func (n *Node) add(i lexer.XItem) { + if n.Val.Typ == Empty { + n.Val = i + } else if n.Left == nil && n.Right == nil { + n.Left = &Node{Val: n.Val, Parent: n} + n.Val = i + } else if beginPathType[n.Val.Typ] { + next := &Node{Val: n.Val, Left: n.Left, Right: n.Right, Parent: n} + n.Left, n.Right = next, nil + n.Val = i + } else if n.Right == nil { + n.Right = &Node{Val: i, Parent: n} + } else { + next := &Node{Val: n.Val, Left: n.Left, Right: n.Right, Parent: n} + n.Left, 
n.Right = next, nil + n.Val = i + } + n.next = n +} + +func (n *Node) push(i lexer.XItem) { + if n.Left == nil { + n.Left = &Node{Val: i, Parent: n} + n.next = n.Left + } else if n.Right == nil { + n.Right = &Node{Val: i, Parent: n} + n.next = n.Right + } else { + next := &Node{Val: i, Left: n.Right, Parent: n} + n.Right = next + n.next = n.Right + } +} + +func (n *Node) pushNotEmpty(i lexer.XItem) { + if n.Val.Typ == Empty { + n.add(i) + } else { + n.push(i) + } +} + +/* +func (n *Node) prettyPrint(depth, width int) { + nodes := []*Node{} + n.getLine(depth, &nodes) + fmt.Printf("%*s", (width-depth)*2, "") + toggle := true + if len(nodes) > 1 { + for _, i := range nodes { + if i != nil { + if toggle { + fmt.Print("/ ") + } else { + fmt.Print("\\ ") + } + } + toggle = !toggle + } + fmt.Println() + fmt.Printf("%*s", (width-depth)*2, "") + } + for _, i := range nodes { + if i != nil { + fmt.Print(i.Val.Val, " ") + } + } + fmt.Println() +} + +func (n *Node) getLine(depth int, ret *[]*Node) { + if depth <= 0 && n != nil { + *ret = append(*ret, n) + return + } + if n.Left != nil { + n.Left.getLine(depth-1, ret) + } else if depth-1 <= 0 { + *ret = append(*ret, nil) + } + if n.Right != nil { + n.Right.getLine(depth-1, ret) + } else if depth-1 <= 0 { + *ret = append(*ret, nil) + } +} +*/ diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/parser/parser.go b/vendor/github.com/ChrisTrenkamp/goxpath/parser/parser.go new file mode 100644 index 00000000000..e5ce8222fbc --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/parser/parser.go @@ -0,0 +1,200 @@ +package parser + +import ( + "fmt" + + "github.com/ChrisTrenkamp/goxpath/lexer" +) + +type stateType int + +const ( + defState stateType = iota + xpathState + funcState + paramState + predState + parenState +) + +type parseStack struct { + stack []*Node + stateTypes []stateType + cur *Node +} + +func (p *parseStack) push(t stateType) { + p.stack = append(p.stack, p.cur) + p.stateTypes = append(p.stateTypes, t) +} + +func (p *parseStack) pop() { + stackPos := len(p.stack) - 1 + + p.cur = p.stack[stackPos] + p.stack = p.stack[:stackPos] + p.stateTypes = p.stateTypes[:stackPos] +} + +func (p *parseStack) curState() stateType { + if len(p.stateTypes) == 0 { + return defState + } + return p.stateTypes[len(p.stateTypes)-1] +} + +type lexFn func(*parseStack, lexer.XItem) + +var parseMap = map[lexer.XItemType]lexFn{ + lexer.XItemAbsLocPath: xiXPath, + lexer.XItemAbbrAbsLocPath: xiXPath, + lexer.XItemAbbrRelLocPath: xiXPath, + lexer.XItemRelLocPath: xiXPath, + lexer.XItemEndPath: xiEndPath, + lexer.XItemAxis: xiXPath, + lexer.XItemAbbrAxis: xiXPath, + lexer.XItemNCName: xiXPath, + lexer.XItemQName: xiXPath, + lexer.XItemNodeType: xiXPath, + lexer.XItemProcLit: xiXPath, + lexer.XItemFunction: xiFunc, + lexer.XItemArgument: xiFuncArg, + lexer.XItemEndFunction: xiEndFunc, + lexer.XItemPredicate: xiPred, + lexer.XItemEndPredicate: xiEndPred, + lexer.XItemStrLit: xiValue, + lexer.XItemNumLit: xiValue, + lexer.XItemOperator: xiOp, + lexer.XItemVariable: xiValue, +} + +var opPrecedence = map[string]int{ + "|": 1, + "*": 2, + "div": 2, + "mod": 2, + "+": 3, + "-": 3, + "=": 4, + "!=": 4, + "<": 4, + "<=": 4, + ">": 4, + ">=": 4, + "and": 5, + "or": 6, +} + +//Parse creates an AST tree for XPath expressions. 
+func Parse(xp string) (*Node, error) { + var err error + c := lexer.Lex(xp) + n := &Node{} + p := &parseStack{cur: n} + + for next := range c { + if next.Typ != lexer.XItemError { + parseMap[next.Typ](p, next) + } else if err == nil { + err = fmt.Errorf(next.Val) + } + } + + return n, err +} + +func xiXPath(p *parseStack, i lexer.XItem) { + if p.curState() == xpathState { + p.cur.push(i) + p.cur = p.cur.next + return + } + + if p.cur.Val.Typ == lexer.XItemFunction { + p.cur.Right = &Node{Val: i, Parent: p.cur} + p.cur.next = p.cur.Right + p.push(xpathState) + p.cur = p.cur.next + return + } + + p.cur.pushNotEmpty(i) + p.push(xpathState) + p.cur = p.cur.next +} + +func xiEndPath(p *parseStack, i lexer.XItem) { + p.pop() +} + +func xiFunc(p *parseStack, i lexer.XItem) { + if p.cur.Val.Typ == Empty { + p.cur.pushNotEmpty(i) + p.push(funcState) + p.cur = p.cur.next + return + } + p.cur.push(i) + p.cur = p.cur.next + p.push(funcState) +} + +func xiFuncArg(p *parseStack, i lexer.XItem) { + if p.curState() != funcState { + p.pop() + } + + p.cur.push(i) + p.cur = p.cur.next + p.push(paramState) + p.cur.push(lexer.XItem{Typ: Empty, Val: ""}) + p.cur = p.cur.next +} + +func xiEndFunc(p *parseStack, i lexer.XItem) { + if p.curState() == paramState { + p.pop() + } + p.pop() +} + +func xiPred(p *parseStack, i lexer.XItem) { + p.cur.push(i) + p.cur = p.cur.next + p.push(predState) + p.cur.push(lexer.XItem{Typ: Empty, Val: ""}) + p.cur = p.cur.next +} + +func xiEndPred(p *parseStack, i lexer.XItem) { + p.pop() +} + +func xiValue(p *parseStack, i lexer.XItem) { + p.cur.add(i) +} + +func xiOp(p *parseStack, i lexer.XItem) { + if i.Val == "(" { + p.cur.push(lexer.XItem{Typ: Empty, Val: ""}) + p.push(parenState) + p.cur = p.cur.next + return + } + + if i.Val == ")" { + p.pop() + return + } + + if p.cur.Val.Typ == lexer.XItemOperator { + if opPrecedence[p.cur.Val.Val] <= opPrecedence[i.Val] { + p.cur.add(i) + } else { + p.cur.push(i) + } + } else { + p.cur.add(i) + } + p.cur = p.cur.next +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/parser/pathexpr/pathexpr.go b/vendor/github.com/ChrisTrenkamp/goxpath/parser/pathexpr/pathexpr.go new file mode 100644 index 00000000000..7fe69eb4ae3 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/parser/pathexpr/pathexpr.go @@ -0,0 +1,11 @@ +package pathexpr + +import "encoding/xml" + +//PathExpr represents XPath step's. xmltree.XMLTree uses it to find nodes. +type PathExpr struct { + Name xml.Name + Axis string + NodeType string + NS map[string]string +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/interfaces.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/interfaces.go new file mode 100644 index 00000000000..a6e64257338 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/interfaces.go @@ -0,0 +1,18 @@ +package tree + +import "fmt" + +//Result is used for all data types. +type Result interface { + fmt.Stringer +} + +//IsBool is used for the XPath boolean function. It turns the data type to a bool. +type IsBool interface { + Bool() Bool +} + +//IsNum is used for the XPath number function. It turns the data type to a number. 
+type IsNum interface { + Num() Num +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/tree.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/tree.go new file mode 100644 index 00000000000..c5d6333d71d --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/tree.go @@ -0,0 +1,221 @@ +package tree + +import ( + "encoding/xml" + "sort" +) + +//XMLSpace is the W3C XML namespace +const XMLSpace = "http://www.w3.org/XML/1998/namespace" + +//NodePos is a helper for representing the node's document order +type NodePos int + +//Pos returns the node's document order position +func (n NodePos) Pos() int { + return int(n) +} + +//NodeType is a safer way to determine a node's type than type assertions. +type NodeType int + +//GetNodeType returns the node's type. +func (t NodeType) GetNodeType() NodeType { + return t +} + +//These are all the possible node types +const ( + NtAttr NodeType = iota + NtChd + NtComm + NtElem + NtNs + NtRoot + NtPi +) + +//Node is a XPath result that is a node except elements +type Node interface { + //ResValue prints the node's string value + ResValue() string + //Pos returns the node's position in the document order + Pos() int + //GetToken returns the xml.Token representation of the node + GetToken() xml.Token + //GetParent returns the parent node, which will always be an XML element + GetParent() Elem + //GetNodeType returns the node's type + GetNodeType() NodeType +} + +//Elem is a XPath result that is an element node +type Elem interface { + Node + //GetChildren returns the elements children. + GetChildren() []Node + //GetAttrs returns the attributes of the element + GetAttrs() []Node +} + +//NSElem is a node that keeps track of namespaces. +type NSElem interface { + Elem + GetNS() map[xml.Name]string +} + +//NSBuilder is a helper-struct for satisfying the NSElem interface +type NSBuilder struct { + NS map[xml.Name]string +} + +//GetNS returns the namespaces found on the current element. It should not be +//confused with BuildNS, which actually resolves the namespace nodes. +func (ns NSBuilder) GetNS() map[xml.Name]string { + return ns.NS +} + +type nsValueSort []NS + +func (ns nsValueSort) Len() int { return len(ns) } +func (ns nsValueSort) Swap(i, j int) { + ns[i], ns[j] = ns[j], ns[i] +} +func (ns nsValueSort) Less(i, j int) bool { + return ns[i].Value < ns[j].Value +} + +//BuildNS resolves all the namespace nodes of the element and returns them +func BuildNS(t Elem) (ret []NS) { + vals := make(map[xml.Name]string) + + if nselem, ok := t.(NSElem); ok { + buildNS(nselem, vals) + + ret = make([]NS, 0, len(vals)) + i := 1 + + for k, v := range vals { + if !(k.Local == "xmlns" && k.Space == "" && v == "") { + ret = append(ret, NS{ + Attr: xml.Attr{Name: k, Value: v}, + Parent: t, + NodeType: NtNs, + }) + i++ + } + } + + sort.Sort(nsValueSort(ret)) + for i := range ret { + ret[i].NodePos = NodePos(t.Pos() + i + 1) + } + } + + return ret +} + +func buildNS(x NSElem, ret map[xml.Name]string) { + if x.GetNodeType() == NtRoot { + return + } + + if nselem, ok := x.GetParent().(NSElem); ok { + buildNS(nselem, ret) + } + + for k, v := range x.GetNS() { + ret[k] = v + } +} + +//NS is a namespace node. +type NS struct { + xml.Attr + Parent Elem + NodePos + NodeType +} + +//GetToken returns the xml.Token representation of the namespace. +func (ns NS) GetToken() xml.Token { + return ns.Attr +} + +//GetParent returns the parent node of the namespace. 
+func (ns NS) GetParent() Elem { + return ns.Parent +} + +//ResValue returns the string value of the namespace +func (ns NS) ResValue() string { + return ns.Attr.Value +} + +//GetAttribute is a convenience function for getting the specified attribute from an element. +//false is returned if the attribute is not found. +func GetAttribute(n Elem, local, space string) (xml.Attr, bool) { + attrs := n.GetAttrs() + for _, i := range attrs { + attr := i.GetToken().(xml.Attr) + if local == attr.Name.Local && space == attr.Name.Space { + return attr, true + } + } + return xml.Attr{}, false +} + +//GetAttributeVal is like GetAttribute, except it returns the attribute's value. +func GetAttributeVal(n Elem, local, space string) (string, bool) { + attr, ok := GetAttribute(n, local, space) + return attr.Value, ok +} + +//GetAttrValOrEmpty is like GetAttributeVal, except it returns an empty string if +//the attribute is not found instead of false. +func GetAttrValOrEmpty(n Elem, local, space string) string { + val, ok := GetAttributeVal(n, local, space) + if !ok { + return "" + } + return val +} + +//FindNodeByPos finds a node from the given position. Returns nil if the node +//is not found. +func FindNodeByPos(n Node, pos int) Node { + if n.Pos() == pos { + return n + } + + if elem, ok := n.(Elem); ok { + chldrn := elem.GetChildren() + for i := 1; i < len(chldrn); i++ { + if chldrn[i-1].Pos() <= pos && chldrn[i].Pos() > pos { + return FindNodeByPos(chldrn[i-1], pos) + } + } + + if len(chldrn) > 0 { + if chldrn[len(chldrn)-1].Pos() <= pos { + return FindNodeByPos(chldrn[len(chldrn)-1], pos) + } + } + + attrs := elem.GetAttrs() + for _, i := range attrs { + if i.Pos() == pos { + return i + } + } + + ns := BuildNS(elem) + for _, i := range ns { + if i.Pos() == pos { + return i + } + } + } + + return nil +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xfn.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xfn.go new file mode 100644 index 00000000000..7a940bb3405 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xfn.go @@ -0,0 +1,52 @@ +package tree + +import ( + "fmt" +) + +//Ctx represents the current context position, size, node, and the current filtered result +type Ctx struct { + NodeSet + Pos int + Size int +} + +//Fn is a XPath function, written in Go +type Fn func(c Ctx, args ...Result) (Result, error) + +//LastArgOpt sets whether the last argument in a function is optional, variadic, or neither +type LastArgOpt int + +//LastArgOpt options +const ( + None LastArgOpt = iota + Optional + Variadic +) + +//Wrap interfaces XPath function calls with Go +type Wrap struct { + Fn Fn + //NArgs represents the number of arguments to the XPath function. -1 represents a single optional argument + NArgs int + LastArgOpt LastArgOpt +} + +//Call checks the arguments and calls Fn if they are valid +func (w Wrap) Call(c Ctx, args ...Result) (Result, error) { + switch w.LastArgOpt { + case Optional: + if len(args) == w.NArgs || len(args) == w.NArgs-1 { + return w.Fn(c, args...) + } + case Variadic: + if len(args) >= w.NArgs-1 { + return w.Fn(c, args...) + } + default: + if len(args) == w.NArgs { + return w.Fn(c, args...) 
+ } + } + return nil, fmt.Errorf("Invalid number of arguments") +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder/xmlbuilder.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder/xmlbuilder.go new file mode 100644 index 00000000000..625713c9680 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder/xmlbuilder.go @@ -0,0 +1,25 @@ +package xmlbuilder + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//BuilderOpts supplies all the information needed to create an XML node. +type BuilderOpts struct { + Dec *xml.Decoder + Tok xml.Token + NodeType tree.NodeType + NS map[xml.Name]string + Attrs []*xml.Attr + NodePos int + AttrStartPos int +} + +//XMLBuilder is an interface for creating XML structures. +type XMLBuilder interface { + tree.Node + CreateNode(*BuilderOpts) XMLBuilder + EndElem() XMLBuilder +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele/xmlele.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele/xmlele.go new file mode 100644 index 00000000000..85e3445d4c0 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele/xmlele.go @@ -0,0 +1,106 @@ +package xmlele + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/tree" + "github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder" + "github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode" +) + +//XMLEle is an implementation of XPRes for XML elements +type XMLEle struct { + xml.StartElement + tree.NSBuilder + Attrs []tree.Node + Children []tree.Node + Parent tree.Elem + tree.NodePos + tree.NodeType +} + +//Root is the default root node builder for xmltree.ParseXML +func Root() xmlbuilder.XMLBuilder { + return &XMLEle{NodeType: tree.NtRoot} +} + +//CreateNode is an implementation of xmlbuilder.XMLBuilder. It appends the node +//specified in opts and returns the child if it is an element. Otherwise, it returns x. +func (x *XMLEle) CreateNode(opts *xmlbuilder.BuilderOpts) xmlbuilder.XMLBuilder { + if opts.NodeType == tree.NtElem { + ele := &XMLEle{ + StartElement: opts.Tok.(xml.StartElement), + NSBuilder: tree.NSBuilder{NS: opts.NS}, + Attrs: make([]tree.Node, len(opts.Attrs)), + Parent: x, + NodePos: tree.NodePos(opts.NodePos), + NodeType: opts.NodeType, + } + for i := range opts.Attrs { + ele.Attrs[i] = xmlnode.XMLNode{ + Token: opts.Attrs[i], + NodePos: tree.NodePos(opts.AttrStartPos + i), + NodeType: tree.NtAttr, + Parent: ele, + } + } + x.Children = append(x.Children, ele) + return ele + } + + node := xmlnode.XMLNode{ + Token: opts.Tok, + NodePos: tree.NodePos(opts.NodePos), + NodeType: opts.NodeType, + Parent: x, + } + x.Children = append(x.Children, node) + return x +} + +//EndElem is an implementation of xmlbuilder.XMLBuilder. It returns x's parent. 
+func (x *XMLEle) EndElem() xmlbuilder.XMLBuilder { + return x.Parent.(*XMLEle) +} + +//GetToken returns the xml.Token representation of the node +func (x *XMLEle) GetToken() xml.Token { + return x.StartElement +} + +//GetParent returns the parent node, or itself if it's the root +func (x *XMLEle) GetParent() tree.Elem { + return x.Parent +} + +//GetChildren returns all child nodes of the element +func (x *XMLEle) GetChildren() []tree.Node { + ret := make([]tree.Node, len(x.Children)) + + for i := range x.Children { + ret[i] = x.Children[i] + } + + return ret +} + +//GetAttrs returns all attributes of the element +func (x *XMLEle) GetAttrs() []tree.Node { + ret := make([]tree.Node, len(x.Attrs)) + for i := range x.Attrs { + ret[i] = x.Attrs[i] + } + return ret +} + +//ResValue returns the string value of the element and children +func (x *XMLEle) ResValue() string { + ret := "" + for i := range x.Children { + switch x.Children[i].GetNodeType() { + case tree.NtChd, tree.NtElem, tree.NtRoot: + ret += x.Children[i].ResValue() + } + } + return ret +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode/xmlnode.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode/xmlnode.go new file mode 100644 index 00000000000..9be769855e4 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode/xmlnode.go @@ -0,0 +1,43 @@ +package xmlnode + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//XMLNode will represent an attribute, character data, comment, or processing instruction node +type XMLNode struct { + xml.Token + tree.NodePos + tree.NodeType + Parent tree.Elem +} + +//GetToken returns the xml.Token representation of the node +func (a XMLNode) GetToken() xml.Token { + if a.NodeType == tree.NtAttr { + ret := a.Token.(*xml.Attr) + return *ret + } + return a.Token +} + +//GetParent returns the parent node +func (a XMLNode) GetParent() tree.Elem { + return a.Parent +} + +//ResValue returns the string value of the attribute +func (a XMLNode) ResValue() string { + switch a.NodeType { + case tree.NtAttr: + return a.Token.(*xml.Attr).Value + case tree.NtChd: + return string(a.Token.(xml.CharData)) + case tree.NtComm: + return string(a.Token.(xml.Comment)) + } + //case tree.NtPi: + return string(a.Token.(xml.ProcInst).Inst) +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmltree.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmltree.go new file mode 100644 index 00000000000..2ec066c5bf5 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmltree.go @@ -0,0 +1,158 @@ +package xmltree + +import ( + "encoding/xml" + "io" + + "golang.org/x/net/html/charset" + + "github.com/ChrisTrenkamp/goxpath/tree" + "github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder" + "github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele" +) + +//ParseOptions is a set of methods and function pointers that alter +//the way the XML decoder works and the Node types that are created. +//Options that are not set will default to what is set in internal/defoverride.go +type ParseOptions struct { + Strict bool + XMLRoot func() xmlbuilder.XMLBuilder +} + +//DirectiveParser is an optional interface extended from XMLBuilder that handles +//XML directives. +type DirectiveParser interface { + xmlbuilder.XMLBuilder + Directive(xml.Directive, *xml.Decoder) +} + +//ParseSettings is a function for setting the ParseOptions you want when +//parsing an XML tree. 
+type ParseSettings func(s *ParseOptions) + +//MustParseXML is like ParseXML, but panics instead of returning an error. +func MustParseXML(r io.Reader, op ...ParseSettings) tree.Node { + ret, err := ParseXML(r, op...) + + if err != nil { + panic(err) + } + + return ret +} + +//ParseXML creates an XMLTree structure from an io.Reader. +func ParseXML(r io.Reader, op ...ParseSettings) (tree.Node, error) { + ov := ParseOptions{ + Strict: true, + XMLRoot: xmlele.Root, + } + for _, i := range op { + i(&ov) + } + + dec := xml.NewDecoder(r) + dec.CharsetReader = charset.NewReaderLabel + dec.Strict = ov.Strict + + ordrPos := 1 + xmlTree := ov.XMLRoot() + + t, err := dec.Token() + + if err != nil { + return nil, err + } + + if head, ok := t.(xml.ProcInst); ok && head.Target == "xml" { + t, err = dec.Token() + } + + opts := xmlbuilder.BuilderOpts{ + Dec: dec, + } + + for err == nil { + switch xt := t.(type) { + case xml.StartElement: + setEle(&opts, xmlTree, xt, &ordrPos) + xmlTree = xmlTree.CreateNode(&opts) + case xml.CharData: + setNode(&opts, xmlTree, xt, tree.NtChd, &ordrPos) + xmlTree = xmlTree.CreateNode(&opts) + case xml.Comment: + setNode(&opts, xmlTree, xt, tree.NtComm, &ordrPos) + xmlTree = xmlTree.CreateNode(&opts) + case xml.ProcInst: + setNode(&opts, xmlTree, xt, tree.NtPi, &ordrPos) + xmlTree = xmlTree.CreateNode(&opts) + case xml.EndElement: + xmlTree = xmlTree.EndElem() + case xml.Directive: + if dp, ok := xmlTree.(DirectiveParser); ok { + dp.Directive(xt.Copy(), dec) + } + } + + t, err = dec.Token() + } + + if err == io.EOF { + err = nil + } + + return xmlTree, err +} + +func setEle(opts *xmlbuilder.BuilderOpts, xmlTree xmlbuilder.XMLBuilder, ele xml.StartElement, ordrPos *int) { + opts.NodePos = *ordrPos + opts.Tok = ele + opts.Attrs = opts.Attrs[0:0:cap(opts.Attrs)] + opts.NS = make(map[xml.Name]string) + opts.NodeType = tree.NtElem + + for i := range ele.Attr { + attr := ele.Attr[i].Name + val := ele.Attr[i].Value + + if (attr.Local == "xmlns" && attr.Space == "") || attr.Space == "xmlns" { + opts.NS[attr] = val + } else { + opts.Attrs = append(opts.Attrs, &ele.Attr[i]) + } + } + + if nstree, ok := xmlTree.(tree.NSElem); ok { + ns := make(map[xml.Name]string) + + for _, i := range tree.BuildNS(nstree) { + ns[i.Name] = i.Value + } + + for k, v := range opts.NS { + ns[k] = v + } + + if ns[xml.Name{Local: "xmlns"}] == "" { + delete(ns, xml.Name{Local: "xmlns"}) + } + + for k, v := range ns { + opts.NS[k] = v + } + + if xmlTree.GetNodeType() == tree.NtRoot { + opts.NS[xml.Name{Space: "xmlns", Local: "xml"}] = tree.XMLSpace + } + } + + opts.AttrStartPos = len(opts.NS) + len(opts.Attrs) + *ordrPos + *ordrPos = opts.AttrStartPos + 1 +} + +func setNode(opts *xmlbuilder.BuilderOpts, xmlTree xmlbuilder.XMLBuilder, tok xml.Token, nt tree.NodeType, ordrPos *int) { + opts.Tok = xml.CopyToken(tok) + opts.NodeType = nt + opts.NodePos = *ordrPos + *ordrPos++ +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xtypes.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xtypes.go new file mode 100644 index 00000000000..b9704109f96 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xtypes.go @@ -0,0 +1,113 @@ +package tree + +import ( + "fmt" + "math" + "strconv" + "strings" +) + +//Boolean strings +const ( + True = "true" + False = "false" +) + +//Bool is a boolean XPath type +type Bool bool + +//ResValue satisfies the Res interface for Bool +func (b Bool) String() string { + if b { + return True + } + + return False +} + +//Bool satisfies the HasBool interface for Bool's +func 
(b Bool) Bool() Bool { + return b +} + +//Num satisfies the HasNum interface for Bool's +func (b Bool) Num() Num { + if b { + return Num(1) + } + + return Num(0) +} + +//Num is a number XPath type +type Num float64 + +//ResValue satisfies the Res interface for Num +func (n Num) String() string { + if math.IsInf(float64(n), 0) { + if math.IsInf(float64(n), 1) { + return "Infinity" + } + return "-Infinity" + } + return fmt.Sprintf("%g", float64(n)) +} + +//Bool satisfies the HasBool interface for Num's +func (n Num) Bool() Bool { + return n != 0 +} + +//Num satisfies the HasNum interface for Num's +func (n Num) Num() Num { + return n +} + +//String is string XPath type +type String string + +//ResValue satisfies the Res interface for String +func (s String) String() string { + return string(s) +} + +//Bool satisfies the HasBool interface for String's +func (s String) Bool() Bool { + return Bool(len(s) > 0) +} + +//Num satisfies the HasNum interface for String's +func (s String) Num() Num { + num, err := strconv.ParseFloat(strings.TrimSpace(string(s)), 64) + if err != nil { + return Num(math.NaN()) + } + return Num(num) +} + +//NodeSet is a node-set XPath type +type NodeSet []Node + +//GetNodeNum converts the node to a string-value and to a number +func GetNodeNum(n Node) Num { + return String(n.ResValue()).Num() +} + +//String satisfies the Res interface for NodeSet +func (n NodeSet) String() string { + if len(n) == 0 { + return "" + } + + return n[0].ResValue() +} + +//Bool satisfies the HasBool interface for node-set's +func (n NodeSet) Bool() Bool { + return Bool(len(n) > 0) +} + +//Num satisfies the HasNum interface for NodeSet's +func (n NodeSet) Num() Num { + return String(n.String()).Num() +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/xconst/xconst.go b/vendor/github.com/ChrisTrenkamp/goxpath/xconst/xconst.go new file mode 100644 index 00000000000..650991272e4 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/xconst/xconst.go @@ -0,0 +1,66 @@ +package xconst + +const ( + //AxisAncestor represents the "ancestor" axis + AxisAncestor = "ancestor" + //AxisAncestorOrSelf represents the "ancestor-or-self" axis + AxisAncestorOrSelf = "ancestor-or-self" + //AxisAttribute represents the "attribute" axis + AxisAttribute = "attribute" + //AxisChild represents the "child" axis + AxisChild = "child" + //AxisDescendent represents the "descendant" axis + AxisDescendent = "descendant" + //AxisDescendentOrSelf represents the "descendant-or-self" axis + AxisDescendentOrSelf = "descendant-or-self" + //AxisFollowing represents the "following" axis + AxisFollowing = "following" + //AxisFollowingSibling represents the "following-sibling" axis + AxisFollowingSibling = "following-sibling" + //AxisNamespace represents the "namespace" axis + AxisNamespace = "namespace" + //AxisParent represents the "parent" axis + AxisParent = "parent" + //AxisPreceding represents the "preceding" axis + AxisPreceding = "preceding" + //AxisPrecedingSibling represents the "preceding-sibling" axis + AxisPrecedingSibling = "preceding-sibling" + //AxisSelf represents the "self" axis + AxisSelf = "self" +) + +//AxisNames is all the possible Axis identifiers wrapped in an array for convenience +var AxisNames = []string{ + AxisAncestor, + AxisAncestorOrSelf, + AxisAttribute, + AxisChild, + AxisDescendent, + AxisDescendentOrSelf, + AxisFollowing, + AxisFollowingSibling, + AxisNamespace, + AxisParent, + AxisPreceding, + AxisPrecedingSibling, + AxisSelf, +} + +const ( + //NodeTypeComment represents the "comment" node 
test + NodeTypeComment = "comment" + //NodeTypeText represents the "text" node test + NodeTypeText = "text" + //NodeTypeProcInst represents the "processing-instruction" node test + NodeTypeProcInst = "processing-instruction" + //NodeTypeNode represents the "node" node test + NodeTypeNode = "node" +) + +//NodeTypes is all the possible node tests wrapped in an array for convenience +var NodeTypes = []string{ + NodeTypeComment, + NodeTypeText, + NodeTypeProcInst, + NodeTypeNode, +} diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml new file mode 100644 index 00000000000..096369d44d9 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/.travis.yml @@ -0,0 +1,29 @@ +language: go + +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - tip + +# Setting sudo access to false will let Travis CI use containers rather than +# VMs to run the tests. For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +script: + - make setup + - make test + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md new file mode 100644 index 00000000000..e405c9a84d9 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md @@ -0,0 +1,109 @@ +# 1.5.0 (2019-09-11) + +## Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +## Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +## Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +# 1.4.2 (2018-04-10) + +## Changed +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +## Fixed +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +# 1.4.1 (2018-04-02) + +## Fixed +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +# 1.4.0 (2017-10-04) + +## Changed +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +# 1.3.1 (2017-07-10) + +## Fixed +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +# 1.3.0 (2017-05-02) + +## Added +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. 
See https://masterminds.github.io/stability/ + +## Fixed +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +## Changed +- #55: The godoc icon moved from png to svg + +# 1.2.3 (2017-04-03) + +## Fixed +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +# Release 1.2.2 (2016-12-13) + +## Fixed +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +# Release 1.2.1 (2016-11-28) + +## Fixed +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +# Release 1.2.0 (2016-11-04) + +## Added +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +## Fixed +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +# Release 1.1.1 (2016-06-30) + +## Changed +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +# Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +# Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +# Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt new file mode 100644 index 00000000000..9ff7da9c48b --- /dev/null +++ b/vendor/github.com/Masterminds/semver/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
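The pre-release handling called out in the changelog above (issue #21) means a constraint with no pre-release component will not match pre-release versions. A minimal sketch of that behaviour, assuming the vendored `Masterminds/semver` API (`NewConstraint`, `MustParse`, `Check`) documented further below:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	// A caret constraint with no pre-release component skips pre-release versions.
	c, err := semver.NewConstraint("^1.2.3")
	if err != nil {
		panic(err)
	}

	stable := semver.MustParse("1.3.0")
	beta := semver.MustParse("1.3.0-beta.1")

	fmt.Println(c.Check(stable)) // true
	fmt.Println(c.Check(beta))   // false: pre-releases are ignored by this constraint

	// A constraint that itself carries a pre-release identifier opts in to matching them.
	cPre, err := semver.NewConstraint("^1.2.3-alpha")
	if err != nil {
		panic(err)
	}
	fmt.Println(cPre.Check(beta)) // true
}
```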
diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile new file mode 100644 index 00000000000..a7a1b4e36de --- /dev/null +++ b/vendor/github.com/Masterminds/semver/Makefile @@ -0,0 +1,36 @@ +.PHONY: setup +setup: + go get -u gopkg.in/alecthomas/gometalinter.v1 + gometalinter.v1 --install + +.PHONY: test +test: validate lint + @echo "==> Running tests" + go test -v + +.PHONY: validate +validate: + @echo "==> Running static validations" + @gometalinter.v1 \ + --disable-all \ + --enable deadcode \ + --severity deadcode:error \ + --enable gofmt \ + --enable gosimple \ + --enable ineffassign \ + --enable misspell \ + --enable vet \ + --tests \ + --vendor \ + --deadline 60s \ + ./... || exit_code=1 + +.PHONY: lint +lint: + @echo "==> Running linters" + @gometalinter.v1 \ + --disable-all \ + --enable golint \ + --vendor \ + --deadline 60s \ + ./... || : diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md new file mode 100644 index 00000000000..1b52d2f4362 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/README.md @@ -0,0 +1,194 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +If you are looking for a command line tool for version comparisons please see +[vert](https://github.com/Masterminds/vert) which uses this library. + +## Parsing Semantic Versions + +To parse a semantic version use the `NewVersion` function. For example, + +```go + v, err := semver.NewVersion("1.2.3-beta.1+build345") +``` + +If there is an error the version wasn't parseable. The version object has methods +to get the parts of the version, compare it to other versions, convert the +version back into a string, and get the original string. For more details +please see the [documentation](https://godoc.org/github.com/Masterminds/semver). + +## Sorting Semantic Versions + +A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/) +package from the standard library. For example, + +```go + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +Checking a version against version constraints is one of the most featureful +parts of the package. 
+ +```go + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) +``` + +## Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma separated and comparisons. These are then separated by || separated or +comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +## Working With Pre-release Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of pre-releases include +development, alpha, beta, and release candidate releases. A pre-release may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precidence, pre-releases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification pre-releases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer comparisons without a pre-release comparator will skip pre-release versions. +For example, `>=1.2.3` will skip pre-releases when looking at a list of releases +while `>=1.2.3-0` will evaluate and find pre-releases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the spec. The lowest character is a `0` in ASCII sort order (see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +## Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` + +## Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the pack level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +## Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. 
+For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +## Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes. This is useful +when comparisons of API versions as a major change is API breaking. For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` + +# Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint. + a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } +``` + +# Fuzzing + + [dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing. + +1. `go-fuzz-build` +2. `go-fuzz -workdir=fuzz` + +# Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml new file mode 100644 index 00000000000..b2778df15a4 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/appveyor.yml @@ -0,0 +1,44 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\semver +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +install: + - go version + - go env + - go get -u gopkg.in/alecthomas/gometalinter.v1 + - set PATH=%PATH%;%GOPATH%\bin + - gometalinter.v1.exe --install + +build_script: + - go install -v ./... + +test_script: + - "gometalinter.v1 \ + --disable-all \ + --enable deadcode \ + --severity deadcode:error \ + --enable gofmt \ + --enable gosimple \ + --enable ineffassign \ + --enable misspell \ + --enable vet \ + --tests \ + --vendor \ + --deadline 60s \ + ./... || exit_code=1" + - "gometalinter.v1 \ + --disable-all \ + --enable golint \ + --vendor \ + --deadline 60s \ + ./... || :" + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go new file mode 100644 index 00000000000..a78235895fd --- /dev/null +++ b/vendor/github.com/Masterminds/semver/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. 
+func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go new file mode 100644 index 00000000000..b94b93413f3 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/constraints.go @@ -0,0 +1,423 @@ +package semver + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + cs := strings.Split(v, ",") + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if !c.check(v) { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. 
+ if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if !c.check(v) { + em := fmt.Errorf(c.msg, v, c.orig) + e = append(e, em) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +var constraintOps map[string]cfunc +var constraintMsg map[string]string +var constraintRegex *regexp.Regexp + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + constraintMsg = map[string]string{ + "": "%s is not equal to %s", + "=": "%s is not equal to %s", + "!=": "%s is equal to %s", + ">": "%s is less than or equal to %s", + "<": "%s is greater than or equal to %s", + ">=": "%s is less than %s", + "=>": "%s is less than %s", + "<=": "%s is greater than %s", + "=<": "%s is greater than %s", + "~": "%s does not have same major and minor version as %s", + "~>": "%s does not have same major and minor version as %s", + "^": "%s does not have same major version as %s", + } + + ops := make([]string, 0, len(constraintOps)) + for k := range constraintOps { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) +} + +// An individual constraint +type constraint struct { + // The callback function for the restraint. It performs the logic for + // the constraint. + function cfunc + + msg string + + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. + con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) bool { + return c.function(v, c) +} + +type cfunc func(v *Version, c *constraint) bool + +func parseConstraint(c string) (*constraint, error) { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + ver := m[2] + orig := ver + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. 
+ return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + function: constraintOps[m[1]], + msg: constraintMsg[m[1]], + con: con, + orig: orig, + minorDirty: minorDirty, + patchDirty: patchDirty, + dirty: dirty, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) bool { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.con.Major() != v.Major() { + return true + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true + } else if c.minorDirty { + return false + } + + return false + } + + return !v.Equal(c.con) +} + +func constraintGreaterThan(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) == 1 +} + +func constraintLessThan(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) < 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +func constraintGreaterThanEqual(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) >= 0 +} + +func constraintLessThanEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) <= 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. 
+ if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true + } + + if v.Major() != c.con.Major() { + return false + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.dirty { + c.msg = constraintMsg["~"] + return constraintTilde(v, c) + } + + return v.Equal(c.con) +} + +// ^* --> (any) +// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0 +// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0 +// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0 +// ^1.2.3 --> >=1.2.3, <2.0.0 +// ^1.2.0 --> >=1.2.0, <2.0.0 +func constraintCaret(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + if v.Major() != c.con.Major() { + return false + } + + return true +} + +var constraintRangeRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go new file mode 100644 index 00000000000..6a6c24c6d6e --- /dev/null +++ b/vendor/github.com/Masterminds/semver/doc.go @@ -0,0 +1,115 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + * Parse semantic versions + * Sort semantic versions + * Check if a semantic version fits within a set of constraints + * Optionally work with a `v` prefix + +Parsing Semantic Versions + +To parse a semantic version use the `NewVersion` function. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +If there is an error the version wasn't parseable. The version object has methods +to get the parts of the version, compare it to other versions, convert the +version back into a string, and get the original string. For more details +please see the documentation at https://godoc.org/github.com/Masterminds/semver. + +Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. 
+For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +Checking Version Constraints + +Checking a version against version constraints is one of the most featureful +parts of the package. + + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parseable. + } + + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) + +Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma separated and comparisons. These are then separated by || separated or +comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + + * `=`: equal (aliased to no operator) + * `!=`: not equal + * `>`: greater than + * `<`: less than + * `>=`: greater than or equal to + * `<=`: less than or equal to + +Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + + * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` + +Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the pack level comparison (see tilde below). For example, + + * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` + * `>= 1.2.x` is equivalent to `>= 1.2.0` + * `<= 2.x` is equivalent to `<= 3` + * `*` is equivalent to `>= 0.0.0` + +Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + + * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` + * `~1` is equivalent to `>= 1, < 2` + * `~2.3` is equivalent to `>= 2.3, < 2.4` + * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` + * `~1.x` is equivalent to `>= 1, < 2` + +Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes. This is useful +when comparisons of API versions as a major change is API breaking. For example, + + * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + * `^2.3` is equivalent to `>= 2.3, < 3` + * `^2.x` is equivalent to `>= 2.0.0, < 3` +*/ +package semver diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go new file mode 100644 index 00000000000..400d4f93412 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version.go @@ -0,0 +1,425 @@ +package semver + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp +var validPrereleaseRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. 
+ ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// SemVerRegex is the regular expression used to parse a semantic version. +const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// ValidPrerelease is the regular expression which validates +// both prerelease and metadata values. +const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch int64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + SemVerRegex + "$") + validPrereleaseRegex = regexp.MustCompile(ValidPrerelease) +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var temp int64 + temp, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.major = temp + + if m[2] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.minor = temp + } else { + sv.minor = 0 + } + + if m[3] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.patch = temp + } else { + sv.patch = 0 + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v *Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v *Version) Major() int64 { + return v.major +} + +// Minor returns the minor version. +func (v *Version) Minor() int64 { + return v.minor +} + +// Patch returns the patch version. +func (v *Version) Patch() int64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v *Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v *Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. 
+func (v *Version) originalVPrefix() string { + + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps curent patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hypen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) { + return vNext, ErrInvalidPrerelease + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. +func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) { + return vNext, ErrInvalidMetadata + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. 
+func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + temp = nil + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v *Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func compareSegment(v, o int64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. 
+ + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} diff --git a/vendor/github.com/Masterminds/semver/version_fuzz.go b/vendor/github.com/Masterminds/semver/version_fuzz.go new file mode 100644 index 00000000000..b42bcd62b95 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version_fuzz.go @@ -0,0 +1,10 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + if _, err := NewVersion(string(data)); err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE b/vendor/github.com/acarl005/stripansi/LICENSE similarity index 96% rename from vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE rename to vendor/github.com/acarl005/stripansi/LICENSE index c35c17af980..00abe0dbf49 100644 --- a/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE +++ b/vendor/github.com/acarl005/stripansi/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2015 Dmitri Shuralyov +Copyright (c) 2018 Andrew Carlson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/acarl005/stripansi/README.md b/vendor/github.com/acarl005/stripansi/README.md new file mode 100644 index 00000000000..8bdb1f5059f --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/README.md @@ -0,0 +1,30 @@ +Strip ANSI +========== + +This Go package removes ANSI escape codes from strings. + +Ideally, we would prevent these from appearing in any text we want to process. +However, sometimes this can't be helped, and we need to be able to deal with that noise. +This will use a regexp to remove those unwanted escape codes. 
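A common case is cleaning the captured output of a command that emits colour codes before logging or parsing it. A small sketch that strips each line read from standard input, assuming the `Strip` function shown under Usage below:

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"github.com/acarl005/stripansi"
)

func main() {
	// Read possibly-colored lines from stdin and print them without ANSI escape codes.
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		fmt.Println(stripansi.Strip(sc.Text()))
	}
	if err := sc.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```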
+ + +## Install + +```sh +$ go get -u github.com/acarl005/stripansi +``` + +## Usage + +```go +import ( + "fmt" + "github.com/acarl005/stripansi" +) + +func main() { + msg := "\x1b[38;5;140m foo\x1b[0m bar" + cleanMsg := stripansi.Strip(msg) + fmt.Println(cleanMsg) // " foo bar" +} +``` diff --git a/vendor/github.com/acarl005/stripansi/stripansi.go b/vendor/github.com/acarl005/stripansi/stripansi.go new file mode 100644 index 00000000000..235732a7829 --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/stripansi.go @@ -0,0 +1,13 @@ +package stripansi + +import ( + "regexp" +) + +const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var re = regexp.MustCompile(ansi) + +func Strip(str string) string { + return re.ReplaceAllString(str, "") +} diff --git a/vendor/github.com/alessio/shellescape/.gitignore b/vendor/github.com/alessio/shellescape/.gitignore new file mode 100644 index 00000000000..4ba7c2d137e --- /dev/null +++ b/vendor/github.com/alessio/shellescape/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +.idea/ + +escargs diff --git a/vendor/github.com/alessio/shellescape/.golangci.yml b/vendor/github.com/alessio/shellescape/.golangci.yml new file mode 100644 index 00000000000..cd4a17e442c --- /dev/null +++ b/vendor/github.com/alessio/shellescape/.golangci.yml @@ -0,0 +1,64 @@ +# run: +# # timeout for analysis, e.g. 30s, 5m, default is 1m +# timeout: 5m + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - goconst + - gocritic + - gofmt + - goimports + - golint + - gosec + - gosimple + - govet + - ineffassign + - interfacer + - maligned + - misspell + - prealloc + - scopelint + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - misspell + - wsl + +issues: + exclude-rules: + - text: "Use of weak random number generator" + linters: + - gosec + - text: "comment on exported var" + linters: + - golint + - text: "don't use an underscore in package name" + linters: + - golint + - text: "ST1003:" + linters: + - stylecheck + # FIXME: Disabled until golangci-lint updates stylecheck with this fix: + # https://github.com/dominikh/go-tools/issues/389 + - text: "ST1016:" + linters: + - stylecheck + +linters-settings: + dogsled: + max-blank-identifiers: 3 + maligned: + # print struct with more effective memory layout or not, false by default + suggest-new: true + +run: + tests: false diff --git a/vendor/github.com/alessio/shellescape/.goreleaser.yml b/vendor/github.com/alessio/shellescape/.goreleaser.yml new file mode 100644 index 00000000000..064c9374d79 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/.goreleaser.yml @@ -0,0 +1,33 @@ +# This is an example goreleaser.yaml file with some sane defaults. +# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + # You may remove this if you don't use go modules. + - go mod download + # you may remove this if you don't need go generate + - go generate ./... 
+builds: + - env: + - CGO_ENABLED=0 + main: ./cmd/escargs + goos: + - linux + - windows + - darwin +archives: + - replacements: + darwin: Darwin + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/vendor/github.com/alessio/shellescape/AUTHORS b/vendor/github.com/alessio/shellescape/AUTHORS new file mode 100644 index 00000000000..4a647a6f41b --- /dev/null +++ b/vendor/github.com/alessio/shellescape/AUTHORS @@ -0,0 +1 @@ +Alessio Treglia diff --git a/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md b/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..e8eda606200 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at alessio@debian.org. 
All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/vendor/github.com/alessio/shellescape/LICENSE b/vendor/github.com/alessio/shellescape/LICENSE new file mode 100644 index 00000000000..9f760679f40 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Alessio Treglia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alessio/shellescape/README.md b/vendor/github.com/alessio/shellescape/README.md new file mode 100644 index 00000000000..910bb253b9d --- /dev/null +++ b/vendor/github.com/alessio/shellescape/README.md @@ -0,0 +1,61 @@ +![Build](https://github.com/alessio/shellescape/workflows/Build/badge.svg) +[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/alessio/shellescape?tab=overview) +[![sourcegraph](https://sourcegraph.com/github.com/alessio/shellescape/-/badge.svg)](https://sourcegraph.com/github.com/alessio/shellescape) +[![codecov](https://codecov.io/gh/alessio/shellescape/branch/master/graph/badge.svg)](https://codecov.io/gh/alessio/shellescape) +[![Coverage](https://gocover.io/_badge/github.com/alessio/shellescape)](https://gocover.io/github.com/alessio/shellescape) +[![Go Report Card](https://goreportcard.com/badge/github.com/alessio/shellescape)](https://goreportcard.com/report/github.com/alessio/shellescape) + +# shellescape +Escape arbitrary strings for safe use as command line arguments. +## Contents of the package + +This package provides the `shellescape.Quote()` function that returns a +shell-escaped copy of a string. 
This functionality could be helpful +in those cases where it is known that the output of a Go program will +be appended to/used in the context of shell programs' command line arguments. + +This work was inspired by the Python original package +[shellescape](https://pypi.python.org/pypi/shellescape). + +## Usage + +The following snippet shows a typical unsafe idiom: + +```go +package main + +import ( + "fmt" + "os" +) + +func main() { + fmt.Printf("ls -l %s\n", os.Args[1]) +} +``` +_[See in Go Playground](https://play.golang.org/p/Wj2WoUfH_d)_ + +Especially when creating pipeline of commands which might end up being +executed by a shell interpreter, it is particularly unsafe to not +escape arguments. + +`shellescape.Quote()` comes in handy and to safely escape strings: + +```go +package main + +import ( + "fmt" + "os" + + "gopkg.in/alessio/shellescape.v1" +) + +func main() { + fmt.Printf("ls -l %s\n", shellescape.Quote(os.Args[1])) +} +``` +_[See in Go Playground](https://play.golang.org/p/HJ_CXgSrmp)_ + +## The escargs utility +__escargs__ reads lines from the standard input and prints shell-escaped versions. Unlinke __xargs__, blank lines on the standard input are not discarded. diff --git a/vendor/github.com/alessio/shellescape/go.mod b/vendor/github.com/alessio/shellescape/go.mod new file mode 100644 index 00000000000..08d282650d3 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/go.mod @@ -0,0 +1,3 @@ +module github.com/alessio/shellescape + +go 1.14 diff --git a/vendor/github.com/alessio/shellescape/shellescape.go b/vendor/github.com/alessio/shellescape/shellescape.go new file mode 100644 index 00000000000..dc34a556ae6 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/shellescape.go @@ -0,0 +1,66 @@ +/* +Package shellescape provides the shellescape.Quote to escape arbitrary +strings for a safe use as command line arguments in the most common +POSIX shells. + +The original Python package which this work was inspired by can be found +at https://pypi.python.org/pypi/shellescape. +*/ +package shellescape // "import gopkg.in/alessio/shellescape.v1" + +/* +The functionality provided by shellescape.Quote could be helpful +in those cases where it is known that the output of a Go program will +be appended to/used in the context of shell programs' command line arguments. +*/ + +import ( + "regexp" + "strings" + "unicode" +) + +var pattern *regexp.Regexp + +func init() { + pattern = regexp.MustCompile(`[^\w@%+=:,./-]`) +} + +// Quote returns a shell-escaped version of the string s. The returned value +// is a string that can safely be used as one token in a shell command line. +func Quote(s string) string { + if len(s) == 0 { + return "''" + } + + if pattern.MatchString(s) { + return "'" + strings.ReplaceAll(s, "'", "'\"'\"'") + "'" + } + + return s +} + +// QuoteCommand returns a shell-escaped version of the slice of strings. +// The returned value is a string that can safely be used as shell command arguments. +func QuoteCommand(args []string) string { + l := make([]string, len(args)) + + for i, s := range args { + l[i] = Quote(s) + } + + return strings.Join(l, " ") +} + +// StripUnsafe remove non-printable runes, e.g. control characters in +// a string that is meant for consumption by terminals that support +// control characters. 
+func StripUnsafe(s string) string { + return strings.Map(func(r rune) rune { + if unicode.IsPrint(r) { + return r + } + + return -1 + }, s) +} diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore new file mode 100644 index 00000000000..8d69a9418aa --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/.gitignore @@ -0,0 +1,15 @@ +bin/ +.idea/ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml new file mode 100644 index 00000000000..17c4d0a7107 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/.travis.yml @@ -0,0 +1,18 @@ +dist: bionic +language: go +env: GO111MODULE=on GOFLAGS='-mod vendor' +install: true +email: false + +go: + - 1.10 + - 1.11 + - 1.12 + - 1.13 + - tip + +before_script: + - go install github.com/golangci/golangci-lint/cmd/golangci-lint +script: + - golangci-lint run # run a bunch of code checkers/linters in parallel + - go test -v -race ./... # Run all the tests with the race detector enabled diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md new file mode 100644 index 00000000000..7ed268a1edd --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md @@ -0,0 +1,63 @@ +#### Support +If you do have a contribution to the package, feel free to create a Pull Request or an Issue. + +#### What to contribute +If you don't know what to do, there are some features and functions that need to be done + +- [ ] Refactor code +- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check +- [ ] Create actual list of contributors and projects that currently using this package +- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) +- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) +- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new +- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc +- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) +- [ ] Implement fuzzing testing +- [ ] Implement some struct/map/array utilities +- [ ] Implement map/array validation +- [ ] Implement benchmarking +- [ ] Implement batch of examples +- [ ] Look at forks for new features and fixes + +#### Advice +Feel free to create what you want, but keep in mind when you implement new features: +- Code must be clear and readable, names of variables/constants clearly describes what they are doing +- Public functions must be documented and described in source file and added to README.md to the list of available functions +- There are must be unit-tests for any new functions and improvements + +## Financial contributions + +We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator). +Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed. 
+ + +## Credits + + +### Contributors + +Thank you to all the people who have already contributed to govalidator! + + + +### Backers + +Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)] + + + + +### Sponsors + +Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE new file mode 100644 index 00000000000..2f9a31fadf6 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Alex Saskevich + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md new file mode 100644 index 00000000000..78f999e8341 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/README.md @@ -0,0 +1,616 @@ +govalidator +=========== +[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043) +[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) + +A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). + +#### Installation +Make sure that Go is installed on your computer. +Type the following command in your terminal: + + go get github.com/asaskevich/govalidator + +or you can get specified release of the package with `gopkg.in`: + + go get gopkg.in/asaskevich/govalidator.v10 + +After it the package is ready to use. + + +#### Import package in your project +Add following line in your `*.go` file: +```go +import "github.com/asaskevich/govalidator" +``` +If you are unhappy to use long `govalidator`, you can do something like this: +```go +import ( + valid "github.com/asaskevich/govalidator" +) +``` + +#### Activate behavior to require all fields have a validation tag by default +`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function. + +`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors. 
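+
+A minimal sketch, assuming the `SetNilPtrAllowedByRequired` setter listed in the function index below, of letting a nil pointer satisfy a `required` check:
+
+```go
+package main
+
+import "github.com/asaskevich/govalidator"
+
+type Account struct {
+	// Nickname is a pointer so that "not set" (nil) can be told apart from "".
+	Nickname *string `valid:"required"`
+}
+
+func init() {
+	// Allow nil pointers to satisfy `required`, as described above.
+	govalidator.SetNilPtrAllowedByRequired(true)
+}
+
+func main() {
+	result, err := govalidator.ValidateStruct(Account{})
+	if err != nil {
+		println("error: " + err.Error())
+	}
+	println(result) // expected to print true: the nil Nickname is exempted
+}
+```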
+ +```go +import "github.com/asaskevich/govalidator" + +func init() { + govalidator.SetFieldsRequiredByDefault(true) +} +``` + +Here's some code to explain it: +```go +// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): +type exampleStruct struct { + Name string `` + Email string `valid:"email"` +} + +// this, however, will only fail when Email is empty or an invalid email address: +type exampleStruct2 struct { + Name string `valid:"-"` + Email string `valid:"email"` +} + +// lastly, this will only fail when Email is an invalid email address but not when it's empty: +type exampleStruct2 struct { + Name string `valid:"-"` + Email string `valid:"email,optional"` +} +``` + +#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123)) +##### Custom validator function signature +A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible. +```go +import "github.com/asaskevich/govalidator" + +// old signature +func(i interface{}) bool + +// new signature +func(i interface{}, o interface{}) bool +``` + +##### Adding a custom validator +This was changed to prevent data races when accessing custom validators. +```go +import "github.com/asaskevich/govalidator" + +// before +govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool { + // ... +} + +// after +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool { + // ... +}) +``` + +#### List of functions: +```go +func Abs(value float64) float64 +func BlackList(str, chars string) string +func ByteLength(str string, params ...string) bool +func CamelCaseToUnderscore(str string) string +func Contains(str, substring string) bool +func Count(array []interface{}, iterator ConditionIterator) int +func Each(array []interface{}, iterator Iterator) +func ErrorByField(e error, field string) string +func ErrorsByField(e error) map[string]string +func Filter(array []interface{}, iterator ConditionIterator) []interface{} +func Find(array []interface{}, iterator ConditionIterator) interface{} +func GetLine(s string, index int) (string, error) +func GetLines(s string) []string +func HasLowerCase(str string) bool +func HasUpperCase(str string) bool +func HasWhitespace(str string) bool +func HasWhitespaceOnly(str string) bool +func InRange(value interface{}, left interface{}, right interface{}) bool +func InRangeFloat32(value, left, right float32) bool +func InRangeFloat64(value, left, right float64) bool +func InRangeInt(value, left, right interface{}) bool +func IsASCII(str string) bool +func IsAlpha(str string) bool +func IsAlphanumeric(str string) bool +func IsBase64(str string) bool +func IsByteLength(str string, min, max int) bool +func IsCIDR(str string) bool +func IsCRC32(str string) bool +func IsCRC32b(str string) bool +func IsCreditCard(str string) bool +func IsDNSName(str string) bool +func IsDataURI(str string) bool +func IsDialString(str string) bool +func IsDivisibleBy(str, num string) bool +func IsEmail(str string) bool +func IsExistingEmail(email string) bool +func IsFilePath(str string) (bool, int) +func IsFloat(str string) bool +func IsFullWidth(str string) bool +func IsHalfWidth(str string) bool +func IsHash(str string, algorithm string) bool +func IsHexadecimal(str string) bool +func IsHexcolor(str string) bool +func IsHost(str string) bool +func IsIP(str string) bool +func IsIPv4(str string) bool +func 
IsIPv6(str string) bool +func IsISBN(str string, version int) bool +func IsISBN10(str string) bool +func IsISBN13(str string) bool +func IsISO3166Alpha2(str string) bool +func IsISO3166Alpha3(str string) bool +func IsISO4217(str string) bool +func IsISO693Alpha2(str string) bool +func IsISO693Alpha3b(str string) bool +func IsIn(str string, params ...string) bool +func IsInRaw(str string, params ...string) bool +func IsInt(str string) bool +func IsJSON(str string) bool +func IsLatitude(str string) bool +func IsLongitude(str string) bool +func IsLowerCase(str string) bool +func IsMAC(str string) bool +func IsMD4(str string) bool +func IsMD5(str string) bool +func IsMagnetURI(str string) bool +func IsMongoID(str string) bool +func IsMultibyte(str string) bool +func IsNatural(value float64) bool +func IsNegative(value float64) bool +func IsNonNegative(value float64) bool +func IsNonPositive(value float64) bool +func IsNotNull(str string) bool +func IsNull(str string) bool +func IsNumeric(str string) bool +func IsPort(str string) bool +func IsPositive(value float64) bool +func IsPrintableASCII(str string) bool +func IsRFC3339(str string) bool +func IsRFC3339WithoutZone(str string) bool +func IsRGBcolor(str string) bool +func IsRequestURI(rawurl string) bool +func IsRequestURL(rawurl string) bool +func IsRipeMD128(str string) bool +func IsRipeMD160(str string) bool +func IsRsaPub(str string, params ...string) bool +func IsRsaPublicKey(str string, keylen int) bool +func IsSHA1(str string) bool +func IsSHA256(str string) bool +func IsSHA384(str string) bool +func IsSHA512(str string) bool +func IsSSN(str string) bool +func IsSemver(str string) bool +func IsTiger128(str string) bool +func IsTiger160(str string) bool +func IsTiger192(str string) bool +func IsTime(str string, format string) bool +func IsType(v interface{}, params ...string) bool +func IsURL(str string) bool +func IsUTFDigit(str string) bool +func IsUTFLetter(str string) bool +func IsUTFLetterNumeric(str string) bool +func IsUTFNumeric(str string) bool +func IsUUID(str string) bool +func IsUUIDv3(str string) bool +func IsUUIDv4(str string) bool +func IsUUIDv5(str string) bool +func IsUnixTime(str string) bool +func IsUpperCase(str string) bool +func IsVariableWidth(str string) bool +func IsWhole(value float64) bool +func LeftTrim(str, chars string) string +func Map(array []interface{}, iterator ResultIterator) []interface{} +func Matches(str, pattern string) bool +func MaxStringLength(str string, params ...string) bool +func MinStringLength(str string, params ...string) bool +func NormalizeEmail(str string) (string, error) +func PadBoth(str string, padStr string, padLen int) string +func PadLeft(str string, padStr string, padLen int) string +func PadRight(str string, padStr string, padLen int) string +func PrependPathToErrors(err error, path string) error +func Range(str string, params ...string) bool +func RemoveTags(s string) string +func ReplacePattern(str, pattern, replace string) string +func Reverse(s string) string +func RightTrim(str, chars string) string +func RuneLength(str string, params ...string) bool +func SafeFileName(str string) string +func SetFieldsRequiredByDefault(value bool) +func SetNilPtrAllowedByRequired(value bool) +func Sign(value float64) float64 +func StringLength(str string, params ...string) bool +func StringMatches(s string, params ...string) bool +func StripLow(str string, keepNewLines bool) string +func ToBoolean(str string) (bool, error) +func ToFloat(str string) (float64, error) +func ToInt(value 
interface{}) (res int64, err error) +func ToJSON(obj interface{}) (string, error) +func ToString(obj interface{}) string +func Trim(str, chars string) string +func Truncate(str string, length int, ending string) string +func TruncatingErrorf(str string, args ...interface{}) error +func UnderscoreToCamelCase(s string) string +func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error) +func ValidateStruct(s interface{}) (bool, error) +func WhiteList(str, chars string) string +type ConditionIterator +type CustomTypeValidator +type Error +func (e Error) Error() string +type Errors +func (es Errors) Error() string +func (es Errors) Errors() []error +type ISO3166Entry +type ISO693Entry +type InterfaceParamValidator +type Iterator +type ParamValidator +type ResultIterator +type UnsupportedTypeError +func (e *UnsupportedTypeError) Error() string +type Validator +``` + +#### Examples +###### IsURL +```go +println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) +``` +###### IsType +```go +println(govalidator.IsType("Bob", "string")) +println(govalidator.IsType(1, "int")) +i := 1 +println(govalidator.IsType(&i, "*int")) +``` + +IsType can be used through the tag `type` which is essential for map validation: +```go +type User struct { + Name string `valid:"type(string)"` + Age int `valid:"type(int)"` + Meta interface{} `valid:"type(string)"` +} +result, err := govalidator.ValidateStruct(user{"Bob", 20, "meta"}) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` +###### ToString +```go +type User struct { + FirstName string + LastName string +} + +str := govalidator.ToString(&User{"John", "Juan"}) +println(str) +``` +###### Each, Map, Filter, Count for slices +Each iterates over the slice/array and calls Iterator for every item +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.Iterator = func(value interface{}, index int) { + println(value.(int)) +} +govalidator.Each(data, fn) +``` +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} { + return value.(int) * 3 +} +_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} +``` +```go +data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} +var fn govalidator.ConditionIterator = func(value interface{}, index int) bool { + return value.(int)%2 == 0 +} +_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} +_ = govalidator.Count(data, fn) // result = 5 +``` +###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2) +If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this: +```go +govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { + return str == "duck" +}) +``` +For completely custom validators (interface-based), see below. 
+ +Here is a list of available validators for struct fields (validator - used function): +```go +"email": IsEmail, +"url": IsURL, +"dialstring": IsDialString, +"requrl": IsRequestURL, +"requri": IsRequestURI, +"alpha": IsAlpha, +"utfletter": IsUTFLetter, +"alphanum": IsAlphanumeric, +"utfletternum": IsUTFLetterNumeric, +"numeric": IsNumeric, +"utfnumeric": IsUTFNumeric, +"utfdigit": IsUTFDigit, +"hexadecimal": IsHexadecimal, +"hexcolor": IsHexcolor, +"rgbcolor": IsRGBcolor, +"lowercase": IsLowerCase, +"uppercase": IsUpperCase, +"int": IsInt, +"float": IsFloat, +"null": IsNull, +"uuid": IsUUID, +"uuidv3": IsUUIDv3, +"uuidv4": IsUUIDv4, +"uuidv5": IsUUIDv5, +"creditcard": IsCreditCard, +"isbn10": IsISBN10, +"isbn13": IsISBN13, +"json": IsJSON, +"multibyte": IsMultibyte, +"ascii": IsASCII, +"printableascii": IsPrintableASCII, +"fullwidth": IsFullWidth, +"halfwidth": IsHalfWidth, +"variablewidth": IsVariableWidth, +"base64": IsBase64, +"datauri": IsDataURI, +"ip": IsIP, +"port": IsPort, +"ipv4": IsIPv4, +"ipv6": IsIPv6, +"dns": IsDNSName, +"host": IsHost, +"mac": IsMAC, +"latitude": IsLatitude, +"longitude": IsLongitude, +"ssn": IsSSN, +"semver": IsSemver, +"rfc3339": IsRFC3339, +"rfc3339WithoutZone": IsRFC3339WithoutZone, +"ISO3166Alpha2": IsISO3166Alpha2, +"ISO3166Alpha3": IsISO3166Alpha3, +``` +Validators with parameters + +```go +"range(min|max)": Range, +"length(min|max)": ByteLength, +"runelength(min|max)": RuneLength, +"stringlength(min|max)": StringLength, +"matches(pattern)": StringMatches, +"in(string1|string2|...|stringN)": IsIn, +"rsapub(keylength)" : IsRsaPub, +``` +Validators with parameters for any type + +```go +"type(type)": IsType, +``` + +And here is small example of usage: +```go +type Post struct { + Title string `valid:"alphanum,required"` + Message string `valid:"duck,ascii"` + Message2 string `valid:"animal(dog)"` + AuthorIP string `valid:"ipv4"` + Date string `valid:"-"` +} +post := &Post{ + Title: "My Example Post", + Message: "duck", + Message2: "dog", + AuthorIP: "123.234.54.3", +} + +// Add your own struct validation tags +govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { + return str == "duck" +}) + +// Add your own struct validation tags with parameter +govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool { + species := params[0] + return str == species +}) +govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$") + +result, err := govalidator.ValidateStruct(post) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` +###### ValidateMap [#2](https://github.com/asaskevich/govalidator/pull/338) +If you want to validate maps, you can use the map to be validated and a validation map that contain the same tags used in ValidateStruct, both maps have to be in the form `map[string]interface{}` + +So here is small example of usage: +```go +var mapTemplate = map[string]interface{}{ + "name":"required,alpha", + "family":"required,alpha", + "email":"required,email", + "cell-phone":"numeric", + "address":map[string]interface{}{ + "line1":"required,alphanum", + "line2":"alphanum", + "postal-code":"numeric", + }, +} + +var inputMap = map[string]interface{}{ + "name":"Bob", + "family":"Smith", + "email":"foo@bar.baz", + "address":map[string]interface{}{ + "line1":"", + "line2":"", + "postal-code":"", + }, +} + +result, err := govalidator.ValidateMap(inputMap, mapTemplate) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` + 
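+Continuing from the snippet above, the combined error can be broken down per key; this is an illustrative sketch that assumes the `ErrorsByField` helper from the function list also applies to the error returned by `ValidateMap`:
+
+```go
+if _, err := govalidator.ValidateMap(inputMap, mapTemplate); err != nil {
+	// Print every failing key together with its validation message.
+	for field, msg := range govalidator.ErrorsByField(err) {
+		println(field + ": " + msg)
+	}
+}
+```
+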
+###### WhiteList +```go +// Remove all characters from string ignoring characters between "a" and "z" +println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa") +``` + +###### Custom validation functions +Custom validation using your own domain specific validators is also available - here's an example of how to use it: +```go +import "github.com/asaskevich/govalidator" + +type CustomByteArray [6]byte // custom types are supported and can be validated + +type StructWithCustomByteArray struct { + ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence + Email string `valid:"email"` + CustomMinLength int `valid:"-"` +} + +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool { + switch v := context.(type) { // you can type switch on the context interface being validated + case StructWithCustomByteArray: + // you can check and validate against some other field in the context, + // return early or not validate against the context at all – your choice + case SomeOtherType: + // ... + default: + // expecting some other type? Throw/panic here or continue + } + + switch v := i.(type) { // type switch on the struct field being validated + case CustomByteArray: + for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes + if e != 0 { + return true + } + } + } + return false +}) +govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool { + switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation + case StructWithCustomByteArray: + return len(v.ID) >= v.CustomMinLength + } + return false +}) +``` + +###### Loop over Error() +By default .Error() returns all errors in a single String. To access each error you can do this: +```go + if err != nil { + errs := err.(govalidator.Errors).Errors() + for _, e := range errs { + fmt.Println(e.Error()) + } + } +``` + +###### Custom error messages +Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it: +```go +type Ticket struct { + Id int64 `json:"id"` + FirstName string `json:"firstname" valid:"required~First name is blank"` +} +``` + +#### Notes +Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator). +Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator). + +#### Support +If you do have a contribution to the package, feel free to create a Pull Request or an Issue. 
+ +#### What to contribute +If you don't know what to do, there are some features and functions that need to be done + +- [ ] Refactor code +- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check +- [ ] Create actual list of contributors and projects that currently using this package +- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) +- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) +- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new +- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc +- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) +- [ ] Implement fuzzing testing +- [ ] Implement some struct/map/array utilities +- [ ] Implement map/array validation +- [ ] Implement benchmarking +- [ ] Implement batch of examples +- [ ] Look at forks for new features and fixes + +#### Advice +Feel free to create what you want, but keep in mind when you implement new features: +- Code must be clear and readable, names of variables/constants clearly describes what they are doing +- Public functions must be documented and described in source file and added to README.md to the list of available functions +- There are must be unit-tests for any new functions and improvements + +## Credits +### Contributors + +This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. + +#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors) +* [Daniel Lohse](https://github.com/annismckenzie) +* [Attila Oláh](https://github.com/attilaolah) +* [Daniel Korner](https://github.com/Dadie) +* [Steven Wilkin](https://github.com/stevenwilkin) +* [Deiwin Sarjas](https://github.com/deiwin) +* [Noah Shibley](https://github.com/slugmobile) +* [Nathan Davies](https://github.com/nathj07) +* [Matt Sanford](https://github.com/mzsanford) +* [Simon ccl1115](https://github.com/ccl1115) + + + + +### Backers + +Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)] + + + + +### Sponsors + +Support this project by becoming a sponsor. Your logo will show up here with a link to your website. 
[[Become a sponsor](https://opencollective.com/govalidator#sponsor)] + + + + + + + + + + + + + + + +## License +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go new file mode 100644 index 00000000000..5bace2654d3 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/arrays.go @@ -0,0 +1,58 @@ +package govalidator + +// Iterator is the function that accepts element of slice/array and its index +type Iterator func(interface{}, int) + +// ResultIterator is the function that accepts element of slice/array and its index and returns any result +type ResultIterator func(interface{}, int) interface{} + +// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean +type ConditionIterator func(interface{}, int) bool + +// Each iterates over the slice and apply Iterator to every item +func Each(array []interface{}, iterator Iterator) { + for index, data := range array { + iterator(data, index) + } +} + +// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result. +func Map(array []interface{}, iterator ResultIterator) []interface{} { + var result = make([]interface{}, len(array)) + for index, data := range array { + result[index] = iterator(data, index) + } + return result +} + +// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise. +func Find(array []interface{}, iterator ConditionIterator) interface{} { + for index, data := range array { + if iterator(data, index) { + return data + } + } + return nil +} + +// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice. +func Filter(array []interface{}, iterator ConditionIterator) []interface{} { + var result = make([]interface{}, 0) + for index, data := range array { + if iterator(data, index) { + result = append(result, data) + } + } + return result +} + +// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator. +func Count(array []interface{}, iterator ConditionIterator) int { + count := 0 + for index, data := range array { + if iterator(data, index) { + count = count + 1 + } + } + return count +} diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go new file mode 100644 index 00000000000..cf1e5d569ba --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/converter.go @@ -0,0 +1,64 @@ +package govalidator + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" +) + +// ToString convert the input to a string. +func ToString(obj interface{}) string { + res := fmt.Sprintf("%v", obj) + return string(res) +} + +// ToJSON convert the input to a valid JSON string +func ToJSON(obj interface{}) (string, error) { + res, err := json.Marshal(obj) + if err != nil { + res = []byte("") + } + return string(res), err +} + +// ToFloat convert the input string to a float, or 0.0 if the input is not a float. 
+func ToFloat(str string) (float64, error) { + res, err := strconv.ParseFloat(str, 64) + if err != nil { + res = 0.0 + } + return res, err +} + +// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. +func ToInt(value interface{}) (res int64, err error) { + val := reflect.ValueOf(value) + + switch value.(type) { + case int, int8, int16, int32, int64: + res = val.Int() + case uint, uint8, uint16, uint32, uint64: + res = int64(val.Uint()) + case string: + if IsInt(val.String()) { + res, err = strconv.ParseInt(val.String(), 0, 64) + if err != nil { + res = 0 + } + } else { + err = fmt.Errorf("math: square root of negative number %g", value) + res = 0 + } + default: + err = fmt.Errorf("math: square root of negative number %g", value) + res = 0 + } + + return +} + +// ToBoolean convert the input string to a boolean. +func ToBoolean(str string) (bool, error) { + return strconv.ParseBool(str) +} diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go new file mode 100644 index 00000000000..55dce62dc8c --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/doc.go @@ -0,0 +1,3 @@ +package govalidator + +// A package of validators and sanitizers for strings, structures and collections. diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go new file mode 100644 index 00000000000..1da2336f47e --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/error.go @@ -0,0 +1,47 @@ +package govalidator + +import ( + "sort" + "strings" +) + +// Errors is an array of multiple errors and conforms to the error interface. +type Errors []error + +// Errors returns itself. +func (es Errors) Errors() []error { + return es +} + +func (es Errors) Error() string { + var errs []string + for _, e := range es { + errs = append(errs, e.Error()) + } + sort.Strings(errs) + return strings.Join(errs, ";") +} + +// Error encapsulates a name, an error and whether there's a custom error message or not. 
+type Error struct { + Name string + Err error + CustomErrorMessageExists bool + + // Validator indicates the name of the validator that failed + Validator string + Path []string +} + +func (e Error) Error() string { + if e.CustomErrorMessageExists { + return e.Err.Error() + } + + errName := e.Name + if len(e.Path) > 0 { + errName = strings.Join(append(e.Path, e.Name), ".") + } + + return errName + ": " + e.Err.Error() +} diff --git a/vendor/github.com/asaskevich/govalidator/go.mod b/vendor/github.com/asaskevich/govalidator/go.mod new file mode 100644 index 00000000000..c1ce891dfa0 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/go.mod @@ -0,0 +1,3 @@ +module github.com/asaskevich/govalidator + +go 1.12 diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go new file mode 100644 index 00000000000..7e6c652e140 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/numerics.go @@ -0,0 +1,97 @@ +package govalidator + +import ( + "math" + "reflect" +) + +// Abs returns absolute value of number +func Abs(value float64) float64 { + return math.Abs(value) +} + +// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise +func Sign(value float64) float64 { + if value > 0 { + return 1 + } else if value < 0 { + return -1 + } else { + return 0 + } +} + +// IsNegative returns true if value < 0 +func IsNegative(value float64) bool { + return value < 0 +} + +// IsPositive returns true if value > 0 +func IsPositive(value float64) bool { + return value > 0 +} + +// IsNonNegative returns true if value >= 0 +func IsNonNegative(value float64) bool { + return value >= 0 +} + +// IsNonPositive returns true if value <= 0 +func IsNonPositive(value float64) bool { + return value <= 0 +} + +// InRange returns true if value lies between left and right border +func InRangeInt(value, left, right interface{}) bool { + value64, _ := ToInt(value) + left64, _ := ToInt(left) + right64, _ := ToInt(right) + if left64 > right64 { + left64, right64 = right64, left64 + } + return value64 >= left64 && value64 <= right64 +} + +// InRange returns true if value lies between left and right border +func InRangeFloat32(value, left, right float32) bool { + if left > right { + left, right = right, left + } + return value >= left && value <= right +} + +// InRange returns true if value lies between left and right border +func InRangeFloat64(value, left, right float64) bool { + if left > right { + left, right = right, left + } + return value >= left && value <= right +} + +// InRange returns true if value lies between left and right border, generic type to handle int, float32 or float64, all types must the same type +func InRange(value interface{}, left interface{}, right interface{}) bool { + + reflectValue := reflect.TypeOf(value).Kind() + reflectLeft := reflect.TypeOf(left).Kind() + reflectRight := reflect.TypeOf(right).Kind() + + if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int { + return InRangeInt(value.(int), left.(int), right.(int)) + } else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 { + return InRangeFloat32(value.(float32), left.(float32), right.(float32)) + } else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 { + return InRangeFloat64(value.(float64), left.(float64), right.(float64)) + } else { + return false + } +} + +// IsWhole returns true if value 
is whole number +func IsWhole(value float64) bool { + return math.Remainder(value, 1) == 0 +} + +// IsNatural returns true if value is natural number (positive and whole) +func IsNatural(value float64) bool { + return IsWhole(value) && IsPositive(value) +} diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go new file mode 100644 index 00000000000..e55451cff57 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/patterns.go @@ -0,0 +1,105 @@ +package govalidator + +import "regexp" + +// Basic regular expressions for validating strings +const ( + Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" + ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" + ISBN13 string = "^(?:[0-9]{13})$" + UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + Alpha string = "^[a-zA-Z]+$" + Alphanumeric string = "^[a-zA-Z0-9]+$" + Numeric string = "^[0-9]+$" + Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" + Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" + Hexadecimal string = "^[0-9a-fA-F]+$" + Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + ASCII string = "^[\x00-\x7F]+$" + Multibyte string = "[^\x00-\x7F]" + FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + PrintableASCII string = "^[\x20-\x7E]+$" + DataURI string = "^data:.+\\/(.+);base64$" + MagnetURI string = 
"^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$" + Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` + IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername string = `(\S+(:\S*)?@)` + URLPath string = `((\/|\?|#)[^\s]*)` + URLPort string = `(:(\d{1,5}))` + URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` + URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` + URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixPath string = `^(/[^/\x00]*)+/?$` + Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" + tagName string = "valid" + hasLowerCase string = ".*[[:lower:]]" + hasUpperCase string = ".*[[:upper:]]" + hasWhitespace string = ".*[[:space:]]" + hasWhitespaceOnly string = "^[[:space:]]+$" + IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" +) + +// Used by IsFilePath func +const ( + // Unknown is unresolved OS type + Unknown = iota + // Win is Windows type + Win + // Unix is *nix OS types + Unix +) + +var ( + userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") + hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") + userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") + rxEmail = regexp.MustCompile(Email) + rxCreditCard = regexp.MustCompile(CreditCard) + rxISBN10 = regexp.MustCompile(ISBN10) + rxISBN13 = regexp.MustCompile(ISBN13) + rxUUID3 = regexp.MustCompile(UUID3) + rxUUID4 = regexp.MustCompile(UUID4) + rxUUID5 = regexp.MustCompile(UUID5) + rxUUID = regexp.MustCompile(UUID) + rxAlpha = regexp.MustCompile(Alpha) + rxAlphanumeric = regexp.MustCompile(Alphanumeric) + rxNumeric = regexp.MustCompile(Numeric) + rxInt = regexp.MustCompile(Int) + rxFloat = regexp.MustCompile(Float) + rxHexadecimal = regexp.MustCompile(Hexadecimal) + rxHexcolor = regexp.MustCompile(Hexcolor) + rxRGBcolor = regexp.MustCompile(RGBcolor) + rxASCII = regexp.MustCompile(ASCII) + rxPrintableASCII = regexp.MustCompile(PrintableASCII) + rxMultibyte = regexp.MustCompile(Multibyte) + rxFullWidth = regexp.MustCompile(FullWidth) + 
rxHalfWidth = regexp.MustCompile(HalfWidth) + rxBase64 = regexp.MustCompile(Base64) + rxDataURI = regexp.MustCompile(DataURI) + rxMagnetURI = regexp.MustCompile(MagnetURI) + rxLatitude = regexp.MustCompile(Latitude) + rxLongitude = regexp.MustCompile(Longitude) + rxDNSName = regexp.MustCompile(DNSName) + rxURL = regexp.MustCompile(URL) + rxSSN = regexp.MustCompile(SSN) + rxWinPath = regexp.MustCompile(WinPath) + rxUnixPath = regexp.MustCompile(UnixPath) + rxSemver = regexp.MustCompile(Semver) + rxHasLowerCase = regexp.MustCompile(hasLowerCase) + rxHasUpperCase = regexp.MustCompile(hasUpperCase) + rxHasWhitespace = regexp.MustCompile(hasWhitespace) + rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) + rxIMEI = regexp.MustCompile(IMEI) +) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go new file mode 100644 index 00000000000..b57b666f57c --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/types.go @@ -0,0 +1,653 @@ +package govalidator + +import ( + "reflect" + "regexp" + "sort" + "sync" +) + +// Validator is a wrapper for a validator function that returns bool and accepts string. +type Validator func(str string) bool + +// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. +// The second parameter should be the context (in the case of validating a struct: the whole object being validated). +type CustomTypeValidator func(i interface{}, o interface{}) bool + +// ParamValidator is a wrapper for validator functions that accepts additional parameters. +type ParamValidator func(str string, params ...string) bool +type InterfaceParamValidator func(in interface{}, params ...string) bool +type tagOptionsMap map[string]tagOption + +func (t tagOptionsMap) orderedKeys() []string { + var keys []string + for k := range t { + keys = append(keys, k) + } + + sort.Slice(keys, func(a, b int) bool { + return t[keys[a]].order < t[keys[b]].order + }) + + return keys +} + +type tagOption struct { + name string + customErrorMessage string + order int +} + +// UnsupportedTypeError is a wrapper for reflect.Type +type UnsupportedTypeError struct { + Type reflect.Type +} + +// stringValues is a slice of reflect.Value holding *reflect.StringValue. +// It implements the methods to sort by string. +type stringValues []reflect.Value + +// InterfaceParamTagMap is a map of functions accept variants parameters for an interface value +var InterfaceParamTagMap = map[string]InterfaceParamValidator{ + "type": IsType, +} + +// InterfaceParamTagRegexMap maps interface param tags to their respective regexes. +var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{ + "type": regexp.MustCompile(`^type\((.*)\)$`), +} + +// ParamTagMap is a map of functions accept variants parameters +var ParamTagMap = map[string]ParamValidator{ + "length": ByteLength, + "range": Range, + "runelength": RuneLength, + "stringlength": StringLength, + "matches": StringMatches, + "in": IsInRaw, + "rsapub": IsRsaPub, + "minstringlength": MinStringLength, + "maxstringlength": MaxStringLength, +} + +// ParamTagRegexMap maps param tags to their respective regexes. 
+var ParamTagRegexMap = map[string]*regexp.Regexp{ + "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), + "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), + "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), + "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), + "in": regexp.MustCompile(`^in\((.*)\)`), + "matches": regexp.MustCompile(`^matches\((.+)\)$`), + "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), + "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"), + "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"), +} + +type customTypeTagMap struct { + validators map[string]CustomTypeValidator + + sync.RWMutex +} + +func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) { + tm.RLock() + defer tm.RUnlock() + v, ok := tm.validators[name] + return v, ok +} + +func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) { + tm.Lock() + defer tm.Unlock() + tm.validators[name] = ctv +} + +// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function. +// Use this to validate compound or custom types that need to be handled as a whole, e.g. +// `type UUID [16]byte` (this would be handled as an array of bytes). +var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)} + +// TagMap is a map of functions, that can be used as tags for ValidateStruct function. +var TagMap = map[string]Validator{ + "email": IsEmail, + "url": IsURL, + "dialstring": IsDialString, + "requrl": IsRequestURL, + "requri": IsRequestURI, + "alpha": IsAlpha, + "utfletter": IsUTFLetter, + "alphanum": IsAlphanumeric, + "utfletternum": IsUTFLetterNumeric, + "numeric": IsNumeric, + "utfnumeric": IsUTFNumeric, + "utfdigit": IsUTFDigit, + "hexadecimal": IsHexadecimal, + "hexcolor": IsHexcolor, + "rgbcolor": IsRGBcolor, + "lowercase": IsLowerCase, + "uppercase": IsUpperCase, + "int": IsInt, + "float": IsFloat, + "null": IsNull, + "notnull": IsNotNull, + "uuid": IsUUID, + "uuidv3": IsUUIDv3, + "uuidv4": IsUUIDv4, + "uuidv5": IsUUIDv5, + "creditcard": IsCreditCard, + "isbn10": IsISBN10, + "isbn13": IsISBN13, + "json": IsJSON, + "multibyte": IsMultibyte, + "ascii": IsASCII, + "printableascii": IsPrintableASCII, + "fullwidth": IsFullWidth, + "halfwidth": IsHalfWidth, + "variablewidth": IsVariableWidth, + "base64": IsBase64, + "datauri": IsDataURI, + "ip": IsIP, + "port": IsPort, + "ipv4": IsIPv4, + "ipv6": IsIPv6, + "dns": IsDNSName, + "host": IsHost, + "mac": IsMAC, + "latitude": IsLatitude, + "longitude": IsLongitude, + "ssn": IsSSN, + "semver": IsSemver, + "rfc3339": IsRFC3339, + "rfc3339WithoutZone": IsRFC3339WithoutZone, + "ISO3166Alpha2": IsISO3166Alpha2, + "ISO3166Alpha3": IsISO3166Alpha3, + "ISO4217": IsISO4217, + "IMEI": IsIMEI, +} + +// ISO3166Entry stores country codes +type ISO3166Entry struct { + EnglishShortName string + FrenchShortName string + Alpha2Code string + Alpha3Code string + Numeric string +} + +//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" +var ISO3166List = []ISO3166Entry{ + {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, + {"Albania", "Albanie (l')", "AL", "ALB", "008"}, + {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, + {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, + {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, + {"Andorra", "Andorre (l')", "AD", "AND", "020"}, + {"Angola", "Angola (l')", "AO", "AGO", "024"}, + 
{"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, + {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, + {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, + {"Australia", "Australie (l')", "AU", "AUS", "036"}, + {"Austria", "Autriche (l')", "AT", "AUT", "040"}, + {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, + {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, + {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, + {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, + {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, + {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, + {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, + {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, + {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, + {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, + {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, + {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, + {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, + {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, + {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, + {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, + {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, + {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, + {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"}, + {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, + {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, + {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, + {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, + {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, + {"Canada", "Canada (le)", "CA", "CAN", "124"}, + {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, + {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, + {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, + {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, + {"Chad", "Tchad (le)", "TD", "TCD", "148"}, + {"Chile", "Chili (le)", "CL", "CHL", "152"}, + {"China", "Chine (la)", "CN", "CHN", "156"}, + {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, + {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, + {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, + {"Colombia", "Colombie (la)", "CO", "COL", "170"}, + {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, + {"Mayotte", "Mayotte", "YT", "MYT", "175"}, + {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, + {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, + {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, + {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, + {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, + {"Cuba", "Cuba", "CU", "CUB", "192"}, + {"Cyprus", "Chypre", "CY", "CYP", "196"}, + {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, + {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, + {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, + {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, + {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, + {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, + {"El Salvador", "El Salvador", "SV", "SLV", "222"}, + {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, + {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, + 
{"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, + {"Estonia", "Estonie (l')", "EE", "EST", "233"}, + {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, + {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, + {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, + {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, + {"Finland", "Finlande (la)", "FI", "FIN", "246"}, + {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, + {"France", "France (la)", "FR", "FRA", "250"}, + {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, + {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, + {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, + {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, + {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, + {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, + {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, + {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, + {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, + {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, + {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, + {"Kiribati", "Kiribati", "KI", "KIR", "296"}, + {"Greece", "Grèce (la)", "GR", "GRC", "300"}, + {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, + {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, + {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, + {"Guam", "Guam", "GU", "GUM", "316"}, + {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, + {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, + {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, + {"Haiti", "Haïti", "HT", "HTI", "332"}, + {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, + {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, + {"Honduras", "Honduras (le)", "HN", "HND", "340"}, + {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, + {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, + {"Iceland", "Islande (l')", "IS", "ISL", "352"}, + {"India", "Inde (l')", "IN", "IND", "356"}, + {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, + {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, + {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, + {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, + {"Israel", "Israël", "IL", "ISR", "376"}, + {"Italy", "Italie (l')", "IT", "ITA", "380"}, + {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, + {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, + {"Japan", "Japon (le)", "JP", "JPN", "392"}, + {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, + {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, + {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, + {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, + {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, + {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, + {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, + {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, + {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, + {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, + {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, + {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, + {"Libya", "Libye (la)", "LY", "LBY", "434"}, + {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, + 
{"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, + {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, + {"Macao", "Macao", "MO", "MAC", "446"}, + {"Madagascar", "Madagascar", "MG", "MDG", "450"}, + {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, + {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, + {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, + {"Mali", "Mali (le)", "ML", "MLI", "466"}, + {"Malta", "Malte", "MT", "MLT", "470"}, + {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, + {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, + {"Mauritius", "Maurice", "MU", "MUS", "480"}, + {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, + {"Monaco", "Monaco", "MC", "MCO", "492"}, + {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, + {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, + {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, + {"Montserrat", "Montserrat", "MS", "MSR", "500"}, + {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, + {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, + {"Oman", "Oman", "OM", "OMN", "512"}, + {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, + {"Nauru", "Nauru", "NR", "NRU", "520"}, + {"Nepal", "Népal (le)", "NP", "NPL", "524"}, + {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, + {"Curaçao", "Curaçao", "CW", "CUW", "531"}, + {"Aruba", "Aruba", "AW", "ABW", "533"}, + {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, + {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, + {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, + {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, + {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, + {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, + {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, + {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, + {"Niue", "Niue", "NU", "NIU", "570"}, + {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, + {"Norway", "Norvège (la)", "NO", "NOR", "578"}, + {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, + {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, + {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, + {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, + {"Palau", "Palaos (les)", "PW", "PLW", "585"}, + {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, + {"Panama", "Panama (le)", "PA", "PAN", "591"}, + {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, + {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, + {"Peru", "Pérou (le)", "PE", "PER", "604"}, + {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, + {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, + {"Poland", "Pologne (la)", "PL", "POL", "616"}, + {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, + {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, + {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, + {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, + {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, + {"Réunion", "Réunion (La)", "RE", "REU", "638"}, + {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, + {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, + {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, + {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, + {"Saint Helena, Ascension and Tristan da Cunha", 
"Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, + {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, + {"Anguilla", "Anguilla", "AI", "AIA", "660"}, + {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, + {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, + {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, + {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, + {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, + {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, + {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, + {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, + {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, + {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, + {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, + {"Singapore", "Singapour", "SG", "SGP", "702"}, + {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, + {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, + {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, + {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, + {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, + {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, + {"Spain", "Espagne (l')", "ES", "ESP", "724"}, + {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, + {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, + {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"}, + {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, + {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, + {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, + {"Sweden", "Suède (la)", "SE", "SWE", "752"}, + {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, + {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, + {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, + {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, + {"Togo", "Togo (le)", "TG", "TGO", "768"}, + {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, + {"Tonga", "Tonga (les)", "TO", "TON", "776"}, + {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, + {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, + {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, + {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, + {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, + {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, + {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, + {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, + {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, + {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, + {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, + {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, + {"Guernsey", "Guernesey", "GG", "GGY", "831"}, + {"Jersey", "Jersey", "JE", "JEY", "832"}, + {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, + {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, + {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, + {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, + {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, + {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, + 
{"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, + {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, + {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, + {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, + {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, + {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, +} + +// ISO4217List is the list of ISO currency codes +var ISO4217List = []string{ + "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", + "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", + "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", + "DJF", "DKK", "DOP", "DZD", + "EGP", "ERN", "ETB", "EUR", + "FJD", "FKP", + "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", + "HKD", "HNL", "HRK", "HTG", "HUF", + "IDR", "ILS", "INR", "IQD", "IRR", "ISK", + "JMD", "JOD", "JPY", + "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", + "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", + "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", + "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", + "OMR", + "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", + "QAR", + "RON", "RSD", "RUB", "RWF", + "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL", + "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", + "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS", + "VEF", "VND", "VUV", + "WST", + "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", + "YER", + "ZAR", "ZMW", "ZWL", +} + +// ISO693Entry stores ISO language codes +type ISO693Entry struct { + Alpha3bCode string + Alpha2Code string + English string +} + +//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json +var ISO693List = []ISO693Entry{ + {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, + {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, + {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, + {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, + {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, + {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, + {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, + {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, + {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, + {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, + {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, + {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, + {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, + {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, + {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, + {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, + {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, + {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, + {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, + {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, + {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, + {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, + {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, + {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, + {Alpha3bCode: "bur", Alpha2Code: 
"my", English: "Burmese"}, + {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, + {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, + {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, + {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, + {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, + {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, + {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, + {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, + {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, + {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, + {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, + {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, + {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, + {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, + {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, + {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, + {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, + {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, + {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, + {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, + {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, + {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"}, + {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, + {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, + {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, + {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, + {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, + {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, + {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, + {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, + {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, + {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, + {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, + {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, + {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, + {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, + {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, + {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, + {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, + {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, + {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, + {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, + {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, + {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, + {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, + {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, + {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, + {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, + {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, + {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, + {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, + {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, + {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, + 
{Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, + {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, + {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, + {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, + {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, + {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, + {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, + {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, + {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, + {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, + {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, + {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, + {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, + {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, + {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, + {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, + {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, + {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, + {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, + {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, + {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, + {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, + {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, + {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, + {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, + {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, + {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, + {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, + {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, + {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, + {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, + {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, + {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, + {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, + {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, + {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, + {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, + {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, + {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, + {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, + {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, + {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, + {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, + {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, + {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, + {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, + {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, + {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, + {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, + {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, + {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, + {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, + {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, + 
{Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, + {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, + {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, + {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, + {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, + {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, + {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, + {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, + {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, + {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, + {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, + {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, + {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, + {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, + {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, + {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, + {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, + {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, + {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, + {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, + {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, + {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, + {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"}, + {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, + {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, + {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, + {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, + {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, + {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, + {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, + {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, + {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, + {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, + {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, + {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, + {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, + {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, + {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, + {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, + {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, + {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, + {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, + {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, + {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, + {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, + {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, + {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, + {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, + {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, + {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, + {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, + {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, + {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, +} diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go new file mode 100644 index 00000000000..f4c30f824a2 --- /dev/null +++ 
b/vendor/github.com/asaskevich/govalidator/utils.go @@ -0,0 +1,270 @@ +package govalidator + +import ( + "errors" + "fmt" + "html" + "math" + "path" + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +// Contains checks if the string contains the substring. +func Contains(str, substring string) bool { + return strings.Contains(str, substring) +} + +// Matches checks if string matches the pattern (pattern is regular expression) +// In case of error return false +func Matches(str, pattern string) bool { + match, _ := regexp.MatchString(pattern, str) + return match +} + +// LeftTrim trims characters from the left side of the input. +// If second argument is empty, it will remove leading spaces. +func LeftTrim(str, chars string) string { + if chars == "" { + return strings.TrimLeftFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("^[" + chars + "]+") + return r.ReplaceAllString(str, "") +} + +// RightTrim trims characters from the right side of the input. +// If second argument is empty, it will remove trailing spaces. +func RightTrim(str, chars string) string { + if chars == "" { + return strings.TrimRightFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("[" + chars + "]+$") + return r.ReplaceAllString(str, "") +} + +// Trim trims characters from both sides of the input. +// If second argument is empty, it will remove spaces. +func Trim(str, chars string) string { + return LeftTrim(RightTrim(str, chars), chars) +} + +// WhiteList removes characters that do not appear in the whitelist. +func WhiteList(str, chars string) string { + pattern := "[^" + chars + "]+" + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, "") +} + +// BlackList removes characters that appear in the blacklist. +func BlackList(str, chars string) string { + pattern := "[" + chars + "]+" + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, "") +} + +// StripLow removes characters with a numerical value < 32 and 127, mostly control characters. +// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD). +func StripLow(str string, keepNewLines bool) string { + chars := "" + if keepNewLines { + chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F" + } else { + chars = "\x00-\x1F\x7F" + } + return BlackList(str, chars) +} + +// ReplacePattern replaces regular expression pattern in string +func ReplacePattern(str, pattern, replace string) string { + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, replace) +} + +// Escape replaces <, >, & and " with HTML entities. +var Escape = html.EscapeString + +func addSegment(inrune, segment []rune) []rune { + if len(segment) == 0 { + return inrune + } + if len(inrune) != 0 { + inrune = append(inrune, '_') + } + inrune = append(inrune, segment...) + return inrune +} + +// UnderscoreToCamelCase converts from underscore separated form to camel case form. +// Ex.: my_func => MyFunc +func UnderscoreToCamelCase(s string) string { + return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1) +} + +// CamelCaseToUnderscore converts from camel case form to underscore separated form. 
+// Ex.: MyFunc => my_func +func CamelCaseToUnderscore(str string) string { + var output []rune + var segment []rune + for _, r := range str { + + // not treat number as separate segment + if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { + output = addSegment(output, segment) + segment = nil + } + segment = append(segment, unicode.ToLower(r)) + } + output = addSegment(output, segment) + return string(output) +} + +// Reverse returns reversed string +func Reverse(s string) string { + r := []rune(s) + for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r) +} + +// GetLines splits string by "\n" and return array of lines +func GetLines(s string) []string { + return strings.Split(s, "\n") +} + +// GetLine returns specified line of multiline string +func GetLine(s string, index int) (string, error) { + lines := GetLines(s) + if index < 0 || index >= len(lines) { + return "", errors.New("line index out of bounds") + } + return lines[index], nil +} + +// RemoveTags removes all tags from HTML string +func RemoveTags(s string) string { + return ReplacePattern(s, "<[^>]*>", "") +} + +// SafeFileName returns safe string that can be used in file names +func SafeFileName(str string) string { + name := strings.ToLower(str) + name = path.Clean(path.Base(name)) + name = strings.Trim(name, " ") + separators, err := regexp.Compile(`[ &_=+:]`) + if err == nil { + name = separators.ReplaceAllString(name, "-") + } + legal, err := regexp.Compile(`[^[:alnum:]-.]`) + if err == nil { + name = legal.ReplaceAllString(name, "") + } + for strings.Contains(name, "--") { + name = strings.Replace(name, "--", "-", -1) + } + return name +} + +// NormalizeEmail canonicalize an email address. +// The local part of the email address is lowercased for all domains; the hostname is always lowercased and +// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail). +// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and +// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are +// normalized to @gmail.com. +func NormalizeEmail(str string) (string, error) { + if !IsEmail(str) { + return "", fmt.Errorf("%s is not an email", str) + } + parts := strings.Split(str, "@") + parts[0] = strings.ToLower(parts[0]) + parts[1] = strings.ToLower(parts[1]) + if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { + parts[1] = "gmail.com" + parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] + } + return strings.Join(parts, "@"), nil +} + +// Truncate a string to the closest length without breaking words. 
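A minimal sketch of how the sanitizer helpers above are typically consumed, assuming the package is imported from its vendored path github.com/asaskevich/govalidator; the input values are only for illustration:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Keep only hexadecimal characters.
	fmt.Println(govalidator.WhiteList("a1b2-C3!", "0-9a-fA-F")) // a1b2C3

	// CamelCase identifier to snake_case.
	fmt.Println(govalidator.CamelCaseToUnderscore("MyFunc")) // my_func

	// File-system friendly name.
	fmt.Println(govalidator.SafeFileName("My Report (final).PDF")) // my-report-final.pdf

	// Canonical form of a GMail address (dots and "+tag" removed).
	if email, err := govalidator.NormalizeEmail("some.one+news@gmail.com"); err == nil {
		fmt.Println(email) // someone@gmail.com
	}
}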
+func Truncate(str string, length int, ending string) string { + var aftstr, befstr string + if len(str) > length { + words := strings.Fields(str) + before, present := 0, 0 + for i := range words { + befstr = aftstr + before = present + aftstr = aftstr + words[i] + " " + present = len(aftstr) + if present > length && i != 0 { + if (length - before) < (present - length) { + return Trim(befstr, " /\\.,\"'#!?&@+-") + ending + } + return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending + } + } + } + + return str +} + +// PadLeft pads left side of a string if size of string is less then indicated pad length +func PadLeft(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, true, false) +} + +// PadRight pads right side of a string if size of string is less then indicated pad length +func PadRight(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, false, true) +} + +// PadBoth pads both sides of a string if size of string is less then indicated pad length +func PadBoth(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, true, true) +} + +// PadString either left, right or both sides. +// Note that padding string can be unicode and more then one character +func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { + + // When padded length is less then the current string size + if padLen < utf8.RuneCountInString(str) { + return str + } + + padLen -= utf8.RuneCountInString(str) + + targetLen := padLen + + targetLenLeft := targetLen + targetLenRight := targetLen + if padLeft && padRight { + targetLenLeft = padLen / 2 + targetLenRight = padLen - targetLenLeft + } + + strToRepeatLen := utf8.RuneCountInString(padStr) + + repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen))) + repeatedString := strings.Repeat(padStr, repeatTimes) + + leftSide := "" + if padLeft { + leftSide = repeatedString[0:targetLenLeft] + } + + rightSide := "" + if padRight { + rightSide = repeatedString[0:targetLenRight] + } + + return leftSide + str + rightSide +} + +// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object +func TruncatingErrorf(str string, args ...interface{}) error { + n := strings.Count(str, "%s") + return fmt.Errorf(str, args[:n]...) +} diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go new file mode 100644 index 00000000000..298f9920d38 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/validator.go @@ -0,0 +1,1530 @@ +// Package govalidator is package of validators and sanitizers for strings, structs and collections. +package govalidator + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +var ( + fieldsRequiredByDefault bool + nilPtrAllowedByRequired = false + notNumberRegexp = regexp.MustCompile("[^0-9]+") + whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) + paramsRegexp = regexp.MustCompile(`\(.*\)$`) +) + +const maxURLRuneCount = 2083 +const minURLRuneCount = 3 +const RF3339WithoutZone = "2006-01-02T15:04:05" + +// SetFieldsRequiredByDefault causes validation to fail when struct fields +// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). 
+// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): +// type exampleStruct struct { +// Name string `` +// Email string `valid:"email"` +// This, however, will only fail when Email is empty or an invalid email address: +// type exampleStruct2 struct { +// Name string `valid:"-"` +// Email string `valid:"email"` +// Lastly, this will only fail when Email is an invalid email address but not when it's empty: +// type exampleStruct2 struct { +// Name string `valid:"-"` +// Email string `valid:"email,optional"` +func SetFieldsRequiredByDefault(value bool) { + fieldsRequiredByDefault = value +} + +// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required. +// The validation will still reject ptr fields in their zero value state. Example with this enabled: +// type exampleStruct struct { +// Name *string `valid:"required"` +// With `Name` set to "", this will be considered invalid input and will cause a validation error. +// With `Name` set to nil, this will be considered valid by validation. +// By default this is disabled. +func SetNilPtrAllowedByRequired(value bool) { + nilPtrAllowedByRequired = value +} + +// IsEmail check if the string is an email. +func IsEmail(str string) bool { + // TODO uppercase letters are not supported + return rxEmail.MatchString(str) +} + +// IsExistingEmail check if the string is an email of existing domain +func IsExistingEmail(email string) bool { + + if len(email) < 6 || len(email) > 254 { + return false + } + at := strings.LastIndex(email, "@") + if at <= 0 || at > len(email)-3 { + return false + } + user := email[:at] + host := email[at+1:] + if len(user) > 64 { + return false + } + if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { + return false + } + switch host { + case "localhost", "example.com": + return true + } + if _, err := net.LookupMX(host); err != nil { + if _, err := net.LookupIP(host); err != nil { + return false + } + } + + return true +} + +// IsURL check if the string is an URL. +func IsURL(str string) bool { + if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { + return false + } + strTemp := str + if strings.Contains(str, ":") && !strings.Contains(str, "://") { + // support no indicated urlscheme but with colon for port number + // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString + strTemp = "http://" + str + } + u, err := url.Parse(strTemp) + if err != nil { + return false + } + if strings.HasPrefix(u.Host, ".") { + return false + } + if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { + return false + } + return rxURL.MatchString(str) +} + +// IsRequestURL check if the string rawurl, assuming +// it was received in an HTTP request, is a valid +// URL confirm to RFC 3986 +func IsRequestURL(rawurl string) bool { + url, err := url.ParseRequestURI(rawurl) + if err != nil { + return false //Couldn't even parse the rawurl + } + if len(url.Scheme) == 0 { + return false //No Scheme found + } + return true +} + +// IsRequestURI check if the string rawurl, assuming +// it was received in an HTTP request, is an +// absolute URI or an absolute path. +func IsRequestURI(rawurl string) bool { + _, err := url.ParseRequestURI(rawurl) + return err == nil +} + +// IsAlpha check if the string contains only letters (a-zA-Z). Empty string is valid. 
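A short sketch of the simple boolean checkers defined so far; the sample values are hypothetical:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsEmail("user@example.com"))       // true
	fmt.Println(govalidator.IsURL("https://example.com/path")) // true
	fmt.Println(govalidator.IsRequestURI("/relative/path"))    // true
	fmt.Println(govalidator.IsRequestURL("ftp://example.com")) // true: a scheme is present

	// Treat untagged struct fields as required in later ValidateStruct calls.
	govalidator.SetFieldsRequiredByDefault(true)
}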
+func IsAlpha(str string) bool { + if IsNull(str) { + return true + } + return rxAlpha.MatchString(str) +} + +//IsUTFLetter check if the string contains only unicode letter characters. +//Similar to IsAlpha but for all languages. Empty string is valid. +func IsUTFLetter(str string) bool { + if IsNull(str) { + return true + } + + for _, c := range str { + if !unicode.IsLetter(c) { + return false + } + } + return true + +} + +// IsAlphanumeric check if the string contains only letters and numbers. Empty string is valid. +func IsAlphanumeric(str string) bool { + if IsNull(str) { + return true + } + return rxAlphanumeric.MatchString(str) +} + +// IsUTFLetterNumeric check if the string contains only unicode letters and numbers. Empty string is valid. +func IsUTFLetterNumeric(str string) bool { + if IsNull(str) { + return true + } + for _, c := range str { + if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok + return false + } + } + return true + +} + +// IsNumeric check if the string contains only numbers. Empty string is valid. +func IsNumeric(str string) bool { + if IsNull(str) { + return true + } + return rxNumeric.MatchString(str) +} + +// IsUTFNumeric check if the string contains only unicode numbers of any kind. +// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid. +func IsUTFNumeric(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsNumber(c) { //numbers && minus sign are ok + return false + } + } + return true + +} + +// IsUTFDigit check if the string contains only unicode radix-10 decimal digits. Empty string is valid. +func IsUTFDigit(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsDigit(c) { //digits && minus sign are ok + return false + } + } + return true + +} + +// IsHexadecimal check if the string is a hexadecimal number. +func IsHexadecimal(str string) bool { + return rxHexadecimal.MatchString(str) +} + +// IsHexcolor check if the string is a hexadecimal color. +func IsHexcolor(str string) bool { + return rxHexcolor.MatchString(str) +} + +// IsRGBcolor check if the string is a valid RGB color in form rgb(RRR, GGG, BBB). +func IsRGBcolor(str string) bool { + return rxRGBcolor.MatchString(str) +} + +// IsLowerCase check if the string is lowercase. Empty string is valid. +func IsLowerCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToLower(str) +} + +// IsUpperCase check if the string is uppercase. Empty string is valid. +func IsUpperCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToUpper(str) +} + +// HasLowerCase check if the string contains at least 1 lowercase. Empty string is valid. +func HasLowerCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasLowerCase.MatchString(str) +} + +// HasUpperCase check if the string contains as least 1 uppercase. Empty string is valid. +func HasUpperCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasUpperCase.MatchString(str) +} + +// IsInt check if the string is an integer. Empty string is valid. 
+func IsInt(str string) bool { + if IsNull(str) { + return true + } + return rxInt.MatchString(str) +} + +// IsFloat check if the string is a float. +func IsFloat(str string) bool { + return str != "" && rxFloat.MatchString(str) +} + +// IsDivisibleBy check if the string is a number that's divisible by another. +// If second argument is not valid integer or zero, it's return false. +// Otherwise, if first argument is not valid integer or zero, it's return true (Invalid string converts to zero). +func IsDivisibleBy(str, num string) bool { + f, _ := ToFloat(str) + p := int64(f) + q, _ := ToInt(num) + if q == 0 { + return false + } + return (p == 0) || (p%q == 0) +} + +// IsNull check if the string is null. +func IsNull(str string) bool { + return len(str) == 0 +} + +// IsNotNull check if the string is not null. +func IsNotNull(str string) bool { + return !IsNull(str) +} + +// HasWhitespaceOnly checks the string only contains whitespace +func HasWhitespaceOnly(str string) bool { + return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str) +} + +// HasWhitespace checks if the string contains any whitespace +func HasWhitespace(str string) bool { + return len(str) > 0 && rxHasWhitespace.MatchString(str) +} + +// IsByteLength check if the string's length (in bytes) falls in a range. +func IsByteLength(str string, min, max int) bool { + return len(str) >= min && len(str) <= max +} + +// IsUUIDv3 check if the string is a UUID version 3. +func IsUUIDv3(str string) bool { + return rxUUID3.MatchString(str) +} + +// IsUUIDv4 check if the string is a UUID version 4. +func IsUUIDv4(str string) bool { + return rxUUID4.MatchString(str) +} + +// IsUUIDv5 check if the string is a UUID version 5. +func IsUUIDv5(str string) bool { + return rxUUID5.MatchString(str) +} + +// IsUUID check if the string is a UUID (version 3, 4 or 5). +func IsUUID(str string) bool { + return rxUUID.MatchString(str) +} + +// IsCreditCard check if the string is a credit card. +func IsCreditCard(str string) bool { + sanitized := notNumberRegexp.ReplaceAllString(str, "") + if !rxCreditCard.MatchString(sanitized) { + return false + } + var sum int64 + var digit string + var tmpNum int64 + var shouldDouble bool + for i := len(sanitized) - 1; i >= 0; i-- { + digit = sanitized[i:(i + 1)] + tmpNum, _ = ToInt(digit) + if shouldDouble { + tmpNum *= 2 + if tmpNum >= 10 { + sum += ((tmpNum % 10) + 1) + } else { + sum += tmpNum + } + } else { + sum += tmpNum + } + shouldDouble = !shouldDouble + } + + return sum%10 == 0 +} + +// IsISBN10 check if the string is an ISBN version 10. +func IsISBN10(str string) bool { + return IsISBN(str, 10) +} + +// IsISBN13 check if the string is an ISBN version 13. +func IsISBN13(str string) bool { + return IsISBN(str, 13) +} + +// IsISBN check if the string is an ISBN (version 10 or 13). +// If version value is not equal to 10 or 13, it will be check both variants. 
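A brief sketch of the numeric, UUID and credit-card checkers above, using well-known test values:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsInt("2022"))                                    // true
	fmt.Println(govalidator.IsFloat("3.14"))                                  // true
	fmt.Println(govalidator.IsUUIDv4("f47ac10b-58cc-4372-a567-0e02b2c3d479")) // true
	fmt.Println(govalidator.IsCreditCard("4111 1111 1111 1111"))              // true: Luhn check passes
}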
+func IsISBN(str string, version int) bool { + sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") + var checksum int32 + var i int32 + if version == 10 { + if !rxISBN10.MatchString(sanitized) { + return false + } + for i = 0; i < 9; i++ { + checksum += (i + 1) * int32(sanitized[i]-'0') + } + if sanitized[9] == 'X' { + checksum += 10 * 10 + } else { + checksum += 10 * int32(sanitized[9]-'0') + } + if checksum%11 == 0 { + return true + } + return false + } else if version == 13 { + if !rxISBN13.MatchString(sanitized) { + return false + } + factor := []int32{1, 3} + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(sanitized[i]-'0') + } + return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 + } + return IsISBN(str, 10) || IsISBN(str, 13) +} + +// IsJSON check if the string is valid JSON (note: uses json.Unmarshal). +func IsJSON(str string) bool { + var js json.RawMessage + return json.Unmarshal([]byte(str), &js) == nil +} + +// IsMultibyte check if the string contains one or more multibyte chars. Empty string is valid. +func IsMultibyte(str string) bool { + if IsNull(str) { + return true + } + return rxMultibyte.MatchString(str) +} + +// IsASCII check if the string contains ASCII chars only. Empty string is valid. +func IsASCII(str string) bool { + if IsNull(str) { + return true + } + return rxASCII.MatchString(str) +} + +// IsPrintableASCII check if the string contains printable ASCII chars only. Empty string is valid. +func IsPrintableASCII(str string) bool { + if IsNull(str) { + return true + } + return rxPrintableASCII.MatchString(str) +} + +// IsFullWidth check if the string contains any full-width chars. Empty string is valid. +func IsFullWidth(str string) bool { + if IsNull(str) { + return true + } + return rxFullWidth.MatchString(str) +} + +// IsHalfWidth check if the string contains any half-width chars. Empty string is valid. +func IsHalfWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) +} + +// IsVariableWidth check if the string contains a mixture of full and half-width chars. Empty string is valid. +func IsVariableWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str) +} + +// IsBase64 check if a string is base64 encoded. +func IsBase64(str string) bool { + return rxBase64.MatchString(str) +} + +// IsFilePath check is a string is Win or Unix file path and returns it's type. 
+func IsFilePath(str string) (bool, int) { + if rxWinPath.MatchString(str) { + //check windows path limit see: + // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath + if len(str[3:]) > 32767 { + return false, Win + } + return true, Win + } else if rxUnixPath.MatchString(str) { + return true, Unix + } + return false, Unknown +} + +// IsDataURI checks if a string is base64 encoded data URI such as an image +func IsDataURI(str string) bool { + dataURI := strings.Split(str, ",") + if !rxDataURI.MatchString(dataURI[0]) { + return false + } + return IsBase64(dataURI[1]) +} + +// IsMagnetURI checks if a string is valid magnet URI +func IsMagnetURI(str string) bool { + return rxMagnetURI.MatchString(str) +} + +// IsISO3166Alpha2 checks if a string is valid two-letter country code +func IsISO3166Alpha2(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO3166Alpha3 checks if a string is valid three-letter country code +func IsISO3166Alpha3(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha3Code { + return true + } + } + return false +} + +// IsISO693Alpha2 checks if a string is valid two-letter language code +func IsISO693Alpha2(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO693Alpha3b checks if a string is valid three-letter language code +func IsISO693Alpha3b(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha3bCode { + return true + } + } + return false +} + +// IsDNSName will validate the given string as a DNS name +func IsDNSName(str string) bool { + if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { + // constraints already violated + return false + } + return !IsIP(str) && rxDNSName.MatchString(str) +} + +// IsHash checks if a string is a hash of type algorithm. +// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] +func IsHash(str string, algorithm string) bool { + var len string + algo := strings.ToLower(algorithm) + + if algo == "crc32" || algo == "crc32b" { + len = "8" + } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" { + len = "32" + } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" { + len = "40" + } else if algo == "tiger192" { + len = "48" + } else if algo == "sha256" { + len = "64" + } else if algo == "sha384" { + len = "96" + } else if algo == "sha512" { + len = "128" + } else { + return false + } + + return Matches(str, "^[a-f0-9]{"+len+"}$") +} + +// IsSHA512 checks is a string is a SHA512 hash. Alias for `IsHash(str, "sha512")` +func IsSHA512(str string) bool { + return IsHash(str, "sha512") +} + +// IsSHA384 checks is a string is a SHA384 hash. Alias for `IsHash(str, "sha384")` +func IsSHA384(str string) bool { + return IsHash(str, "sha384") +} + +// IsSHA256 checks is a string is a SHA256 hash. Alias for `IsHash(str, "sha256")` +func IsSHA256(str string) bool { + return IsHash(str, "sha256") +} + +// IsTiger192 checks is a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")` +func IsTiger192(str string) bool { + return IsHash(str, "tiger192") +} + +// IsTiger160 checks is a string is a Tiger160 hash. 
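The ISO lookup helpers above walk the ISO3166List and ISO693List tables defined in types.go; a quick sketch, with sample codes chosen only for illustration:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsISO3166Alpha2("DE"))    // true
	fmt.Println(govalidator.IsISO3166Alpha3("DEU"))   // true
	fmt.Println(govalidator.IsISO693Alpha2("en"))     // true
	fmt.Println(govalidator.IsDNSName("example.com")) // true

	// Hash checkers only verify length and hex alphabet, not the digest itself.
	fmt.Println(govalidator.IsHash("d41d8cd98f00b204e9800998ecf8427e", "md5")) // true
}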
Alias for `IsHash(str, "tiger160")` +func IsTiger160(str string) bool { + return IsHash(str, "tiger160") +} + +// IsRipeMD160 checks is a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")` +func IsRipeMD160(str string) bool { + return IsHash(str, "ripemd160") +} + +// IsSHA1 checks is a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")` +func IsSHA1(str string) bool { + return IsHash(str, "sha1") +} + +// IsTiger128 checks is a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")` +func IsTiger128(str string) bool { + return IsHash(str, "tiger128") +} + +// IsRipeMD128 checks is a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")` +func IsRipeMD128(str string) bool { + return IsHash(str, "ripemd128") +} + +// IsCRC32 checks is a string is a CRC32 hash. Alias for `IsHash(str, "crc32")` +func IsCRC32(str string) bool { + return IsHash(str, "crc32") +} + +// IsCRC32b checks is a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")` +func IsCRC32b(str string) bool { + return IsHash(str, "crc32b") +} + +// IsMD5 checks is a string is a MD5 hash. Alias for `IsHash(str, "md5")` +func IsMD5(str string) bool { + return IsHash(str, "md5") +} + +// IsMD4 checks is a string is a MD4 hash. Alias for `IsHash(str, "md4")` +func IsMD4(str string) bool { + return IsHash(str, "md4") +} + +// IsDialString validates the given string for usage with the various Dial() functions +func IsDialString(str string) bool { + if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) { + return true + } + + return false +} + +// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP` +func IsIP(str string) bool { + return net.ParseIP(str) != nil +} + +// IsPort checks if a string represents a valid port +func IsPort(str string) bool { + if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 { + return true + } + return false +} + +// IsIPv4 check if the string is an IP version 4. +func IsIPv4(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ".") +} + +// IsIPv6 check if the string is an IP version 6. +func IsIPv6(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ":") +} + +// IsCIDR check if the string is an valid CIDR notiation (IPV4 & IPV6) +func IsCIDR(str string) bool { + _, _, err := net.ParseCIDR(str) + return err == nil +} + +// IsMAC check if a string is valid MAC address. +// Possible MAC formats: +// 01:23:45:67:89:ab +// 01:23:45:67:89:ab:cd:ef +// 01-23-45-67-89-ab +// 01-23-45-67-89-ab-cd-ef +// 0123.4567.89ab +// 0123.4567.89ab.cdef +func IsMAC(str string) bool { + _, err := net.ParseMAC(str) + return err == nil +} + +// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name +func IsHost(str string) bool { + return IsIP(str) || IsDNSName(str) +} + +// IsMongoID check if the string is a valid hex-encoded representation of a MongoDB ObjectId. +func IsMongoID(str string) bool { + return rxHexadecimal.MatchString(str) && (len(str) == 24) +} + +// IsLatitude check if a string is valid latitude. +func IsLatitude(str string) bool { + return rxLatitude.MatchString(str) +} + +// IsLongitude check if a string is valid longitude. 
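A sketch of the network-oriented checkers just above; the addresses are placeholders:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsIP("192.168.0.1"))              // true
	fmt.Println(govalidator.IsIPv6("::1"))                    // true
	fmt.Println(govalidator.IsCIDR("10.0.0.0/8"))             // true
	fmt.Println(govalidator.IsMAC("01:23:45:67:89:ab"))       // true
	fmt.Println(govalidator.IsPort("8080"))                   // true
	fmt.Println(govalidator.IsDialString("example.com:443"))  // true: DNS name plus valid port
}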
+func IsLongitude(str string) bool { + return rxLongitude.MatchString(str) +} + +// IsIMEI check if a string is valid IMEI +func IsIMEI(str string) bool { + return rxIMEI.MatchString(str) +} + +// IsRsaPublicKey check if a string is valid public key with provided length +func IsRsaPublicKey(str string, keylen int) bool { + bb := bytes.NewBufferString(str) + pemBytes, err := ioutil.ReadAll(bb) + if err != nil { + return false + } + block, _ := pem.Decode(pemBytes) + if block != nil && block.Type != "PUBLIC KEY" { + return false + } + var der []byte + + if block != nil { + der = block.Bytes + } else { + der, err = base64.StdEncoding.DecodeString(str) + if err != nil { + return false + } + } + + key, err := x509.ParsePKIXPublicKey(der) + if err != nil { + return false + } + pubkey, ok := key.(*rsa.PublicKey) + if !ok { + return false + } + bitlen := len(pubkey.N.Bytes()) * 8 + return bitlen == int(keylen) +} + +func toJSONName(tag string) string { + if tag == "" { + return "" + } + + // JSON name always comes first. If there's no options then split[0] is + // JSON name, if JSON name is not set, then split[0] is an empty string. + split := strings.SplitN(tag, ",", 2) + + name := split[0] + + // However it is possible that the field is skipped when + // (de-)serializing from/to JSON, in which case assume that there is no + // tag name to use + if name == "-" { + return "" + } + return name +} + +func PrependPathToErrors(err error, path string) error { + switch err2 := err.(type) { + case Error: + err2.Path = append([]string{path}, err2.Path...) + return err2 + case Errors: + errors := err2.Errors() + for i, err3 := range errors { + errors[i] = PrependPathToErrors(err3, path) + } + return err2 + } + return err +} + +// ValidateMap use validation map for fields. +// result will be equal to `false` if there are any errors. +// s is the map containing the data to be validated. 
+// m is the validation map in the form: +// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}} +func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) { + if s == nil { + return true, nil + } + result := true + var err error + var errs Errors + var index int + val := reflect.ValueOf(s) + for key, value := range s { + presentResult := true + validator, ok := m[key] + if !ok { + presentResult = false + var err error + err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key) + err = PrependPathToErrors(err, key) + errs = append(errs, err) + } + valueField := reflect.ValueOf(value) + mapResult := true + typeResult := true + structResult := true + resultField := true + switch subValidator := validator.(type) { + case map[string]interface{}: + var err error + if v, ok := value.(map[string]interface{}); !ok { + mapResult = false + err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String()) + err = PrependPathToErrors(err, key) + errs = append(errs, err) + } else { + mapResult, err = ValidateMap(v, subValidator) + if err != nil { + mapResult = false + err = PrependPathToErrors(err, key) + errs = append(errs, err) + } + } + case string: + if (valueField.Kind() == reflect.Struct || + (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && + subValidator != "-" { + var err error + structResult, err = ValidateStruct(valueField.Interface()) + if err != nil { + err = PrependPathToErrors(err, key) + errs = append(errs, err) + } + } + resultField, err = typeCheck(valueField, reflect.StructField{ + Name: key, + PkgPath: "", + Type: val.Type(), + Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)), + Offset: 0, + Index: []int{index}, + Anonymous: false, + }, val, nil) + if err != nil { + errs = append(errs, err) + } + case nil: + // already handlerd when checked before + default: + typeResult = false + err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String()) + err = PrependPathToErrors(err, key) + errs = append(errs, err) + } + result = result && presentResult && typeResult && resultField && structResult && mapResult + index++ + } + // check required keys + requiredResult := true + for key, value := range m { + if schema, ok := value.(string); ok { + tags := parseTagIntoMap(schema) + if required, ok := tags["required"]; ok { + if _, ok := s[key]; !ok { + requiredResult = false + if required.customErrorMessage != "" { + err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}} + } else { + err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}} + } + errs = append(errs, err) + } + } + } + } + + if len(errs) > 0 { + err = errs + } + return result && requiredResult, err +} + +// ValidateStruct use tags for fields. +// result will be equal to `false` if there are any errors. 
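ValidateMap pairs a data map with a validation map of the shape shown above; a minimal sketch with hypothetical keys:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	data := map[string]interface{}{
		"name":  "Ada",
		"email": "ada@example.com",
	}
	rules := map[string]interface{}{
		"name":  "required,alpha",
		"email": "required,email",
	}
	ok, err := govalidator.ValidateMap(data, rules) // every data key must appear in rules
	fmt.Println(ok, err)                            // true <nil>
}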
+// todo currently there is no guarantee that errors will be returned in predictable order (tests may to fail) +func ValidateStruct(s interface{}) (bool, error) { + if s == nil { + return true, nil + } + result := true + var err error + val := reflect.ValueOf(s) + if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + val = val.Elem() + } + // we only accept structs + if val.Kind() != reflect.Struct { + return false, fmt.Errorf("function only accepts structs; got %s", val.Kind()) + } + var errs Errors + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + typeField := val.Type().Field(i) + if typeField.PkgPath != "" { + continue // Private field + } + structResult := true + if valueField.Kind() == reflect.Interface { + valueField = valueField.Elem() + } + if (valueField.Kind() == reflect.Struct || + (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && + typeField.Tag.Get(tagName) != "-" { + var err error + structResult, err = ValidateStruct(valueField.Interface()) + if err != nil { + err = PrependPathToErrors(err, typeField.Name) + errs = append(errs, err) + } + } + resultField, err2 := typeCheck(valueField, typeField, val, nil) + if err2 != nil { + + // Replace structure name with JSON name if there is a tag on the variable + jsonTag := toJSONName(typeField.Tag.Get("json")) + if jsonTag != "" { + switch jsonError := err2.(type) { + case Error: + jsonError.Name = jsonTag + err2 = jsonError + case Errors: + for i2, err3 := range jsonError { + switch customErr := err3.(type) { + case Error: + customErr.Name = jsonTag + jsonError[i2] = customErr + } + } + + err2 = jsonError + } + } + + errs = append(errs, err2) + } + result = result && resultField && structResult + } + if len(errs) > 0 { + err = errs + } + return result, err +} + +// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} +func parseTagIntoMap(tag string) tagOptionsMap { + optionsMap := make(tagOptionsMap) + options := strings.Split(tag, ",") + + for i, option := range options { + option = strings.TrimSpace(option) + + validationOptions := strings.Split(option, "~") + if !isValidTag(validationOptions[0]) { + continue + } + if len(validationOptions) == 2 { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} + } else { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} + } + } + return optionsMap +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +// IsSSN will validate the given string as a U.S. 
Social Security Number +func IsSSN(str string) bool { + if str == "" || len(str) != 11 { + return false + } + return rxSSN.MatchString(str) +} + +// IsSemver check if string is valid semantic version +func IsSemver(str string) bool { + return rxSemver.MatchString(str) +} + +// IsType check if interface is of some type +func IsType(v interface{}, params ...string) bool { + if len(params) == 1 { + typ := params[0] + return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1) + } + return false +} + +// IsTime check if string is valid according to given format +func IsTime(str string, format string) bool { + _, err := time.Parse(format, str) + return err == nil +} + +// IsUnixTime check if string is valid unix timestamp value +func IsUnixTime(str string) bool { + if _, err := strconv.Atoi(str); err == nil { + return true + } + return false +} + +// IsRFC3339 check if string is valid timestamp value according to RFC3339 +func IsRFC3339(str string) bool { + return IsTime(str, time.RFC3339) +} + +// IsRFC3339WithoutZone check if string is valid timestamp value according to RFC3339 which excludes the timezone. +func IsRFC3339WithoutZone(str string) bool { + return IsTime(str, RF3339WithoutZone) +} + +// IsISO4217 check if string is valid ISO currency code +func IsISO4217(str string) bool { + for _, currency := range ISO4217List { + if str == currency { + return true + } + } + + return false +} + +// ByteLength check string's length +func ByteLength(str string, params ...string) bool { + if len(params) == 2 { + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return len(str) >= int(min) && len(str) <= int(max) + } + + return false +} + +// RuneLength check string's length +// Alias for StringLength +func RuneLength(str string, params ...string) bool { + return StringLength(str, params...) +} + +// IsRsaPub check whether string is valid RSA key +// Alias for IsRsaPublicKey +func IsRsaPub(str string, params ...string) bool { + if len(params) == 1 { + len, _ := ToInt(params[0]) + return IsRsaPublicKey(str, int(len)) + } + + return false +} + +// StringMatches checks if a string matches a given pattern. 
+func StringMatches(s string, params ...string) bool { + if len(params) == 1 { + pattern := params[0] + return Matches(s, pattern) + } + return false +} + +// StringLength check string's length (including multi byte strings) +func StringLength(str string, params ...string) bool { + + if len(params) == 2 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return strLength >= int(min) && strLength <= int(max) + } + + return false +} + +// MinStringLength check string's minimum length (including multi byte strings) +func MinStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + return strLength >= int(min) + } + + return false +} + +// MaxStringLength check string's maximum length (including multi byte strings) +func MaxStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + max, _ := ToInt(params[0]) + return strLength <= int(max) + } + + return false +} + +// Range check string's length +func Range(str string, params ...string) bool { + if len(params) == 2 { + value, _ := ToFloat(str) + min, _ := ToFloat(params[0]) + max, _ := ToFloat(params[1]) + return InRange(value, min, max) + } + + return false +} + +func IsInRaw(str string, params ...string) bool { + if len(params) == 1 { + rawParams := params[0] + + parsedParams := strings.Split(rawParams, "|") + + return IsIn(str, parsedParams...) + } + + return false +} + +// IsIn check if string str is a member of the set of strings params +func IsIn(str string, params ...string) bool { + for _, param := range params { + if str == param { + return true + } + } + + return false +} + +func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { + if nilPtrAllowedByRequired { + k := v.Kind() + if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { + return true, nil + } + } + + if requiredOption, isRequired := options["required"]; isRequired { + if len(requiredOption.customErrorMessage) > 0 { + return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} + } + return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} + } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { + return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} + } + // not required and empty is valid + return true, nil +} + +func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { + if !v.IsValid() { + return false, nil + } + + tag := t.Tag.Get(tagName) + + // Check if the field should be ignored + switch tag { + case "": + if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { + if !fieldsRequiredByDefault { + return true, nil + } + return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} + } + case "-": + return true, nil + } + + isRootType := false + if options == nil { + isRootType = true + options = parseTagIntoMap(tag) + } + + if !isFieldSet(v) { + // an empty value is not validated, check only required + isValid, resultErr = checkRequired(v, t, options) + for key := range options { + delete(options, key) + } + return isValid, resultErr + } + + var customTypeErrors Errors + optionsOrder := options.orderedKeys() + 
for _, validatorName := range optionsOrder { + validatorStruct := options[validatorName] + if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { + delete(options, validatorName) + + if result := validatefunc(v.Interface(), o.Interface()); !result { + if len(validatorStruct.customErrorMessage) > 0 { + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) + continue + } + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) + } + } + } + + if len(customTypeErrors.Errors()) > 0 { + return false, customTypeErrors + } + + if isRootType { + // Ensure that we've checked the value by all specified validators before report that the value is valid + defer func() { + delete(options, "optional") + delete(options, "required") + + if isValid && resultErr == nil && len(options) != 0 { + optionsOrder := options.orderedKeys() + for _, validator := range optionsOrder { + isValid = false + resultErr = Error{t.Name, fmt.Errorf( + "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} + return + } + } + }() + } + + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // Check whether the tag looks like '!something' or 'something' + if validator[0] == '!' { + validator = validator[1:] + negate = true + } + + // Check for interface param validators + for key, value := range InterfaceParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := InterfaceParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + field := fmt.Sprint(v) + if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + } + } + + switch v.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, + reflect.String: + // for each tag option check the map of validator functions + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // Check whether the tag looks like '!something' or 'something' + if validator[0] == '!' 
{ + validator = validator[1:] + negate = true + } + + // Check for param validators + for key, value := range ParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := ParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + // type not yet supported, fail + return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} + } + } + + if validatefunc, ok := TagMap[validator]; ok { + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field); !result && !negate || result && negate { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + //Not Yet Supported Types (Fail here!) 
+ err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) + return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} + } + } + } + return true, nil + case reflect.Map: + if v.Type().Key().Kind() != reflect.String { + return false, &UnsupportedTypeError{v.Type()} + } + var sv stringValues + sv = v.MapKeys() + sort.Sort(sv) + result := true + for i, k := range sv { + var resultItem bool + var err error + if v.MapIndex(k).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.MapIndex(k), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) + if err != nil { + err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Slice, reflect.Array: + result := true + for i := 0; i < v.Len(); i++ { + var resultItem bool + var err error + if v.Index(i).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.Index(i), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.Index(i).Interface()) + if err != nil { + err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Interface: + // If the value is an interface then encode its element + if v.IsNil() { + return true, nil + } + return ValidateStruct(v.Interface()) + case reflect.Ptr: + // If the value is a pointer then check its element + if v.IsNil() { + return true, nil + } + return typeCheck(v.Elem(), t, o, options) + case reflect.Struct: + return true, nil + default: + return false, &UnsupportedTypeError{v.Type()} + } +} + +func stripParams(validatorString string) string { + return paramsRegexp.ReplaceAllString(validatorString, "") +} + +// isFieldSet returns false for nil pointers, interfaces, maps, and slices. For all other values, it returns true. +func isFieldSet(v reflect.Value) bool { + switch v.Kind() { + case reflect.Map, reflect.Slice, reflect.Interface, reflect.Ptr: + return !v.IsNil() + } + + return true +} + +// ErrorByField returns error for specified field of the struct +// validated by ValidateStruct or empty string if there are no errors +// or this field doesn't exists or doesn't have any errors. +func ErrorByField(e error, field string) string { + if e == nil { + return "" + } + return ErrorsByField(e)[field] +} + +// ErrorsByField returns map of errors of the struct validated +// by ValidateStruct or empty map if there are no errors. 
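+//
+// A usage sketch (the struct value and the exact message are illustrative):
+//
+//	_, err := ValidateStruct(someUser)
+//	msg := ErrorsByField(err)["Email"] // e.g. "foo does not validate as email"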
+func ErrorsByField(e error) map[string]string { + m := make(map[string]string) + if e == nil { + return m + } + // prototype for ValidateStruct + + switch e := e.(type) { + case Error: + m[e.Name] = e.Err.Error() + case Errors: + for _, item := range e.Errors() { + n := ErrorsByField(item) + for k, v := range n { + m[k] = v + } + } + } + + return m +} + +// Error returns string equivalent for reflect.Type +func (e *UnsupportedTypeError) Error() string { + return "validator: unsupported type: " + e.Type.String() +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } +func (sv stringValues) get(i int) string { return sv[i].String() } diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml new file mode 100644 index 00000000000..bc5f7b0864b --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/wercker.yml @@ -0,0 +1,15 @@ +box: golang +build: + steps: + - setup-go-workspace + + - script: + name: go get + code: | + go version + go get -t ./... + + - script: + name: go test + code: | + go test -race -v ./... diff --git a/vendor/github.com/avast/retry-go/.gitignore b/vendor/github.com/avast/retry-go/.gitignore new file mode 100644 index 00000000000..c40eb23f950 --- /dev/null +++ b/vendor/github.com/avast/retry-go/.gitignore @@ -0,0 +1,21 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# dep +vendor/ +Gopkg.lock + +# cover +coverage.txt diff --git a/vendor/github.com/avast/retry-go/.godocdown.tmpl b/vendor/github.com/avast/retry-go/.godocdown.tmpl new file mode 100644 index 00000000000..6873edf8eb3 --- /dev/null +++ b/vendor/github.com/avast/retry-go/.godocdown.tmpl @@ -0,0 +1,37 @@ +# {{ .Name }} + +[![Release](https://img.shields.io/github/release/avast/retry-go.svg?style=flat-square)](https://github.com/avast/retry-go/releases/latest) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square)](LICENSE.md) +[![Travis](https://img.shields.io/travis/avast/retry-go.svg?style=flat-square)](https://travis-ci.org/avast/retry-go) +[![AppVeyor](https://ci.appveyor.com/api/projects/status/fieg9gon3qlq0a9a?svg=true)](https://ci.appveyor.com/project/JaSei/retry-go) +[![Go Report Card](https://goreportcard.com/badge/github.com/avast/retry-go?style=flat-square)](https://goreportcard.com/report/github.com/avast/retry-go) +[![GoDoc](https://godoc.org/github.com/avast/retry-go?status.svg&style=flat-square)](http://godoc.org/github.com/avast/retry-go) +[![codecov.io](https://codecov.io/github/avast/retry-go/coverage.svg?branch=master)](https://codecov.io/github/avast/retry-go?branch=master) +[![Sourcegraph](https://sourcegraph.com/github.com/avast/retry-go/-/badge.svg)](https://sourcegraph.com/github.com/avast/retry-go?badge) + +{{ .EmitSynopsis }} + +{{ .EmitUsage }} + +## Contributing + +Contributions are very much welcome. + +### Makefile + +Makefile provides several handy rules, like README.md `generator` , `setup` for prepare build/dev environment, `test`, `cover`, etc... + +Try `make help` for more information. 
+ +### Before pull request + +please try: +* run tests (`make test`) +* run linter (`make lint`) +* if your IDE don't automaticaly do `go fmt`, run `go fmt` (`make fmt`) + +### README + +README.md are generate from template [.godocdown.tmpl](.godocdown.tmpl) and code documentation via [godocdown](https://github.com/robertkrimen/godocdown). + +Never edit README.md direct, because your change will be lost. diff --git a/vendor/github.com/avast/retry-go/.travis.yml b/vendor/github.com/avast/retry-go/.travis.yml new file mode 100644 index 00000000000..ae3e0b6888f --- /dev/null +++ b/vendor/github.com/avast/retry-go/.travis.yml @@ -0,0 +1,20 @@ +language: go + +go: + - 1.8 + - 1.9 + - "1.10" + - 1.11 + - 1.12 + - 1.13 + - 1.14 + - 1.15 + +install: + - make setup + +script: + - make ci + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/avast/retry-go/Gopkg.toml b/vendor/github.com/avast/retry-go/Gopkg.toml new file mode 100644 index 00000000000..cf8c9eb0e46 --- /dev/null +++ b/vendor/github.com/avast/retry-go/Gopkg.toml @@ -0,0 +1,3 @@ +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.1.4" diff --git a/vendor/github.com/avast/retry-go/LICENSE b/vendor/github.com/avast/retry-go/LICENSE new file mode 100644 index 00000000000..f63fca814f9 --- /dev/null +++ b/vendor/github.com/avast/retry-go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Avast + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/avast/retry-go/Makefile b/vendor/github.com/avast/retry-go/Makefile new file mode 100644 index 00000000000..769816d24f1 --- /dev/null +++ b/vendor/github.com/avast/retry-go/Makefile @@ -0,0 +1,65 @@ +SOURCE_FILES?=$$(go list ./... | grep -v /vendor/) +TEST_PATTERN?=. 
+TEST_OPTIONS?= +DEP?=$$(which dep) +VERSION?=$$(cat VERSION) +LINTER?=$$(which golangci-lint) +LINTER_VERSION=1.15.0 + +ifeq ($(OS),Windows_NT) + DEP_VERS=dep-windows-amd64 + LINTER_FILE=golangci-lint-$(LINTER_VERSION)-windows-amd64.zip + LINTER_UNPACK= >| app.zip; unzip -j app.zip -d $$GOPATH/bin; rm app.zip +else ifeq ($(OS), Darwin) + LINTER_FILE=golangci-lint-$(LINTER_VERSION)-darwin-amd64.tar.gz + LINTER_UNPACK= | tar xzf - -C $$GOPATH/bin --wildcards --strip 1 "**/golangci-lint" +else + DEP_VERS=dep-linux-amd64 + LINTER_FILE=golangci-lint-$(LINTER_VERSION)-linux-amd64.tar.gz + LINTER_UNPACK= | tar xzf - -C $$GOPATH/bin --wildcards --strip 1 "**/golangci-lint" +endif + +setup: + go get -u github.com/pierrre/gotestcover + go get -u golang.org/x/tools/cmd/cover + go get -u github.com/robertkrimen/godocdown/godocdown + @if [ "$(LINTER)" = "" ]; then\ + curl -L https://github.com/golangci/golangci-lint/releases/download/v$(LINTER_VERSION)/$(LINTER_FILE) $(LINTER_UNPACK) ;\ + chmod +x $$GOPATH/bin/golangci-lint;\ + fi + @if [ "$(DEP)" = "" ]; then\ + curl -L https://github.com/golang/dep/releases/download/v0.3.1/$(DEP_VERS) >| $$GOPATH/bin/dep;\ + chmod +x $$GOPATH/bin/dep;\ + fi + dep ensure + +generate: ## Generate README.md + godocdown >| README.md + +test: generate test_and_cover_report lint + +test_and_cover_report: + gotestcover $(TEST_OPTIONS) -covermode=atomic -coverprofile=coverage.txt $(SOURCE_FILES) -run $(TEST_PATTERN) -timeout=2m + +cover: test ## Run all the tests and opens the coverage report + go tool cover -html=coverage.txt + +fmt: ## gofmt and goimports all go files + find . -name '*.go' -not -wholename './vendor/*' | while read -r file; do gofmt -w -s "$$file"; goimports -w "$$file"; done + +lint: ## Run all the linters + golangci-lint run + +ci: test_and_cover_report ## Run all the tests but no linters - use https://golangci.com integration instead + +build: + go build + +release: ## Release new version + git tag | grep -q $(VERSION) && echo This version was released! Increase VERSION! 
|| git tag $(VERSION) && git push origin $(VERSION) && git tag v$(VERSION) && git push origin v$(VERSION) + +# Absolutely awesome: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html +help: + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +.DEFAULT_GOAL := build diff --git a/vendor/github.com/avast/retry-go/README.md b/vendor/github.com/avast/retry-go/README.md new file mode 100644 index 00000000000..80fb73b4b4e --- /dev/null +++ b/vendor/github.com/avast/retry-go/README.md @@ -0,0 +1,361 @@ +# retry + +[![Release](https://img.shields.io/github/release/avast/retry-go.svg?style=flat-square)](https://github.com/avast/retry-go/releases/latest) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square)](LICENSE.md) +[![Travis](https://img.shields.io/travis/avast/retry-go.svg?style=flat-square)](https://travis-ci.org/avast/retry-go) +[![AppVeyor](https://ci.appveyor.com/api/projects/status/fieg9gon3qlq0a9a?svg=true)](https://ci.appveyor.com/project/JaSei/retry-go) +[![Go Report Card](https://goreportcard.com/badge/github.com/avast/retry-go?style=flat-square)](https://goreportcard.com/report/github.com/avast/retry-go) +[![GoDoc](https://godoc.org/github.com/avast/retry-go?status.svg&style=flat-square)](http://godoc.org/github.com/avast/retry-go) +[![codecov.io](https://codecov.io/github/avast/retry-go/coverage.svg?branch=master)](https://codecov.io/github/avast/retry-go?branch=master) +[![Sourcegraph](https://sourcegraph.com/github.com/avast/retry-go/-/badge.svg)](https://sourcegraph.com/github.com/avast/retry-go?badge) + +Simple library for retry mechanism + +slightly inspired by +[Try::Tiny::Retry](https://metacpan.org/pod/Try::Tiny::Retry) + + +### SYNOPSIS + +http get with retry: + + url := "http://example.com" + var body []byte + + err := retry.Do( + func() error { + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + return nil + }, + ) + + fmt.Println(body) + +[next examples](https://github.com/avast/retry-go/tree/master/examples) + + +### SEE ALSO + +* [giantswarm/retry-go](https://github.com/giantswarm/retry-go) - slightly +complicated interface. + +* [sethgrid/pester](https://github.com/sethgrid/pester) - only http retry for +http calls with retries and backoff + +* [cenkalti/backoff](https://github.com/cenkalti/backoff) - Go port of the +exponential backoff algorithm from Google's HTTP Client Library for Java. Really +complicated interface. + +* [rafaeljesus/retry-go](https://github.com/rafaeljesus/retry-go) - looks good, +slightly similar as this package, don't have 'simple' `Retry` method + +* [matryer/try](https://github.com/matryer/try) - very popular package, +nonintuitive interface (for me) + + +### BREAKING CHANGES + +3.0.0 + +* `DelayTypeFunc` accepts a new parameter `err` - this breaking change affects +only your custom Delay Functions. This change allow [make delay functions based +on error](examples/delay_based_on_error_test.go). 
+ +1.0.2 -> 2.0.0 + +* argument of `retry.Delay` is final delay (no multiplication by `retry.Units` +anymore) + +* function `retry.Units` are removed + +* [more about this breaking change](https://github.com/avast/retry-go/issues/7) + +0.3.0 -> 1.0.0 + +* `retry.Retry` function are changed to `retry.Do` function + +* `retry.RetryCustom` (OnRetry) and `retry.RetryCustomWithOpts` functions are +now implement via functions produces Options (aka `retry.OnRetry`) + +## Usage + +```go +var ( + DefaultAttempts = uint(10) + DefaultDelay = 100 * time.Millisecond + DefaultMaxJitter = 100 * time.Millisecond + DefaultOnRetry = func(n uint, err error) {} + DefaultRetryIf = IsRecoverable + DefaultDelayType = CombineDelay(BackOffDelay, RandomDelay) + DefaultLastErrorOnly = false + DefaultContext = context.Background() +) +``` + +#### func BackOffDelay + +```go +func BackOffDelay(n uint, _ error, config *Config) time.Duration +``` +BackOffDelay is a DelayType which increases delay between consecutive retries + +#### func Do + +```go +func Do(retryableFunc RetryableFunc, opts ...Option) error +``` + +#### func FixedDelay + +```go +func FixedDelay(_ uint, _ error, config *Config) time.Duration +``` +FixedDelay is a DelayType which keeps delay the same through all iterations + +#### func IsRecoverable + +```go +func IsRecoverable(err error) bool +``` +IsRecoverable checks if error is an instance of `unrecoverableError` + +#### func RandomDelay + +```go +func RandomDelay(_ uint, _ error, config *Config) time.Duration +``` +RandomDelay is a DelayType which picks a random delay up to config.maxJitter + +#### func Unrecoverable + +```go +func Unrecoverable(err error) error +``` +Unrecoverable wraps an error in `unrecoverableError` struct + +#### type Config + +```go +type Config struct { +} +``` + + +#### type DelayTypeFunc + +```go +type DelayTypeFunc func(n uint, err error, config *Config) time.Duration +``` + +DelayTypeFunc is called to return the next delay to wait after the retriable +function fails on `err` after `n` attempts. + +#### func CombineDelay + +```go +func CombineDelay(delays ...DelayTypeFunc) DelayTypeFunc +``` +CombineDelay is a DelayType the combines all of the specified delays into a new +DelayTypeFunc + +#### type Error + +```go +type Error []error +``` + +Error type represents list of errors in retry + +#### func (Error) Error + +```go +func (e Error) Error() string +``` +Error method return string representation of Error It is an implementation of +error interface + +#### func (Error) WrappedErrors + +```go +func (e Error) WrappedErrors() []error +``` +WrappedErrors returns the list of errors that this Error is wrapping. It is an +implementation of the `errwrap.Wrapper` interface in package +[errwrap](https://github.com/hashicorp/errwrap) so that `retry.Error` can be +used with that library. + +#### type OnRetryFunc + +```go +type OnRetryFunc func(n uint, err error) +``` + +Function signature of OnRetry function n = count of attempts + +#### type Option + +```go +type Option func(*Config) +``` + +Option represents an option for retry. 
+ +#### func Attempts + +```go +func Attempts(attempts uint) Option +``` +Attempts set count of retry default is 10 + +#### func Context + +```go +func Context(ctx context.Context) Option +``` +Context allow to set context of retry default are Background context + +example of immediately cancellation (maybe it isn't the best example, but it +describes behavior enough; I hope) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + retry.Do( + func() error { + ... + }, + retry.Context(ctx), + ) + +#### func Delay + +```go +func Delay(delay time.Duration) Option +``` +Delay set delay between retry default is 100ms + +#### func DelayType + +```go +func DelayType(delayType DelayTypeFunc) Option +``` +DelayType set type of the delay between retries default is BackOff + +#### func LastErrorOnly + +```go +func LastErrorOnly(lastErrorOnly bool) Option +``` +return the direct last error that came from the retried function default is +false (return wrapped errors with everything) + +#### func MaxDelay + +```go +func MaxDelay(maxDelay time.Duration) Option +``` +MaxDelay set maximum delay between retry does not apply by default + +#### func MaxJitter + +```go +func MaxJitter(maxJitter time.Duration) Option +``` +MaxJitter sets the maximum random Jitter between retries for RandomDelay + +#### func OnRetry + +```go +func OnRetry(onRetry OnRetryFunc) Option +``` +OnRetry function callback are called each retry + +log each retry example: + + retry.Do( + func() error { + return errors.New("some error") + }, + retry.OnRetry(func(n uint, err error) { + log.Printf("#%d: %s\n", n, err) + }), + ) + +#### func RetryIf + +```go +func RetryIf(retryIf RetryIfFunc) Option +``` +RetryIf controls whether a retry should be attempted after an error (assuming +there are any retry attempts remaining) + +skip retry if special error example: + + retry.Do( + func() error { + return errors.New("special error") + }, + retry.RetryIf(func(err error) bool { + if err.Error() == "special error" { + return false + } + return true + }) + ) + +By default RetryIf stops execution if the error is wrapped using +`retry.Unrecoverable`, so above example may also be shortened to: + + retry.Do( + func() error { + return retry.Unrecoverable(errors.New("special error")) + } + ) + +#### type RetryIfFunc + +```go +type RetryIfFunc func(error) bool +``` + +Function signature of retry if function + +#### type RetryableFunc + +```go +type RetryableFunc func() error +``` + +Function signature of retryable function + +## Contributing + +Contributions are very much welcome. + +### Makefile + +Makefile provides several handy rules, like README.md `generator` , `setup` for prepare build/dev environment, `test`, `cover`, etc... + +Try `make help` for more information. + +### Before pull request + +please try: +* run tests (`make test`) +* run linter (`make lint`) +* if your IDE don't automaticaly do `go fmt`, run `go fmt` (`make fmt`) + +### README + +README.md are generate from template [.godocdown.tmpl](.godocdown.tmpl) and code documentation via [godocdown](https://github.com/robertkrimen/godocdown). + +Never edit README.md direct, because your change will be lost. 
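+
+## Example: combining options
+
+A minimal sketch that combines `Do` with several of the options documented above; the endpoint, the status-code check and the chosen attempt/delay values are illustrative assumptions, not recommendations:
+
+```go
+package main
+
+import (
+	"errors"
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/avast/retry-go"
+)
+
+func main() {
+	err := retry.Do(
+		func() error {
+			resp, err := http.Get("http://example.com") // illustrative endpoint
+			if err != nil {
+				return err
+			}
+			defer resp.Body.Close()
+			if resp.StatusCode == http.StatusNotFound {
+				// a 404 is not worth retrying: stop immediately
+				return retry.Unrecoverable(errors.New("not found"))
+			}
+			return nil
+		},
+		retry.Attempts(5),                 // instead of the default 10
+		retry.Delay(200*time.Millisecond), // instead of the default 100ms
+		retry.MaxDelay(2*time.Second),
+		retry.DelayType(retry.BackOffDelay),
+		retry.OnRetry(func(n uint, err error) {
+			log.Printf("attempt #%d failed: %s", n, err)
+		}),
+		retry.LastErrorOnly(true),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+`LastErrorOnly(true)` trades the aggregated `Error` list for just the final error, which is usually enough for logging.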
diff --git a/vendor/github.com/avast/retry-go/VERSION b/vendor/github.com/avast/retry-go/VERSION new file mode 100644 index 00000000000..4a36342fcab --- /dev/null +++ b/vendor/github.com/avast/retry-go/VERSION @@ -0,0 +1 @@ +3.0.0 diff --git a/vendor/github.com/avast/retry-go/appveyor.yml b/vendor/github.com/avast/retry-go/appveyor.yml new file mode 100644 index 00000000000..dc5234ac868 --- /dev/null +++ b/vendor/github.com/avast/retry-go/appveyor.yml @@ -0,0 +1,19 @@ +version: "{build}" + +clone_folder: c:\Users\appveyor\go\src\github.com\avast\retry-go + +#os: Windows Server 2012 R2 +platform: x64 + +install: + - copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe + - set GOPATH=C:\Users\appveyor\go + - set PATH=%PATH%;c:\MinGW\bin + - set PATH=%PATH%;%GOPATH%\bin;c:\go\bin + - set GOBIN=%GOPATH%\bin + - go version + - go env + - make setup + +build_script: + - make ci diff --git a/vendor/github.com/avast/retry-go/options.go b/vendor/github.com/avast/retry-go/options.go new file mode 100644 index 00000000000..a6c57207c7e --- /dev/null +++ b/vendor/github.com/avast/retry-go/options.go @@ -0,0 +1,198 @@ +package retry + +import ( + "context" + "math" + "math/rand" + "time" +) + +// Function signature of retry if function +type RetryIfFunc func(error) bool + +// Function signature of OnRetry function +// n = count of attempts +type OnRetryFunc func(n uint, err error) + +// DelayTypeFunc is called to return the next delay to wait after the retriable function fails on `err` after `n` attempts. +type DelayTypeFunc func(n uint, err error, config *Config) time.Duration + +type Config struct { + attempts uint + delay time.Duration + maxDelay time.Duration + maxJitter time.Duration + onRetry OnRetryFunc + retryIf RetryIfFunc + delayType DelayTypeFunc + lastErrorOnly bool + context context.Context + + maxBackOffN uint +} + +// Option represents an option for retry. +type Option func(*Config) + +// return the direct last error that came from the retried function +// default is false (return wrapped errors with everything) +func LastErrorOnly(lastErrorOnly bool) Option { + return func(c *Config) { + c.lastErrorOnly = lastErrorOnly + } +} + +// Attempts set count of retry +// default is 10 +func Attempts(attempts uint) Option { + return func(c *Config) { + c.attempts = attempts + } +} + +// Delay set delay between retry +// default is 100ms +func Delay(delay time.Duration) Option { + return func(c *Config) { + c.delay = delay + } +} + +// MaxDelay set maximum delay between retry +// does not apply by default +func MaxDelay(maxDelay time.Duration) Option { + return func(c *Config) { + c.maxDelay = maxDelay + } +} + +// MaxJitter sets the maximum random Jitter between retries for RandomDelay +func MaxJitter(maxJitter time.Duration) Option { + return func(c *Config) { + c.maxJitter = maxJitter + } +} + +// DelayType set type of the delay between retries +// default is BackOff +func DelayType(delayType DelayTypeFunc) Option { + return func(c *Config) { + c.delayType = delayType + } +} + +// BackOffDelay is a DelayType which increases delay between consecutive retries +func BackOffDelay(n uint, _ error, config *Config) time.Duration { + // 1 << 63 would overflow signed int64 (time.Duration), thus 62. 
+ const max uint = 62 + + if config.maxBackOffN == 0 { + if config.delay <= 0 { + config.delay = 1 + } + + config.maxBackOffN = max - uint(math.Floor(math.Log2(float64(config.delay)))) + } + + if n > config.maxBackOffN { + n = config.maxBackOffN + } + + return config.delay << n +} + +// FixedDelay is a DelayType which keeps delay the same through all iterations +func FixedDelay(_ uint, _ error, config *Config) time.Duration { + return config.delay +} + +// RandomDelay is a DelayType which picks a random delay up to config.maxJitter +func RandomDelay(_ uint, _ error, config *Config) time.Duration { + return time.Duration(rand.Int63n(int64(config.maxJitter))) +} + +// CombineDelay is a DelayType the combines all of the specified delays into a new DelayTypeFunc +func CombineDelay(delays ...DelayTypeFunc) DelayTypeFunc { + const maxInt64 = uint64(math.MaxInt64) + + return func(n uint, err error, config *Config) time.Duration { + var total uint64 + for _, delay := range delays { + total += uint64(delay(n, err, config)) + if total > maxInt64 { + total = maxInt64 + } + } + + return time.Duration(total) + } +} + +// OnRetry function callback are called each retry +// +// log each retry example: +// +// retry.Do( +// func() error { +// return errors.New("some error") +// }, +// retry.OnRetry(func(n uint, err error) { +// log.Printf("#%d: %s\n", n, err) +// }), +// ) +func OnRetry(onRetry OnRetryFunc) Option { + return func(c *Config) { + c.onRetry = onRetry + } +} + +// RetryIf controls whether a retry should be attempted after an error +// (assuming there are any retry attempts remaining) +// +// skip retry if special error example: +// +// retry.Do( +// func() error { +// return errors.New("special error") +// }, +// retry.RetryIf(func(err error) bool { +// if err.Error() == "special error" { +// return false +// } +// return true +// }) +// ) +// +// By default RetryIf stops execution if the error is wrapped using `retry.Unrecoverable`, +// so above example may also be shortened to: +// +// retry.Do( +// func() error { +// return retry.Unrecoverable(errors.New("special error")) +// } +// ) +func RetryIf(retryIf RetryIfFunc) Option { + return func(c *Config) { + c.retryIf = retryIf + } +} + +// Context allow to set context of retry +// default are Background context +// +// example of immediately cancellation (maybe it isn't the best example, but it describes behavior enough; I hope) +// +// ctx, cancel := context.WithCancel(context.Background()) +// cancel() +// +// retry.Do( +// func() error { +// ... 
+// }, +// retry.Context(ctx), +// ) +func Context(ctx context.Context) Option { + return func(c *Config) { + c.context = ctx + } +} diff --git a/vendor/github.com/avast/retry-go/retry.go b/vendor/github.com/avast/retry-go/retry.go new file mode 100644 index 00000000000..af2d9264165 --- /dev/null +++ b/vendor/github.com/avast/retry-go/retry.go @@ -0,0 +1,225 @@ +/* +Simple library for retry mechanism + +slightly inspired by [Try::Tiny::Retry](https://metacpan.org/pod/Try::Tiny::Retry) + +SYNOPSIS + +http get with retry: + + url := "http://example.com" + var body []byte + + err := retry.Do( + func() error { + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + return nil + }, + ) + + fmt.Println(body) + +[next examples](https://github.com/avast/retry-go/tree/master/examples) + + +SEE ALSO + +* [giantswarm/retry-go](https://github.com/giantswarm/retry-go) - slightly complicated interface. + +* [sethgrid/pester](https://github.com/sethgrid/pester) - only http retry for http calls with retries and backoff + +* [cenkalti/backoff](https://github.com/cenkalti/backoff) - Go port of the exponential backoff algorithm from Google's HTTP Client Library for Java. Really complicated interface. + +* [rafaeljesus/retry-go](https://github.com/rafaeljesus/retry-go) - looks good, slightly similar as this package, don't have 'simple' `Retry` method + +* [matryer/try](https://github.com/matryer/try) - very popular package, nonintuitive interface (for me) + + +BREAKING CHANGES + +3.0.0 + +* `DelayTypeFunc` accepts a new parameter `err` - this breaking change affects only your custom Delay Functions. This change allow [make delay functions based on error](examples/delay_based_on_error_test.go). 
+ + +1.0.2 -> 2.0.0 + +* argument of `retry.Delay` is final delay (no multiplication by `retry.Units` anymore) + +* function `retry.Units` are removed + +* [more about this breaking change](https://github.com/avast/retry-go/issues/7) + + +0.3.0 -> 1.0.0 + +* `retry.Retry` function are changed to `retry.Do` function + +* `retry.RetryCustom` (OnRetry) and `retry.RetryCustomWithOpts` functions are now implement via functions produces Options (aka `retry.OnRetry`) + + +*/ +package retry + +import ( + "context" + "fmt" + "strings" + "time" +) + +// Function signature of retryable function +type RetryableFunc func() error + +var ( + DefaultAttempts = uint(10) + DefaultDelay = 100 * time.Millisecond + DefaultMaxJitter = 100 * time.Millisecond + DefaultOnRetry = func(n uint, err error) {} + DefaultRetryIf = IsRecoverable + DefaultDelayType = CombineDelay(BackOffDelay, RandomDelay) + DefaultLastErrorOnly = false + DefaultContext = context.Background() +) + +func Do(retryableFunc RetryableFunc, opts ...Option) error { + var n uint + + //default + config := &Config{ + attempts: DefaultAttempts, + delay: DefaultDelay, + maxJitter: DefaultMaxJitter, + onRetry: DefaultOnRetry, + retryIf: DefaultRetryIf, + delayType: DefaultDelayType, + lastErrorOnly: DefaultLastErrorOnly, + context: DefaultContext, + } + + //apply opts + for _, opt := range opts { + opt(config) + } + + if err := config.context.Err(); err != nil { + return err + } + + var errorLog Error + if !config.lastErrorOnly { + errorLog = make(Error, config.attempts) + } else { + errorLog = make(Error, 1) + } + + lastErrIndex := n + for n < config.attempts { + err := retryableFunc() + + if err != nil { + errorLog[lastErrIndex] = unpackUnrecoverable(err) + + if !config.retryIf(err) { + break + } + + config.onRetry(n, err) + + // if this is last attempt - don't wait + if n == config.attempts-1 { + break + } + + delayTime := config.delayType(n, err, config) + if config.maxDelay > 0 && delayTime > config.maxDelay { + delayTime = config.maxDelay + } + + select { + case <-time.After(delayTime): + case <-config.context.Done(): + return config.context.Err() + } + + } else { + return nil + } + + n++ + if !config.lastErrorOnly { + lastErrIndex = n + } + } + + if config.lastErrorOnly { + return errorLog[lastErrIndex] + } + return errorLog +} + +// Error type represents list of errors in retry +type Error []error + +// Error method return string representation of Error +// It is an implementation of error interface +func (e Error) Error() string { + logWithNumber := make([]string, lenWithoutNil(e)) + for i, l := range e { + if l != nil { + logWithNumber[i] = fmt.Sprintf("#%d: %s", i+1, l.Error()) + } + } + + return fmt.Sprintf("All attempts fail:\n%s", strings.Join(logWithNumber, "\n")) +} + +func lenWithoutNil(e Error) (count int) { + for _, v := range e { + if v != nil { + count++ + } + } + + return +} + +// WrappedErrors returns the list of errors that this Error is wrapping. +// It is an implementation of the `errwrap.Wrapper` interface +// in package [errwrap](https://github.com/hashicorp/errwrap) so that +// `retry.Error` can be used with that library. 
+func (e Error) WrappedErrors() []error { + return e +} + +type unrecoverableError struct { + error +} + +// Unrecoverable wraps an error in `unrecoverableError` struct +func Unrecoverable(err error) error { + return unrecoverableError{err} +} + +// IsRecoverable checks if error is an instance of `unrecoverableError` +func IsRecoverable(err error) bool { + _, isUnrecoverable := err.(unrecoverableError) + return !isUnrecoverable +} + +func unpackUnrecoverable(err error) error { + if unrecoverable, isUnrecoverable := err.(unrecoverableError); isUnrecoverable { + return unrecoverable.error + } + + return err +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/.gitignore b/vendor/github.com/bmatcuk/doublestar/v4/.gitignore new file mode 100644 index 00000000000..af212ecc281 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/.gitignore @@ -0,0 +1,32 @@ +# vi +*~ +*.swp +*.swo + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# test directory +test/ diff --git a/vendor/github.com/bmatcuk/doublestar/v4/.travis.yml b/vendor/github.com/bmatcuk/doublestar/v4/.travis.yml new file mode 100644 index 00000000000..334e0bfaa4d --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.16 + +os: + - linux + - windows + +before_install: + - go get -t -v ./... + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) + diff --git a/vendor/github.com/bmatcuk/doublestar/v4/LICENSE b/vendor/github.com/bmatcuk/doublestar/v4/LICENSE new file mode 100644 index 00000000000..309c9d1d113 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Bob Matcuk + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/bmatcuk/doublestar/v4/README.md b/vendor/github.com/bmatcuk/doublestar/v4/README.md new file mode 100644 index 00000000000..a86bdd40b30 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/README.md @@ -0,0 +1,278 @@ +# doublestar + +Path pattern matching and globbing supporting `doublestar` (`**`) patterns. 
+ +[![PkgGoDev](https://pkg.go.dev/badge/github.com/bmatcuk/doublestar)](https://pkg.go.dev/github.com/bmatcuk/doublestar/v4) +[![Release](https://img.shields.io/github/release/bmatcuk/doublestar.svg?branch=master)](https://github.com/bmatcuk/doublestar/releases) +[![Build Status](https://travis-ci.com/bmatcuk/doublestar.svg?branch=master)](https://travis-ci.com/bmatcuk/doublestar) +[![codecov.io](https://img.shields.io/codecov/c/github/bmatcuk/doublestar.svg?branch=master)](https://codecov.io/github/bmatcuk/doublestar?branch=master) + +## About + +#### [Upgrading?](UPGRADING.md) + +**doublestar** is a [golang] implementation of path pattern matching and +globbing with support for "doublestar" (aka globstar: `**`) patterns. + +doublestar patterns match files and directories recursively. For example, if +you had the following directory structure: + +```bash +grandparent +`-- parent + |-- child1 + `-- child2 +``` + +You could find the children with patterns such as: `**/child*`, +`grandparent/**/child?`, `**/parent/*`, or even just `**` by itself (which will +return all files and directories recursively). + +Bash's globstar is doublestar's inspiration and, as such, works similarly. +Note that the doublestar must appear as a path component by itself. A pattern +such as `/path**` is invalid and will be treated the same as `/path*`, but +`/path*/**` should achieve the desired result. Additionally, `/path/**` will +match all directories and files under the path directory, but `/path/**/` will +only match directories. + +v4 is a complete rewrite with a focus on performance. Additionally, +[doublestar] has been updated to use the new [io/fs] package for filesystem +access. As a result, it is only supported by [golang] v1.16+. + +## Installation + +**doublestar** can be installed via `go get`: + +```bash +go get github.com/bmatcuk/doublestar/v4 +``` + +To use it in your code, you must import it: + +```go +import "github.com/bmatcuk/doublestar/v4" +``` + +## Usage + +### Match + +```go +func Match(pattern, name string) (bool, error) +``` + +Match returns true if `name` matches the file name `pattern` ([see +"patterns"]). `name` and `pattern` are split on forward slash (`/`) characters +and may be relative or absolute. + +Match requires pattern to match all of name, not just a substring. The only +possible returned error is ErrBadPattern, when pattern is malformed. + +Note: this is meant as a drop-in replacement for `path.Match()` which always +uses `'/'` as the path separator. If you want to support systems which use a +different path separator (such as Windows), what you want is `PathMatch()`. +Alternatively, you can run `filepath.ToSlash()` on both pattern and name and +then use this function. + + +### PathMatch + +```go +func PathMatch(pattern, name string) (bool, error) +``` + +PathMatch returns true if `name` matches the file name `pattern` ([see +"patterns"]). The difference between Match and PathMatch is that PathMatch will +automatically use your system's path separator to split `name` and `pattern`. +On systems where the path separator is `'\'`, escaping will be disabled. + +Note: this is meant as a drop-in replacement for `filepath.Match()`. It assumes +that both `pattern` and `name` are using the system's path separator. If you +can't be sure of that, use `filepath.ToSlash()` on both `pattern` and `name`, +and then use the `Match()` function instead. 
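+
+For example, using the directory tree from the About section above (results shown as comments; the patterns themselves are just illustrations):
+
+```go
+package main
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/bmatcuk/doublestar/v4"
+)
+
+func main() {
+	// Match always splits pattern and name on '/', regardless of OS.
+	ok, err := doublestar.Match("grandparent/**/child?", "grandparent/parent/child1")
+	fmt.Println(ok, err) // true <nil>
+
+	// PathMatch splits on the operating system's path separator instead,
+	// so convert '/' patterns with filepath.FromSlash() when needed.
+	ok, err = doublestar.PathMatch(filepath.FromSlash("**/child*"), filepath.FromSlash("grandparent/parent/child2"))
+	fmt.Println(ok, err) // true <nil>
+}
+```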
+ +### Glob + +```go +func Glob(fsys fs.FS, pattern string) ([]string, error) +``` + +Glob returns the names of all files matching pattern or nil if there is no +matching file. The syntax of patterns is the same as in `Match()`. The pattern +may describe hierarchical names such as `usr/*/bin/ed`. + +Glob ignores file system errors such as I/O errors reading directories. The +only possible returned error is ErrBadPattern, reporting that the pattern is +malformed. + +Note: this is meant as a drop-in replacement for `io/fs.Glob()`. Like +`io/fs.Glob()`, this function assumes that your pattern uses `/` as the path +separator even if that's not correct for your OS (like Windows). If you aren't +sure if that's the case, you can use `filepath.ToSlash()` on your pattern +before calling `Glob()`. + +Like `io/fs.Glob()`, patterns containing `/./`, `/../`, or starting with `/` +will return no results and no errors. This seems to be a [conscious +decision](https://github.com/golang/go/issues/44092#issuecomment-774132549), +even if counter-intuitive. You can use [SplitPattern] to divide a pattern into +a base path (to initialize an `FS` object) and pattern. + +### GlobWalk + +```go +type GlobWalkFunc func(path string, d fs.DirEntry) error + +func GlobWalk(fsys fs.FS, pattern string, fn GlobWalkFunc) error +``` + +GlobWalk calls the callback function `fn` for every file matching pattern. The +syntax of pattern is the same as in Match() and the behavior is the same as +Glob(), with regard to limitations (such as patterns containing `/./`, `/../`, +or starting with `/`). The pattern may describe hierarchical names such as +usr/*/bin/ed. + +GlobWalk may have a small performance benefit over Glob if you do not need a +slice of matches because it can avoid allocating memory for the matches. +Additionally, GlobWalk gives you access to the `fs.DirEntry` objects for each +match, and lets you quit early by returning a non-nil error from your callback +function. + +GlobWalk ignores file system errors such as I/O errors reading directories. +GlobWalk may return ErrBadPattern, reporting that the pattern is malformed. +Additionally, if the callback function `fn` returns an error, GlobWalk will +exit immediately and return that error. + +Like Glob(), this function assumes that your pattern uses `/` as the path +separator even if that's not correct for your OS (like Windows). If you aren't +sure if that's the case, you can use filepath.ToSlash() on your pattern before +calling GlobWalk(). + +### SplitPattern + +```go +func SplitPattern(p string) (base, pattern string) +``` + +SplitPattern is a utility function. Given a pattern, SplitPattern will return +two strings: the first string is everything up to the last slash (`/`) that +appears _before_ any unescaped "meta" characters (ie, `*?[{`). The second +string is everything after that slash. For example, given the pattern: + +``` +../../path/to/meta*/** + ^----------- split here +``` + +SplitPattern returns "../../path/to" and "meta*/**". This is useful for +initializing os.DirFS() to call Glob() because Glob() will silently fail if +your pattern includes `/./` or `/../`. For example: + +```go +base, pattern := SplitPattern("../../path/to/meta*/**") +fsys := os.DirFS(base) +matches, err := Glob(fsys, pattern) +``` + +If SplitPattern cannot find somewhere to split the pattern (for example, +`meta*/**`), it will return "." and the unaltered pattern (`meta*/**` in this +example). 
+ +Of course, it is your responsibility to decide if the returned base path is +"safe" in the context of your application. Perhaps you could use Match() to +validate against a list of approved base directories? + +### ValidatePattern + +```go +func ValidatePattern(s string) bool +``` + +Validate a pattern. Patterns are validated while they run in Match(), +PathMatch(), and Glob(), so, you normally wouldn't need to call this. However, +there are cases where this might be useful: for example, if your program allows +a user to enter a pattern that you'll run at a later time, you might want to +validate it. + +ValidatePattern assumes your pattern uses '/' as the path separator. + +### ValidatePathPattern + +```go +func ValidatePathPattern(s string) bool +``` + +Like ValidatePattern, only uses your OS path separator. In other words, use +ValidatePattern if you would normally use Match() or Glob(). Use +ValidatePathPattern if you would normally use PathMatch(). Keep in mind, Glob() +requires '/' separators, even if your OS uses something else. + +### Patterns + +**doublestar** supports the following special terms in the patterns: + +Special Terms | Meaning +------------- | ------- +`*` | matches any sequence of non-path-separators +`/**/` | matches zero or more directories +`?` | matches any single non-path-separator character +`[class]` | matches any single non-path-separator character against a class of characters ([see "character classes"]) +`{alt1,...}` | matches a sequence of characters if one of the comma-separated alternatives matches + +Any character with a special meaning can be escaped with a backslash (`\`). + +A doublestar (`**`) should appear surrounded by path separators such as `/**/`. +A mid-pattern doublestar (`**`) behaves like bash's globstar option: a pattern +such as `path/to/**.txt` would return the same results as `path/to/*.txt`. The +pattern you're looking for is `path/to/**/*.txt`. + +#### Character Classes + +Character classes support the following: + +Class | Meaning +---------- | ------- +`[abc]` | matches any single character within the set +`[a-z]` | matches any single character in the range +`[^class]` | matches any single character which does *not* match the class +`[!class]` | same as `^`: negates the class + +## Performance + +``` +goos: darwin +goarch: amd64 +pkg: github.com/bmatcuk/doublestar/v4 +cpu: Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz +BenchmarkMatch-8 285639 3868 ns/op 0 B/op 0 allocs/op +BenchmarkGoMatch-8 286945 3726 ns/op 0 B/op 0 allocs/op +BenchmarkPathMatch-8 320511 3493 ns/op 0 B/op 0 allocs/op +BenchmarkGoPathMatch-8 304236 3434 ns/op 0 B/op 0 allocs/op +BenchmarkGlob-8 466 2501123 ns/op 190225 B/op 2849 allocs/op +BenchmarkGlobWalk-8 476 2536293 ns/op 184017 B/op 2750 allocs/op +BenchmarkGoGlob-8 463 2574836 ns/op 194249 B/op 2929 allocs/op +``` + +These benchmarks (in `doublestar_test.go`) compare Match() to path.Match(), +PathMath() to filepath.Match(), and Glob() + GlobWalk() to io/fs.Glob(). They +only run patterns that the standard go packages can understand as well (so, no +`{alts}` or `**`) for a fair comparison. Of course, alts and doublestars will +be less performant than the other pattern meta characters. + +Alts are essentially like running multiple patterns, the number of which can +get large if your pattern has alts nested inside alts. This affects both +matching (ie, Match()) and globbing (Glob()). 
+ +`**` performance in matching is actually pretty similar to a regular `*`, but +can cause a large number of reads when globbing as it will need to recursively +traverse your filesystem. + +## License + +[MIT License](LICENSE) + +[SplitPattern]: #splitpattern +[doublestar]: https://github.com/bmatcuk/doublestar +[golang]: http://golang.org/ +[io/fs]: https://golang.org/pkg/io/fs/ +[see "character classes"]: #character-classes +[see "patterns"]: #patterns diff --git a/vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md b/vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md new file mode 100644 index 00000000000..25aace3db01 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md @@ -0,0 +1,63 @@ +# Upgrading from v3 to v4 + +v4 is a complete rewrite with a focus on performance. Additionally, +[doublestar] has been updated to use the new [io/fs] package for filesystem +access. As a result, it is only supported by [golang] v1.16+. + +`Match()` and `PathMatch()` mostly did not change, besides big performance +improvements. Their API is the same. However, note the following corner cases: + +* In previous versions of [doublestar], `PathMatch()` could accept patterns + that used either platform-specific path separators, or `/`. This was + undocumented and didn't match `filepath.Match()`. In v4, both `pattern` and + `name` must be using appropriate path separators for the platform. You can + use `filepath.FromSlash()` to change `/` to platform-specific separators if + you aren't sure. +* In previous versions of [doublestar], a pattern such as `path/to/a/**` would + _not_ match `path/to/a`. In v4, this pattern _will_ match because if `a` was + a directory, `Glob()` would return it. In other words, the following returns + true: `Match("path/to/a/**", "path/to/a")` + +`Glob()` changed from using a [doublestar]-specific filesystem abstraction (the +`OS` interface) to the [io/fs] package. As a result, it now takes a `fs.FS` as +its first argument. This change has a couple ramifications: + +* Like `io/fs.Glob`, `pattern` must use a `/` as path separator, even on + platforms that use something else. You can use `filepath.ToSlash()` on your + patterns if you aren't sure. +* Patterns that contain `/./` or `/../` are invalid. The [io/fs] package + rejects them, returning an IO error. Since `Glob()` ignores IO errors, it'll + end up being silently rejected. You can run `path.Clean()` to ensure they are + removed from the pattern. + +v4 also added a `GlobWalk()` function that is slightly more performant than +`Glob()` if you just need to iterate over the results and don't need a string +slice. You also get `fs.DirEntry` objects for each result, and can quit early +if your callback returns an error. + +# Upgrading from v2 to v3 + +v3 introduced using `!` to negate character classes, in addition to `^`. If any +of your patterns include a character class that starts with an exclamation mark +(ie, `[!...]`), you'll need to update the pattern to escape or move the +exclamation mark. Note that, like the caret (`^`), it only negates the +character class if it is the first character in the character class. + +# Upgrading from v1 to v2 + +The change from v1 to v2 was fairly minor: the return type of the `Open` method +on the `OS` interface was changed from `*os.File` to `File`, a new interface +exported by doublestar. 
The new `File` interface only defines the functionality +doublestar actually needs (`io.Closer` and `Readdir`), making it easier to use +doublestar with [go-billy], [afero], or something similar. If you were using +this functionality, updating should be as easy as updating `Open's` return +type, since `os.File` already implements `doublestar.File`. + +If you weren't using this functionality, updating should be as easy as changing +your dependencies to point to v2. + +[afero]: https://github.com/spf13/afero +[doublestar]: https://github.com/bmatcuk/doublestar +[go-billy]: https://github.com/src-d/go-billy +[golang]: http://golang.org/ +[io/fs]: https://golang.org/pkg/io/fs/ diff --git a/vendor/github.com/bmatcuk/doublestar/v4/doublestar.go b/vendor/github.com/bmatcuk/doublestar/v4/doublestar.go new file mode 100644 index 00000000000..269f6f0d0ae --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/doublestar.go @@ -0,0 +1,8 @@ +package doublestar + +import ( + "path" +) + +// ErrBadPattern indicates a pattern was malformed. +var ErrBadPattern = path.ErrBadPattern diff --git a/vendor/github.com/bmatcuk/doublestar/v4/glob.go b/vendor/github.com/bmatcuk/doublestar/v4/glob.go new file mode 100644 index 00000000000..17014eb922f --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/glob.go @@ -0,0 +1,395 @@ +package doublestar + +import ( + "io/fs" + "path" +) + +// Glob returns the names of all files matching pattern or nil if there is no +// matching file. The syntax of pattern is the same as in Match(). The pattern +// may describe hierarchical names such as usr/*/bin/ed. +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, reporting that the +// pattern is malformed. +// +// Note: this is meant as a drop-in replacement for io/fs.Glob(). Like +// io/fs.Glob(), this function assumes that your pattern uses `/` as the path +// separator even if that's not correct for your OS (like Windows). If you +// aren't sure if that's the case, you can use filepath.ToSlash() on your +// pattern before calling Glob(). +// +// Like `io/fs.Glob()`, patterns containing `/./`, `/../`, or starting with `/` +// will return no results and no errors. You can use SplitPattern to divide a +// pattern into a base path (to initialize an `FS` object) and pattern. +// +func Glob(fsys fs.FS, pattern string) ([]string, error) { + if !ValidatePattern(pattern) { + return nil, ErrBadPattern + } + if hasMidDoubleStar(pattern) { + // If the pattern has a `**` anywhere but the very end, GlobWalk is more + // performant because it can get away with less allocations. If the pattern + // ends in a `**`, both methods are pretty much the same, but Glob has a + // _very_ slight advantage because of lower function call overhead. + var matches []string + err := doGlobWalk(fsys, pattern, true, func(p string, d fs.DirEntry) error { + matches = append(matches, p) + return nil + }) + return matches, err + } + return doGlob(fsys, pattern, nil, true) +} + +// Does the actual globbin' +func doGlob(fsys fs.FS, pattern string, m []string, firstSegment bool) (matches []string, err error) { + matches = m + patternStart := indexMeta(pattern) + if patternStart == -1 { + // pattern doesn't contain any meta characters - does a file matching the + // pattern exist? + if exists(fsys, pattern) { + matches = append(matches, pattern) + return + } else { + return + } + } + + dir := "." 
+ splitIdx := lastIndexSlashOrAlt(pattern) + if splitIdx != -1 { + if pattern[splitIdx] == '}' { + openingIdx := indexMatchedOpeningAlt(pattern[:splitIdx]) + if openingIdx == -1 { + // if there's no matching opening index, technically Match() will treat + // an unmatched `}` as nothing special, so... we will, too! + splitIdx = lastIndexSlash(pattern[:splitIdx]) + } else { + // otherwise, we have to handle the alts: + return globAlts(fsys, pattern, openingIdx, splitIdx, matches, firstSegment) + } + } + + dir = pattern[:splitIdx] + pattern = pattern[splitIdx+1:] + } + + // if `splitIdx` is less than `patternStart`, we know `dir` has no meta + // characters. They would be equal if they are both -1, which means `dir` + // will be ".", and we know that doesn't have meta characters either. + if splitIdx <= patternStart{ + return globDir(fsys, dir, pattern, matches, firstSegment) + } + + var dirs []string + dirs, err = doGlob(fsys, dir, matches, false) + if err != nil { + return + } + for _, d := range dirs { + matches, err = globDir(fsys, d, pattern, matches, firstSegment) + if err != nil { + return + } + } + + return +} + +// handle alts in the glob pattern - `openingIdx` and `closingIdx` are the +// indexes of `{` and `}`, respectively +func globAlts(fsys fs.FS, pattern string, openingIdx, closingIdx int, m []string, firstSegment bool) (matches []string, err error) { + matches = m + + var dirs []string + startIdx := 0 + afterIdx := closingIdx + 1 + splitIdx := lastIndexSlashOrAlt(pattern[:openingIdx]) + if splitIdx == -1 || pattern[splitIdx] == '}' { + // no common prefix + dirs = []string{""} + } else { + // our alts have a common prefix that we can process first + dirs, err = doGlob(fsys, pattern[:splitIdx], matches, false) + if err != nil { + return + } + + startIdx = splitIdx + 1 + } + + for _, d := range dirs { + patIdx := openingIdx + 1 + altResultsStartIdx := len(matches) + thisResultStartIdx := altResultsStartIdx + for patIdx < closingIdx { + nextIdx := indexNextAlt(pattern[patIdx:closingIdx], true) + if nextIdx == -1 { + nextIdx = closingIdx + } else { + nextIdx += patIdx + } + + alt := buildAlt(d, pattern, startIdx, openingIdx, patIdx, nextIdx, afterIdx) + matches, err = doGlob(fsys, alt, matches, firstSegment) + if err != nil { + return + } + + matchesLen := len(matches) + if altResultsStartIdx != thisResultStartIdx && thisResultStartIdx != matchesLen { + // Alts can result in matches that aren't sorted, or, worse, duplicates + // (consider the trivial pattern `path/to/{a,*}`). Since doGlob returns + // sorted results, we can do a sort of in-place merge and remove + // duplicates. 
But, we only need to do this if this isn't the first alt + // (ie, `altResultsStartIdx != thisResultsStartIdx`) and if the latest + // alt actually added some matches (`thisResultStartIdx != + // len(matches)`) + matches = sortAndRemoveDups(matches, altResultsStartIdx, thisResultStartIdx, matchesLen) + + // length of matches may have changed + thisResultStartIdx = len(matches) + } else { + thisResultStartIdx = matchesLen + } + + patIdx = nextIdx + 1 + } + } + + return +} + +// find files/subdirectories in the given `dir` that match `pattern` +func globDir(fsys fs.FS, dir, pattern string, matches []string, canMatchFiles bool) (m []string, e error) { + m = matches + + if pattern == "" { + // pattern can be an empty string if the original pattern ended in a slash, + // in which case, we should just return dir, but only if it actually exists + // and it's a directory (or a symlink to a directory) + if isPathDir(fsys, dir) { + m = append(m, dir) + } + return + } + + if pattern == "**" { + m = globDoubleStar(fsys, dir, m, canMatchFiles) + return + } + + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + // ignore IO errors + return + } + + var matched bool + for _, info := range dirs { + name := info.Name() + if canMatchFiles || isDir(fsys, dir, name, info) { + matched, e = matchWithSeparator(pattern, name, '/', false) + if e != nil { + return + } + if matched { + m = append(m, path.Join(dir, name)) + } + } + } + + return +} + +func globDoubleStar(fsys fs.FS, dir string, matches []string, canMatchFiles bool) []string { + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + // ignore IO errors + return matches + } + + // `**` can match *this* dir, so add it + matches = append(matches, dir) + for _, info := range dirs { + name := info.Name() + if isDir(fsys, dir, name, info) { + matches = globDoubleStar(fsys, path.Join(dir, name), matches, canMatchFiles) + } else if canMatchFiles { + matches = append(matches, path.Join(dir, name)) + } + } + + return matches +} + +// Returns true if the pattern has a doublestar in the middle of the pattern. +// In this case, GlobWalk is faster because it can get away with less +// allocations. However, Glob has a _very_ slight edge if the pattern ends in +// `**`. +func hasMidDoubleStar(p string) bool { + // subtract 3: 2 because we want to return false if the pattern ends in `**` + // (Glob is _very_ slightly faster in that case), and the extra 1 because our + // loop checks p[i] and p[i+1]. + l := len(p) - 3 + for i := 0; i < l; i++ { + if p[i] == '\\' { + // escape next byte + i++ + } else if p[i] == '*' && p[i+1] == '*' { + return true + } + } + return false +} + +// Returns the index of the first unescaped meta character, or negative 1. +func indexMeta(s string) int { + var c byte + l := len(s) + for i := 0; i < l; i++ { + c = s[i] + if c == '*' || c == '?' || c == '[' || c == '{' { + return i + } else if c == '\\' { + // skip next byte + i++ + } + } + return -1 +} + +// Returns the index of the last unescaped slash or closing alt (`}`) in the +// string, or negative 1. +func lastIndexSlashOrAlt(s string) int { + for i := len(s) - 1; i >= 0; i-- { + if (s[i] == '/' || s[i] == '}') && (i == 0 || s[i-1] != '\\') { + return i + } + } + return -1 +} + +// Returns the index of the last unescaped slash in the string, or negative 1. 
+func lastIndexSlash(s string) int { + for i := len(s) - 1; i >= 0; i-- { + if s[i] == '/' && (i == 0 || s[i-1] != '\\') { + return i + } + } + return -1 +} + +// Assuming the byte after the end of `s` is a closing `}`, this function will +// find the index of the matching `{`. That is, it'll skip over any nested `{}` +// and account for escaping. +func indexMatchedOpeningAlt(s string) int { + alts := 1 + for i := len(s) - 1; i >= 0; i-- { + if s[i] == '}' && (i == 0 || s[i-1] != '\\') { + alts++ + } else if s[i] == '{' && (i == 0 || s[i-1] != '\\') { + if alts--; alts == 0 { + return i + } + } + } + return -1 +} + +// Returns true if the path exists +func exists(fsys fs.FS, name string) bool { + if _, err := fs.Stat(fsys, name); err != nil { + return false + } + return true +} + +// Returns true if the path is a directory, or a symlink to a directory +func isPathDir(fsys fs.FS, name string) bool { + info, err := fs.Stat(fsys, name) + if err != nil { + return false + } + return info.IsDir() +} + +// Returns whether or not the given DirEntry is a directory. If the DirEntry +// represents a symbolic link, the link is followed by running fs.Stat() on +// `path.Join(dir, name)` +func isDir(fsys fs.FS, dir string, name string, info fs.DirEntry) bool { + if (info.Type() & fs.ModeSymlink) > 0 { + finfo, err := fs.Stat(fsys, path.Join(dir, name)) + if err != nil { + return false + } + return finfo.IsDir() + } + return info.IsDir() +} + +// Builds a string from an alt +func buildAlt(prefix, pattern string, startIdx, openingIdx, currentIdx, nextIdx, afterIdx int) string { + // pattern: + // ignored/start{alts,go,here}remaining - len = 36 + // | | | | ^--- afterIdx = 27 + // | | | \--------- nextIdx = 21 + // | | \----------- currentIdx = 19 + // | \----------------- openingIdx = 13 + // \---------------------- startIdx = 8 + // + // result: + // prefix/startgoremaining - len = 7 + 5 + 2 + 9 = 23 + var buf []byte + patLen := len(pattern) + size := (openingIdx - startIdx) + (nextIdx - currentIdx) + (patLen - afterIdx) + if prefix != "" { + buf = make([]byte, 0, size + len(prefix) + 1) + buf = append(buf, prefix...) + buf = append(buf, '/') + } else { + buf = make([]byte, 0, size) + } + buf = append(buf, pattern[startIdx:openingIdx]...) + buf = append(buf, pattern[currentIdx:nextIdx]...) + if afterIdx < patLen { + buf = append(buf, pattern[afterIdx:]...) + } + return string(buf) +} + +// Running alts can produce results that are not sorted, and, worse, can cause +// duplicates (consider the trivial pattern `path/to/{a,*}`). Since we know +// each run of doGlob is sorted, we can basically do the "merge" step of a +// merge sort in-place. +func sortAndRemoveDups(matches []string, idx1, idx2, l int) []string { + var tmp string + for ; idx1 < idx2; idx1++ { + if matches[idx1] < matches[idx2] { + // order is correct + continue + } else if matches[idx1] > matches[idx2] { + // need to swap and then re-sort matches above idx2 + tmp = matches[idx1] + matches[idx1] = matches[idx2] + + shft := idx2 + 1 + for ; shft < l && matches[shft] < tmp; shft++ { + matches[shft - 1] = matches[shft] + } + matches[shft - 1] = tmp + } else { + // duplicate - shift matches above idx2 down one and decrement l + for shft := idx2 + 1; shft < l; shft++ { + matches[shft - 1] = matches[shft] + } + if l--; idx2 == l { + // nothing left to do... 
matches[idx2:] must have been full of dups + break + } + } + } + return matches[:l] +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/globwalk.go b/vendor/github.com/bmatcuk/doublestar/v4/globwalk.go new file mode 100644 index 00000000000..769f6d3a924 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/globwalk.go @@ -0,0 +1,275 @@ +package doublestar + +import ( + "io/fs" + "path" +) + +// Callback function for GlobWalk(). If the function returns an error, GlobWalk +// will end immediately and return the same error. +type GlobWalkFunc func(path string, d fs.DirEntry) error + +// GlobWalk calls the callback function `fn` for every file matching pattern. +// The syntax of pattern is the same as in Match() and the behavior is the same +// as Glob(), with regard to limitations (such as patterns containing `/./`, +// `/../`, or starting with `/`). The pattern may describe hierarchical names +// such as usr/*/bin/ed. +// +// GlobWalk may have a small performance benefit over Glob if you do not need a +// slice of matches because it can avoid allocating memory for the matches. +// Additionally, GlobWalk gives you access to the `fs.DirEntry` objects for +// each match, and lets you quit early by returning a non-nil error from your +// callback function. +// +// GlobWalk ignores file system errors such as I/O errors reading directories. +// GlobWalk may return ErrBadPattern, reporting that the pattern is malformed. +// Additionally, if the callback function `fn` returns an error, GlobWalk will +// exit immediately and return that error. +// +// Like Glob(), this function assumes that your pattern uses `/` as the path +// separator even if that's not correct for your OS (like Windows). If you +// aren't sure if that's the case, you can use filepath.ToSlash() on your +// pattern before calling GlobWalk(). +// +func GlobWalk(fsys fs.FS, pattern string, fn GlobWalkFunc) error { + if !ValidatePattern(pattern) { + return ErrBadPattern + } + return doGlobWalk(fsys, pattern, true, fn) +} + +// Actually execute GlobWalk +func doGlobWalk(fsys fs.FS, pattern string, firstSegment bool, fn GlobWalkFunc) error { + patternStart := indexMeta(pattern) + if patternStart == -1 { + // pattern doesn't contain any meta characters - does a file matching the + // pattern exist? + info, err := fs.Stat(fsys, pattern) + if err == nil { + err = fn(pattern, dirEntryFromFileInfo(info)) + return err + } else { + // ignore IO errors + return nil + } + } + + dir := "." + splitIdx := lastIndexSlashOrAlt(pattern) + if splitIdx != -1 { + if pattern[splitIdx] == '}' { + openingIdx := indexMatchedOpeningAlt(pattern[:splitIdx]) + if openingIdx == -1 { + // if there's no matching opening index, technically Match() will treat + // an unmatched `}` as nothing special, so... we will, too! + splitIdx = lastIndexSlash(pattern[:splitIdx]) + } else { + // otherwise, we have to handle the alts: + return globAltsWalk(fsys, pattern, openingIdx, splitIdx, firstSegment, fn) + } + } + + dir = pattern[:splitIdx] + pattern = pattern[splitIdx+1:] + } + + // if `splitIdx` is less than `patternStart`, we know `dir` has no meta + // characters. They would be equal if they are both -1, which means `dir` + // will be ".", and we know that doesn't have meta characters either. 
+ if splitIdx <= patternStart { + return globDirWalk(fsys, dir, pattern, firstSegment, fn) + } + + return doGlobWalk(fsys, dir, false, func(p string, d fs.DirEntry) error { + if err := globDirWalk(fsys, p, pattern, firstSegment, fn); err != nil { + return err + } + return nil + }) +} + +// handle alts in the glob pattern - `openingIdx` and `closingIdx` are the +// indexes of `{` and `}`, respectively +func globAltsWalk(fsys fs.FS, pattern string, openingIdx, closingIdx int, firstSegment bool, fn GlobWalkFunc) (err error) { + var matches []DirEntryWithFullPath + startIdx := 0 + afterIdx := closingIdx + 1 + splitIdx := lastIndexSlashOrAlt(pattern[:openingIdx]) + if splitIdx == -1 || pattern[splitIdx] == '}' { + // no common prefix + matches, err = doGlobAltsWalk(fsys, "", pattern, startIdx, openingIdx, closingIdx, afterIdx, firstSegment, matches) + if err != nil { + return + } + } else { + // our alts have a common prefix that we can process first + startIdx = splitIdx + 1 + err = doGlobWalk(fsys, pattern[:splitIdx], false, func(p string, d fs.DirEntry) (e error) { + matches, e = doGlobAltsWalk(fsys, p, pattern, startIdx, openingIdx, closingIdx, afterIdx, firstSegment, matches) + return e + }) + if err != nil { + return + } + } + + for _, m := range matches { + if err = fn(m.Path, m.Entry); err != nil { + return + } + } + + return +} + +// runs actual matching for alts +func doGlobAltsWalk(fsys fs.FS, d, pattern string, startIdx, openingIdx, closingIdx, afterIdx int, firstSegment bool, m []DirEntryWithFullPath) (matches []DirEntryWithFullPath, err error) { + matches = m + matchesLen := len(m) + patIdx := openingIdx + 1 + for patIdx < closingIdx { + nextIdx := indexNextAlt(pattern[patIdx:closingIdx], true) + if nextIdx == -1 { + nextIdx = closingIdx + } else { + nextIdx += patIdx + } + + alt := buildAlt(d, pattern, startIdx, openingIdx, patIdx, nextIdx, afterIdx) + err = doGlobWalk(fsys, alt, firstSegment, func(p string, d fs.DirEntry) error { + // insertion sort, ignoring dups + insertIdx := matchesLen + for insertIdx > 0 && matches[insertIdx-1].Path > p { + insertIdx-- + } + if insertIdx > 0 && matches[insertIdx-1].Path == p { + // dup + return nil + } + + // append to grow the slice, then insert + entry := DirEntryWithFullPath{d, p} + matches = append(matches, entry) + for i := matchesLen; i > insertIdx; i-- { + matches[i] = matches[i-1] + } + matches[insertIdx] = entry + matchesLen++ + + return nil + }) + if err != nil { + return + } + + patIdx = nextIdx + 1 + } + + return +} + +func globDirWalk(fsys fs.FS, dir, pattern string, canMatchFiles bool, fn GlobWalkFunc) (e error) { + if pattern == "" { + // pattern can be an empty string if the original pattern ended in a slash, + // in which case, we should just return dir, but only if it actually exists + // and it's a directory (or a symlink to a directory) + info, err := fs.Stat(fsys, dir) + if err != nil || !info.IsDir() { + return nil + } + return fn(dir, dirEntryFromFileInfo(info)) + } + + if pattern == "**" { + // `**` can match *this* dir + info, err := fs.Stat(fsys, dir) + if err != nil || !info.IsDir() { + return nil + } + if e = fn(dir, dirEntryFromFileInfo(info)); e != nil { + return + } + return globDoubleStarWalk(fsys, dir, canMatchFiles, fn) + } + + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + // ignore IO errors + return nil + } + + var matched bool + for _, info := range dirs { + name := info.Name() + if canMatchFiles || isDir(fsys, dir, name, info) { + matched, e = matchWithSeparator(pattern, name, '/', false) + if 
e != nil { + return + } + if matched { + if e = fn(path.Join(dir, name), info); e != nil { + return + } + } + } + } + + return +} + +func globDoubleStarWalk(fsys fs.FS, dir string, canMatchFiles bool, fn GlobWalkFunc) (e error) { + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + // ignore IO errors + return + } + + // `**` can match *this* dir, so add it + for _, info := range dirs { + name := info.Name() + if isDir(fsys, dir, name, info) { + p := path.Join(dir, name) + if e = fn(p, info); e != nil { + return + } + if e = globDoubleStarWalk(fsys, p, canMatchFiles, fn); e != nil { + return + } + } else if canMatchFiles { + if e = fn(path.Join(dir, name), info); e != nil { + return + } + } + } + + return +} + +type DirEntryFromFileInfo struct { + fi fs.FileInfo +} + +func (d *DirEntryFromFileInfo) Name() string { + return d.fi.Name() +} + +func (d *DirEntryFromFileInfo) IsDir() bool { + return d.fi.IsDir() +} + +func (d *DirEntryFromFileInfo) Type() fs.FileMode { + return d.fi.Mode().Type() +} + +func (d *DirEntryFromFileInfo) Info() (fs.FileInfo, error) { + return d.fi, nil +} + +func dirEntryFromFileInfo(fi fs.FileInfo) fs.DirEntry { + return &DirEntryFromFileInfo{fi} +} + +type DirEntryWithFullPath struct { + Entry fs.DirEntry + Path string +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/go.mod b/vendor/github.com/bmatcuk/doublestar/v4/go.mod new file mode 100644 index 00000000000..2d915a43609 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/go.mod @@ -0,0 +1,3 @@ +module github.com/bmatcuk/doublestar/v4 + +go 1.16 diff --git a/vendor/github.com/bmatcuk/doublestar/v4/match.go b/vendor/github.com/bmatcuk/doublestar/v4/match.go new file mode 100644 index 00000000000..5fef2cde6f5 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/match.go @@ -0,0 +1,374 @@ +package doublestar + +import ( + "path/filepath" + "unicode/utf8" +) + +// Match reports whether name matches the shell pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-path-separators +// '/**/' matches zero or more directories +// '?' matches any single non-path-separator character +// '[' [ '^' '!' ] { character-range } ']' +// character class (must be non-empty) +// starting with `^` or `!` negates the class +// '{' { term } [ ',' { term } ... ] '}' +// alternatives +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c +// lo '-' hi matches character c for lo <= c <= hi +// +// Match returns true if `name` matches the file name `pattern`. `name` and +// `pattern` are split on forward slash (`/`) characters and may be relative or +// absolute. +// +// Match requires pattern to match all of name, not just a substring. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// A doublestar (`**`) should appear surrounded by path separators such as +// `/**/`. A mid-pattern doublestar (`**`) behaves like bash's globstar +// option: a pattern such as `path/to/**.txt` would return the same results as +// `path/to/*.txt`. The pattern you're looking for is `path/to/**/*.txt`. +// +// Note: this is meant as a drop-in replacement for path.Match() which +// always uses '/' as the path separator. If you want to support systems +// which use a different path separator (such as Windows), what you want +// is PathMatch(). 
Alternatively, you can run filepath.ToSlash() on both +// pattern and name and then use this function. +// +func Match(pattern, name string) (bool, error) { + return matchWithSeparator(pattern, name, '/', true) +} + +// PathMatch returns true if `name` matches the file name `pattern`. The +// difference between Match and PathMatch is that PathMatch will automatically +// use your system's path separator to split `name` and `pattern`. On systems +// where the path separator is `'\'`, escaping will be disabled. +// +// Note: this is meant as a drop-in replacement for filepath.Match(). It +// assumes that both `pattern` and `name` are using the system's path +// separator. If you can't be sure of that, use filepath.ToSlash() on both +// `pattern` and `name`, and then use the Match() function instead. +// +func PathMatch(pattern, name string) (bool, error) { + return matchWithSeparator(pattern, name, filepath.Separator, true) +} + +func matchWithSeparator(pattern, name string, separator rune, validate bool) (matched bool, err error) { + doublestarPatternBacktrack := -1 + doublestarNameBacktrack := -1 + starPatternBacktrack := -1 + starNameBacktrack := -1 + patIdx := 0 + nameIdx := 0 + patLen := len(pattern) + nameLen := len(name) + startOfSegment := true +MATCH: + for nameIdx < nameLen { + if patIdx < patLen { + switch pattern[patIdx] { + case '*': + if patIdx++; patIdx < patLen && pattern[patIdx] == '*' { + // doublestar - must begin with a path separator, otherwise we'll + // treat it like a single star like bash + patIdx++ + if startOfSegment { + if patIdx >= patLen { + // pattern ends in `/**`: return true + return true, nil + } + + // doublestar must also end with a path separator, otherwise we're + // just going to treat the doublestar as a single star like bash + patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:]) + if patRune == separator { + patIdx += patRuneLen + + doublestarPatternBacktrack = patIdx + doublestarNameBacktrack = nameIdx + starPatternBacktrack = -1 + starNameBacktrack = -1 + continue + } + } + } + startOfSegment = false + + starPatternBacktrack = patIdx + starNameBacktrack = nameIdx + continue + + case '?': + startOfSegment = false + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + if nameRune == separator { + // `?` cannot match the separator + break + } + + patIdx++ + nameIdx += nameRuneLen + continue + + case '[': + startOfSegment = false + if patIdx++; patIdx >= patLen { + // class didn't end + return false, ErrBadPattern + } + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + + matched := false + negate := pattern[patIdx] == '!' 
|| pattern[patIdx] == '^' + if negate { + patIdx++ + } + + if patIdx >= patLen || pattern[patIdx] == ']' { + // class didn't end or empty character class + return false, ErrBadPattern + } + + last := utf8.MaxRune + for patIdx < patLen && pattern[patIdx] != ']' { + patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:]) + patIdx += patRuneLen + + // match a range + if last < utf8.MaxRune && patRune == '-' && patIdx < patLen && pattern[patIdx] != ']' { + if pattern[patIdx] == '\\' { + // next character is escaped + patIdx++ + } + patRune, patRuneLen = utf8.DecodeRuneInString(pattern[patIdx:]) + patIdx += patRuneLen + + if last <= nameRune && nameRune <= patRune { + matched = true + break + } + + // didn't match range - reset `last` + last = utf8.MaxRune + continue + } + + // not a range - check if the next rune is escaped + if patRune == '\\' { + patRune, patRuneLen = utf8.DecodeRuneInString(pattern[patIdx:]) + patIdx += patRuneLen + } + + // check if the rune matches + if patRune == nameRune { + matched = true + break + } + + // no matches yet + last = patRune + } + + if matched == negate { + // failed to match - if we reached the end of the pattern, that means + // we never found a closing `]` + if patIdx >= patLen { + return false, ErrBadPattern + } + break + } + + closingIdx := indexUnescapedByte(pattern[patIdx:], ']', true) + if closingIdx == -1 { + // no closing `]` + return false, ErrBadPattern + } + + patIdx += closingIdx + 1 + nameIdx += nameRuneLen + continue + + case '{': + startOfSegment = false + patIdx++ + closingIdx := indexMatchedClosingAlt(pattern[patIdx:], separator != '\\') + if closingIdx == -1 { + // no closing `}` + return false, ErrBadPattern + } + closingIdx += patIdx + + for ;; { + commaIdx := indexNextAlt(pattern[patIdx:closingIdx], separator != '\\') + if commaIdx == -1 { + break + } + commaIdx += patIdx + + result, err := matchWithSeparator(pattern[patIdx:commaIdx] + pattern[closingIdx+1:], name[nameIdx:], separator, validate) + if result || err != nil { + return result, err + } + + patIdx = commaIdx + 1 + } + return matchWithSeparator(pattern[patIdx:closingIdx] + pattern[closingIdx+1:], name[nameIdx:], separator, validate) + + case '\\': + if separator != '\\' { + // next rune is "escaped" in the pattern - literal match + if patIdx++; patIdx >= patLen { + // pattern ended + return false, ErrBadPattern + } + } + fallthrough + + default: + patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:]) + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + if patRune != nameRune { + if separator != '\\' && patIdx > 0 && pattern[patIdx-1] == '\\' { + // if this rune was meant to be escaped, we need to move patIdx + // back to the backslash before backtracking or validating below + patIdx-- + } + break + } + + patIdx += patRuneLen + nameIdx += nameRuneLen + startOfSegment = patRune == separator + continue + } + } + + if starPatternBacktrack >= 0 { + // `*` backtrack, but only if the `name` rune isn't the separator + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[starNameBacktrack:]) + if nameRune != separator { + starNameBacktrack += nameRuneLen + patIdx = starPatternBacktrack + nameIdx = starNameBacktrack + startOfSegment = false + continue + } + } + + if doublestarPatternBacktrack >= 0 { + // `**` backtrack, advance `name` past next separator + nameIdx = doublestarNameBacktrack + for nameIdx < nameLen { + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + nameIdx += nameRuneLen + if nameRune == separator { + 
doublestarNameBacktrack = nameIdx + patIdx = doublestarPatternBacktrack + startOfSegment = true + continue MATCH + } + } + } + + if validate && patIdx < patLen && !doValidatePattern(pattern[patIdx:], separator) { + return false, ErrBadPattern + } + return false, nil + } + + if nameIdx < nameLen { + // we reached the end of `pattern` before the end of `name` + return false, nil + } + + // we've reached the end of `name`; we've successfully matched if we've also + // reached the end of `pattern`, or if the rest of `pattern` can match a + // zero-length string + return isZeroLengthPattern(pattern[patIdx:], separator) +} + +func isZeroLengthPattern(pattern string, separator rune) (ret bool, err error) { + // `/**` is a special case - a pattern such as `path/to/a/**` *should* match + // `path/to/a` because `a` might be a directory + if pattern == "" || pattern == "*" || pattern == "**" || pattern == string(separator) + "**" { + return true, nil + } + + if pattern[0] == '{' { + closingIdx := indexMatchedClosingAlt(pattern[1:], separator != '\\') + if closingIdx == -1 { + // no closing '}' + return false, ErrBadPattern + } + closingIdx += 1 + + patIdx := 1 + for ;; { + commaIdx := indexNextAlt(pattern[patIdx:closingIdx], separator != '\\') + if commaIdx == -1 { + break + } + commaIdx += patIdx + + ret, err = isZeroLengthPattern(pattern[patIdx:commaIdx] + pattern[closingIdx+1:], separator) + if ret || err != nil { + return + } + + patIdx = commaIdx + 1 + } + return isZeroLengthPattern(pattern[patIdx:closingIdx] + pattern[closingIdx+1:], separator) + } + + // no luck - validate the rest of the pattern + if !doValidatePattern(pattern, separator) { + return false, ErrBadPattern + } + return false, nil +} + +// Finds the index of the first unescaped byte `c`, or negative 1. +func indexUnescapedByte(s string, c byte, allowEscaping bool) int { + l := len(s) + for i := 0; i < l; i++ { + if allowEscaping && s[i] == '\\' { + // skip next byte + i++ + } else if s[i] == c { + return i + } + } + return -1 +} + +// Assuming the byte before the beginning of `s` is an opening `{`, this +// function will find the index of the matching `}`. That is, it'll skip over +// any nested `{}` and account for escaping +func indexMatchedClosingAlt(s string, allowEscaping bool) int { + alts := 1 + l := len(s) + for i := 0; i < l; i++ { + if allowEscaping && s[i] == '\\' { + // skip next byte + i++ + } else if s[i] == '{' { + alts++ + } else if s[i] == '}' { + if alts--; alts == 0 { + return i + } + } + } + return -1 +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/utils.go b/vendor/github.com/bmatcuk/doublestar/v4/utils.go new file mode 100644 index 00000000000..af7e647b62a --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/utils.go @@ -0,0 +1,69 @@ +package doublestar + +// SplitPattern is a utility function. Given a pattern, SplitPattern will +// return two strings: the first string is everything up to the last slash +// (`/`) that appears _before_ any unescaped "meta" characters (ie, `*?[{`). +// The second string is everything after that slash. For example, given the +// pattern: +// +// ../../path/to/meta*/** +// ^----------- split here +// +// SplitPattern returns "../../path/to" and "meta*/**". This is useful for +// initializing os.DirFS() to call Glob() because Glob() will silently fail if +// your pattern includes `/./` or `/../`. 
For example: +// +// base, pattern := SplitPattern("../../path/to/meta*/**") +// fsys := os.DirFS(base) +// matches, err := Glob(fsys, pattern) +// +// If SplitPattern cannot find somewhere to split the pattern (for example, +// `meta*/**`), it will return "." and the unaltered pattern (`meta*/**` in +// this example). +// +// Of course, it is your responsibility to decide if the returned base path is +// "safe" in the context of your application. Perhaps you could use Match() to +// validate against a list of approved base directories? +// +func SplitPattern(p string) (base, pattern string) { + base = "." + pattern = p + + splitIdx := -1 + for i := 0; i < len(p); i++ { + c := p[i] + if c == '\\' { + i++ + } else if c == '/' { + splitIdx = i + } else if c == '*' || c == '?' || c == '[' || c == '{' { + break + } + } + + if splitIdx >= 0 { + return p[:splitIdx], p[splitIdx+1:] + } + + return +} + +// Finds the next comma, but ignores any commas that appear inside nested `{}`. +// Assumes that each opening bracket has a corresponding closing bracket. +func indexNextAlt(s string, allowEscaping bool) int { + alts := 1 + l := len(s) + for i := 0; i < l; i++ { + if allowEscaping && s[i] == '\\' { + // skip next byte + i++ + } else if s[i] == '{' { + alts++ + } else if s[i] == '}' { + alts-- + } else if s[i] == ',' && alts == 1 { + return i + } + } + return -1 +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/validate.go b/vendor/github.com/bmatcuk/doublestar/v4/validate.go new file mode 100644 index 00000000000..c689b9ebab8 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/validate.go @@ -0,0 +1,82 @@ +package doublestar + +import "path/filepath" + +// Validate a pattern. Patterns are validated while they run in Match(), +// PathMatch(), and Glob(), so, you normally wouldn't need to call this. +// However, there are cases where this might be useful: for example, if your +// program allows a user to enter a pattern that you'll run at a later time, +// you might want to validate it. +// +// ValidatePattern assumes your pattern uses '/' as the path separator. +// +func ValidatePattern(s string) bool { + return doValidatePattern(s, '/') +} + +// Like ValidatePattern, only uses your OS path separator. In other words, use +// ValidatePattern if you would normally use Match() or Glob(). Use +// ValidatePathPattern if you would normally use PathMatch(). Keep in mind, +// Glob() requires '/' separators, even if your OS uses something else. +// +func ValidatePathPattern(s string) bool { + return doValidatePattern(s, filepath.Separator) +} + +func doValidatePattern(s string, separator rune) bool { + altDepth := 0 + l := len(s) +VALIDATE: + for i := 0; i < l; i++ { + switch s[i] { + case '\\': + if separator != '\\' { + // skip the next byte - return false if there is no next byte + if i++; i >= l { + return false + } + } + continue + + case '[': + if i++; i >= l { + // class didn't end + return false + } + if s[i] == '^' || s[i] == '!' 
{ + i++ + } + if i >= l || s[i] == ']' { + // class didn't end or empty character class + return false + } + + for ; i < l; i++ { + if separator != '\\' && s[i] == '\\' { + i++ + } else if s[i] == ']' { + // looks good + continue VALIDATE + } + } + + // class didn't end + return false + + case '{': + altDepth++ + continue + + case '}': + if altDepth == 0 { + // alt end without a corresponding start + return false + } + altDepth-- + continue + } + } + + // valid as long as all alts are closed + return altDepth == 0 +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index 0668a66cf70..be2b3436062 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -15,7 +15,7 @@ type roffRenderer struct { extensions blackfriday.Extensions listCounters []int firstHeader bool - defineTerm bool + firstDD bool listDepth int } @@ -42,7 +42,8 @@ const ( quoteCloseTag = "\n.RE\n" listTag = "\n.RS\n" listCloseTag = "\n.RE\n" - arglistTag = "\n.TP\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" tableStart = "\n.TS\nallbox;\n" tableEnd = ".TE\n" tableCellStart = "T{\n" @@ -90,7 +91,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering switch node.Type { case blackfriday.Text: - r.handleText(w, node, entering) + escapeSpecialChars(w, node.Literal) case blackfriday.Softbreak: out(w, crTag) case blackfriday.Hardbreak: @@ -150,40 +151,21 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering out(w, codeCloseTag) case blackfriday.Table: r.handleTable(w, node, entering) - case blackfriday.TableCell: - r.handleTableCell(w, node, entering) case blackfriday.TableHead: case blackfriday.TableBody: case blackfriday.TableRow: // no action as cell entries do all the nroff formatting return blackfriday.GoToNext + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.HTMLSpan: + // ignore other HTML tags default: fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) } return walkAction } -func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - // handle special roff table cell text encapsulation - if node.Parent.Type == blackfriday.TableCell { - if len(node.Literal) > 30 { - start = tableCellStart - end = tableCellEnd - } else { - // end rows that aren't terminated by "tableCellEnd" with a cr if end of row - if node.Parent.Next == nil && !node.Parent.IsHeader { - end = crTag - } - } - } - out(w, start) - escapeSpecialChars(w, node.Literal) - out(w, end) -} - func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { if entering { switch node.Level { @@ -230,15 +212,20 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering if node.ListFlags&blackfriday.ListTypeOrdered != 0 { out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) r.listCounters[len(r.listCounters)-1]++ + } else if node.ListFlags&blackfriday.ListTypeTerm != 0 { + // DT (definition term): line just before DD (see below). 
+ out(w, dtTag) + r.firstDD = true } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // state machine for handling terms and following definitions - // since blackfriday does not distinguish them properly, nor - // does it seperate them into separate lists as it should - if !r.defineTerm { - out(w, arglistTag) - r.defineTerm = true + // DD (definition description): line that starts with ": ". + // + // We have to distinguish between the first DD and the + // subsequent ones, as there should be no vertical + // whitespace between the DT and the first DD. + if r.firstDD { + r.firstDD = false } else { - r.defineTerm = false + out(w, dd2Tag) } } else { out(w, ".IP \\(bu 2\n") @@ -251,7 +238,7 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { if entering { out(w, tableStart) - //call walker to count cells (and rows?) so format section can be produced + // call walker to count cells (and rows?) so format section can be produced columns := countColumns(node) out(w, strings.Repeat("l ", columns)+"\n") out(w, strings.Repeat("l ", columns)+".\n") @@ -261,28 +248,41 @@ func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering } func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - if node.IsHeader { - start = codespanTag - end = codespanCloseTag - } if entering { + var start string if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { - out(w, "\t"+start) - } else { - out(w, start) + start = "\t" + } + if node.IsHeader { + start += codespanTag + } else if nodeLiteralSize(node) > 30 { + start += tableCellStart } + out(w, start) } else { - // need to carriage return if we are at the end of the header row - if node.IsHeader && node.Next == nil { - end = end + crTag + var end string + if node.IsHeader { + end = codespanCloseTag + } else if nodeLiteralSize(node) > 30 { + end = tableCellEnd + } + if node.Next == nil && end != tableCellEnd { + // Last cell: need to carriage return if we are at the end of the + // header row and content isn't wrapped in a "tablecell" + end += crTag } out(w, end) } } +func nodeLiteralSize(node *blackfriday.Node) int { + total := 0 + for n := node.FirstChild; n != nil; n = n.FirstChild { + total += len(n.Literal) + } + return total +} + // because roff format requires knowing the column count before outputting any table // data we need to walk a table tree and count the columns func countColumns(node *blackfriday.Node) int { @@ -309,15 +309,6 @@ func out(w io.Writer, output string) { io.WriteString(w, output) // nolint: errcheck } -func needsBackslash(c byte) bool { - for _, r := range []byte("-_&\\~") { - if c == r { - return true - } - } - return false -} - func escapeSpecialChars(w io.Writer, text []byte) { for i := 0; i < len(text); i++ { // escape initial apostrophe or period @@ -328,7 +319,7 @@ func escapeSpecialChars(w io.Writer, text []byte) { // directly copy normal characters org := i - for i < len(text) && !needsBackslash(text[i]) { + for i < len(text) && text[i] != '\\' { i++ } if i > org { diff --git a/vendor/github.com/creasty/defaults/.gitignore b/vendor/github.com/creasty/defaults/.gitignore new file mode 100644 index 00000000000..e43b0f98895 --- /dev/null +++ b/vendor/github.com/creasty/defaults/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/vendor/github.com/creasty/defaults/.travis.yml 
b/vendor/github.com/creasty/defaults/.travis.yml new file mode 100644 index 00000000000..b54679533d6 --- /dev/null +++ b/vendor/github.com/creasty/defaults/.travis.yml @@ -0,0 +1,20 @@ +language: go +go: 1.13 + +branches: + only: + - master + +env: + global: + # CODECOV_TOKEN + - secure: "O4Hay0TPFW2eq1+Y9vlW4W8VIY9QlKzGgEoL8mgbHQ67RrvqaB7Ft6kdmUaVztu9a5ZB4QwwU8dCFV6D/+dY710qqayQghY2r4igqExzkMyRa3InQtGjhYd9CVpjbR5cXYV1PL9h/Jf3PTTUCmSJp0tpqZCce4BezgnhdSjccGzx3wlzj66+bDdpCzRkWtCvYvI4Fxg5aM5kBke1Ti+aLdEQNzDpkbM38/iyUQDuM+y6ZO+AuP9zqdMY82O6yEMXJppPtVnfqhTJQyDiEF6J6h3TauOb9nKhu8Eg0d2b0HseTRVcCigH3usCm6WYmqhInmsbdAge9gSs9SdYq06VYghG/AvnuRNbmGn3DHuRelyH0gBuXrzZxNP9nfPego4bvk6jQvPSu/flB7JHixkDkFBCO1b2R3ZdOdT6fg+qPQBMxEGy3YJ6ylZ0oqC0AByC+0OP7Hc/xv5U4uqnJ5oBVt4yt26sEjlMs8piXnkoNDoXVjgjfpqLLMyuSRzL1nbf7P270E7L0hDzWInObWlM7FuAl6ghb4j20jvx+4C5UKb8JJPMdbEOcDWZuJOnpThpcNIeP08Xks1wDJ5R01cxK2gja8Hmg+nF320bHybn3RRyOke7tuC3Egez2QaKEoQl9YmcepO+TENcsFk86v8Le68UHi8mJps5mm7iuJb2xyQ=" + +before_install: + - go get -u golang.org/x/lint/golint + +script: + - make ci-test + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/creasty/defaults/LICENSE b/vendor/github.com/creasty/defaults/LICENSE new file mode 100644 index 00000000000..1483dd2d83e --- /dev/null +++ b/vendor/github.com/creasty/defaults/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2017-present Yuki Iwanaga + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/creasty/defaults/Makefile b/vendor/github.com/creasty/defaults/Makefile new file mode 100644 index 00000000000..bd42d46f4b0 --- /dev/null +++ b/vendor/github.com/creasty/defaults/Makefile @@ -0,0 +1,30 @@ +SHELL := /bin/bash -eu -o pipefail + +GO_TEST_FLAGS := -v + +PACKAGE_DIRS := $(shell go list ./... 2> /dev/null | grep -v /vendor/) +SRC_FILES := $(shell find . 
-name '*.go' -not -path './vendor/*') + + +# Tasks +#----------------------------------------------- +.PHONY: lint +lint: + @gofmt -e -d -s $(SRC_FILES) | awk '{ e = 1; print $0 } END { if (e) exit(1) }' + @echo $(SRC_FILES) | xargs -n1 golint -set_exit_status + @go vet $(PACKAGE_DIRS) + +.PHONY: test +test: lint + @go test $(GO_TEST_FLAGS) $(PACKAGE_DIRS) + +.PHONY: ci-test +ci-test: lint + @echo > coverage.txt + @for d in $(PACKAGE_DIRS); do \ + go test -coverprofile=profile.out -covermode=atomic -race -v $$d; \ + if [ -f profile.out ]; then \ + cat profile.out >> coverage.txt; \ + rm profile.out; \ + fi; \ + done diff --git a/vendor/github.com/creasty/defaults/README.md b/vendor/github.com/creasty/defaults/README.md new file mode 100644 index 00000000000..918864ac2cd --- /dev/null +++ b/vendor/github.com/creasty/defaults/README.md @@ -0,0 +1,69 @@ +defaults +======== + +[![Build Status](https://travis-ci.org/creasty/defaults.svg?branch=master)](https://travis-ci.org/creasty/defaults) +[![codecov](https://codecov.io/gh/creasty/defaults/branch/master/graph/badge.svg)](https://codecov.io/gh/creasty/defaults) +[![GitHub release](https://img.shields.io/github/release/creasty/defaults.svg)](https://github.com/creasty/defaults/releases) +[![License](https://img.shields.io/github/license/creasty/defaults.svg)](./LICENSE) + +Initialize structs with default values + +- Supports almost all kind of types + - Scalar types + - `int/8/16/32/64`, `uint/8/16/32/64`, `float32/64` + - `uintptr`, `bool`, `string` + - Complex types + - `map`, `slice`, `struct` + - Aliased types + - `time.Duration` + - e.g., `type Enum string` + - Pointer types + - e.g., `*SampleStruct`, `*int` +- Recursively initializes fields in a struct +- Dynamically sets default values by [`defaults.Setter`](./setter.go) interface +- Preserves non-initial values from being reset with a default value + + +Usage +----- + +```go +type Gender string + +type Sample struct { + Name string `default:"John Smith"` + Age int `default:"27"` + Gender Gender `default:"m"` + + Slice []string `default:"[]"` + SliceByJSON []int `default:"[1, 2, 3]"` // Supports JSON + Map map[string]int `default:"{}"` + MapByJSON map[string]int `default:"{\"foo\": 123}"` + + Struct OtherStruct `default:"{}"` + StructPtr *OtherStruct `default:"{\"Foo\": 123}"` + + NoTag OtherStruct // Recurses into a nested struct by default + OptOut OtherStruct `default:"-"` // Opt-out +} + +type OtherStruct struct { + Hello string `default:"world"` // Tags in a nested struct also work + Foo int `default:"-"` + Random int `default:"-"` +} + +// SetDefaults implements defaults.Setter interface +func (s *OtherStruct) SetDefaults() { + if defaults.CanUpdate(s.Random) { // Check if it's a zero value (recommended) + s.Random = rand.Int() // Set a dynamic value + } +} +``` + +```go +obj := &Sample{} +if err := defaults.Set(obj); err != nil { + panic(err) +} +``` diff --git a/vendor/github.com/creasty/defaults/defaults.go b/vendor/github.com/creasty/defaults/defaults.go new file mode 100644 index 00000000000..d4baf0f07dc --- /dev/null +++ b/vendor/github.com/creasty/defaults/defaults.go @@ -0,0 +1,198 @@ +package defaults + +import ( + "encoding/json" + "errors" + "reflect" + "strconv" + "time" +) + +var ( + errInvalidType = errors.New("not a struct pointer") +) + +const ( + fieldName = "default" +) + +// Set initializes members in a struct referenced by a pointer. +// Maps and slices are initialized by `make` and other primitive types are set with default values. 
+// `ptr` should be a struct pointer +func Set(ptr interface{}) error { + if reflect.TypeOf(ptr).Kind() != reflect.Ptr { + return errInvalidType + } + + v := reflect.ValueOf(ptr).Elem() + t := v.Type() + + if t.Kind() != reflect.Struct { + return errInvalidType + } + + for i := 0; i < t.NumField(); i++ { + if defaultVal := t.Field(i).Tag.Get(fieldName); defaultVal != "-" { + if err := setField(v.Field(i), defaultVal); err != nil { + return err + } + } + } + callSetter(ptr) + return nil +} + +// MustSet function is a wrapper of Set function +// It will call Set and panic if err not equals nil. +func MustSet(ptr interface{}) { + if err := Set(ptr); err != nil { + panic(err) + } +} + +func setField(field reflect.Value, defaultVal string) error { + if !field.CanSet() { + return nil + } + + if !shouldInitializeField(field, defaultVal) { + return nil + } + + isInitial := isInitialValue(field) + if isInitial { + switch field.Kind() { + case reflect.Bool: + if val, err := strconv.ParseBool(defaultVal); err == nil { + field.Set(reflect.ValueOf(val).Convert(field.Type())) + } + case reflect.Int: + if val, err := strconv.ParseInt(defaultVal, 0, strconv.IntSize); err == nil { + field.Set(reflect.ValueOf(int(val)).Convert(field.Type())) + } + case reflect.Int8: + if val, err := strconv.ParseInt(defaultVal, 0, 8); err == nil { + field.Set(reflect.ValueOf(int8(val)).Convert(field.Type())) + } + case reflect.Int16: + if val, err := strconv.ParseInt(defaultVal, 0, 16); err == nil { + field.Set(reflect.ValueOf(int16(val)).Convert(field.Type())) + } + case reflect.Int32: + if val, err := strconv.ParseInt(defaultVal, 0, 32); err == nil { + field.Set(reflect.ValueOf(int32(val)).Convert(field.Type())) + } + case reflect.Int64: + if val, err := time.ParseDuration(defaultVal); err == nil { + field.Set(reflect.ValueOf(val).Convert(field.Type())) + } else if val, err := strconv.ParseInt(defaultVal, 0, 64); err == nil { + field.Set(reflect.ValueOf(val).Convert(field.Type())) + } + case reflect.Uint: + if val, err := strconv.ParseUint(defaultVal, 0, strconv.IntSize); err == nil { + field.Set(reflect.ValueOf(uint(val)).Convert(field.Type())) + } + case reflect.Uint8: + if val, err := strconv.ParseUint(defaultVal, 0, 8); err == nil { + field.Set(reflect.ValueOf(uint8(val)).Convert(field.Type())) + } + case reflect.Uint16: + if val, err := strconv.ParseUint(defaultVal, 0, 16); err == nil { + field.Set(reflect.ValueOf(uint16(val)).Convert(field.Type())) + } + case reflect.Uint32: + if val, err := strconv.ParseUint(defaultVal, 0, 32); err == nil { + field.Set(reflect.ValueOf(uint32(val)).Convert(field.Type())) + } + case reflect.Uint64: + if val, err := strconv.ParseUint(defaultVal, 0, 64); err == nil { + field.Set(reflect.ValueOf(val).Convert(field.Type())) + } + case reflect.Uintptr: + if val, err := strconv.ParseUint(defaultVal, 0, strconv.IntSize); err == nil { + field.Set(reflect.ValueOf(uintptr(val)).Convert(field.Type())) + } + case reflect.Float32: + if val, err := strconv.ParseFloat(defaultVal, 32); err == nil { + field.Set(reflect.ValueOf(float32(val)).Convert(field.Type())) + } + case reflect.Float64: + if val, err := strconv.ParseFloat(defaultVal, 64); err == nil { + field.Set(reflect.ValueOf(val).Convert(field.Type())) + } + case reflect.String: + field.Set(reflect.ValueOf(defaultVal).Convert(field.Type())) + + case reflect.Slice: + ref := reflect.New(field.Type()) + ref.Elem().Set(reflect.MakeSlice(field.Type(), 0, 0)) + if defaultVal != "" && defaultVal != "[]" { + if err := 
json.Unmarshal([]byte(defaultVal), ref.Interface()); err != nil { + return err + } + } + field.Set(ref.Elem().Convert(field.Type())) + case reflect.Map: + ref := reflect.New(field.Type()) + ref.Elem().Set(reflect.MakeMap(field.Type())) + if defaultVal != "" && defaultVal != "{}" { + if err := json.Unmarshal([]byte(defaultVal), ref.Interface()); err != nil { + return err + } + } + field.Set(ref.Elem().Convert(field.Type())) + case reflect.Struct: + if defaultVal != "" && defaultVal != "{}" { + if err := json.Unmarshal([]byte(defaultVal), field.Addr().Interface()); err != nil { + return err + } + } + case reflect.Ptr: + field.Set(reflect.New(field.Type().Elem())) + } + } + + switch field.Kind() { + case reflect.Ptr: + if isInitial || field.Elem().Kind() == reflect.Struct { + setField(field.Elem(), defaultVal) + callSetter(field.Interface()) + } + case reflect.Struct: + if err := Set(field.Addr().Interface()); err != nil { + return err + } + case reflect.Slice: + for j := 0; j < field.Len(); j++ { + if err := setField(field.Index(j), defaultVal); err != nil { + return err + } + } + } + + return nil +} + +func isInitialValue(field reflect.Value) bool { + return reflect.DeepEqual(reflect.Zero(field.Type()).Interface(), field.Interface()) +} + +func shouldInitializeField(field reflect.Value, tag string) bool { + switch field.Kind() { + case reflect.Struct: + return true + case reflect.Ptr: + if !field.IsNil() && field.Elem().Kind() == reflect.Struct { + return true + } + case reflect.Slice: + return field.Len() > 0 || tag != "" + } + + return tag != "" +} + +// CanUpdate returns true when the given value is an initial value of its type +func CanUpdate(v interface{}) bool { + return isInitialValue(reflect.ValueOf(v)) +} diff --git a/vendor/github.com/creasty/defaults/go.mod b/vendor/github.com/creasty/defaults/go.mod new file mode 100644 index 00000000000..fae962c837e --- /dev/null +++ b/vendor/github.com/creasty/defaults/go.mod @@ -0,0 +1,3 @@ +module github.com/creasty/defaults + +go 1.13 diff --git a/vendor/github.com/creasty/defaults/setter.go b/vendor/github.com/creasty/defaults/setter.go new file mode 100644 index 00000000000..1f64aa65993 --- /dev/null +++ b/vendor/github.com/creasty/defaults/setter.go @@ -0,0 +1,12 @@ +package defaults + +// Setter is an interface for setting default values +type Setter interface { + SetDefaults() +} + +func callSetter(v interface{}) { + if ds, ok := v.(Setter); ok { + ds.SetDefaults() + } +} diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md new file mode 100644 index 00000000000..c3569600463 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CHANGELOG.md @@ -0,0 +1,6 @@ +# CHANGELOG + +## v1.0.0-rc1 + +This is the first logged release. Major changes (including breaking changes) +have occurred since earlier tags. diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md new file mode 100644 index 00000000000..5d37e294c5f --- /dev/null +++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +Logr is open to pull-requests, provided they fit within the intended scope of +the project. Specifically, this library aims to be VERY small and minimalist, +with no external dependencies. + +## Compatibility + +This project intends to follow [semantic versioning](http://semver.org) and +is very strict about compatibility. Any proposed changes MUST follow those +rules. 
+ +## Performance + +As a logging library, logr must be as light-weight as possible. Any proposed +code change must include results of running the [benchmark](./benchmark) +before and after the change. diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index e9b5520a1c5..69b0b40a5c3 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,112 +1,179 @@ -# A more minimal logging API for Go +# A minimal logging API for Go -Before you consider this package, please read [this blog post by the -inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what -he has to say, and it largely aligns with my own experiences. Too many -choices of levels means inconsistent logs. +logr offers an(other) opinion on how Go programs and libraries can do logging +without becoming coupled to a particular logging implementation. This is not +an implementation of logging - it is an API. In fact it is two APIs with two +different sets of users. + +The `Logger` type is intended for application and library authors. It provides +a relatively small API which can be used everywhere you want to emit logs. It +defers the actual act of writing logs (to files, to stdout, or whatever) to the +`LogSink` interface. + +The `LogSink` interface is intended for logging library implementers. It is a +pure interface which can be implemented by logging frameworks to provide the actual logging +functionality. + +This decoupling allows application and library developers to write code in +terms of `logr.Logger` (which has very low dependency fan-out) while the +implementation of logging is managed "up stack" (e.g. in or near `main()`.) +Application developers can then switch out implementations as necessary. + +Many people assert that libraries should not be logging, and as such efforts +like this are pointless. Those people are welcome to convince the authors of +the tens-of-thousands of libraries that *DO* write logs that they are all +wrong. In the meantime, logr takes a more practical approach. + +## Typical usage + +Somewhere, early in an application's life, it will make a decision about which +logging library (implementation) it actually wants to use. Something like: + +``` + func main() { + // ... other setup code ... + + // Create the "root" logger. We have chosen the "logimpl" implementation, + // which takes some initial parameters and returns a logr.Logger. + logger := logimpl.New(param1, param2) + + // ... other setup code ... +``` + +Most apps will call into other libraries, create structures to govern the flow, +etc. The `logr.Logger` object can be passed to these other libraries, stored +in structs, or even used as a package-global variable, if needed. For example: + +``` + app := createTheAppObject(logger) + app.Run() +``` + +Outside of this early setup, no other packages need to know about the choice of +implementation. They write logs in terms of the `logr.Logger` that they +received: -This package offers a purely abstract interface, based on these ideas but with -a few twists. Code can depend on just this interface and have the actual -logging implementation be injected from callers. Ideally only `main()` knows -what logging implementation is being used. +``` + type appObject struct { + // ... other fields ... + logger logr.Logger + // ... other fields ... + } -# Differences from Dave's ideas + func (app *appObject) Run() { + app.logger.Info("starting up", "timestamp", time.Now()) + + // ... app code ... 
+``` + +## Background + +If the Go standard library had defined an interface for logging, this project +probably would not be needed. Alas, here we are. + +### Inspiration + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what +he has to say, and it largely aligns with our own experiences. + +### Differences from Dave's ideas The main differences are: -1) Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. I disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. I -restrict the API to just 2 types of logs: info and error. +1. Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. We disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. This +package restricts the logging API to just 2 types of logs: info and error. Info logs are things you want to tell the user which are not errors. Error logs are, well, errors. If your code receives an `error` from a subordinate function call and is logging that `error` *and not returning it*, use error logs. -2) Verbosity-levels on info logs. This gives developers a chance to indicate +2. Verbosity-levels on info logs. This gives developers a chance to indicate arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug". Superficially this +semantic meaning such as "warning", "trace", and "debug." Superficially this may feel very similar, but the primary difference is the lack of semantics. Because verbosity is a numerical value, it's safe to assume that an app running with higher verbosity means more (and less important) logs will be generated. -This is a BETA grade API. +## Implementations (non-exhaustive) There are implementations for the following logging libraries: +- **a function**: [funcr](https://github.com/go-logr/logr/tree/master/funcr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) - **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) -- **log** (the Go standard library logger): - [stdr](https://github.com/go-logr/stdr) +- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) -# FAQ +## FAQ -## Conceptual +### Conceptual -## Why structured logging? +#### Why structured logging? -- **Structured logs are more easily queriable**: Since you've got +- **Structured logs are more easily queryable**: Since you've got key-value pairs, it's much easier to query your structured logs for particular values by filtering on the contents of a particular key -- think searching request logs for error codes, Kubernetes reconcilers for - the name and namespace of the reconciled object, etc + the name and namespace of the reconciled object, etc. 
-- **Structured logging makes it easier to have cross-referencable logs**: +- **Structured logging makes it easier to have cross-referenceable logs**: Similarly to searchability, if you maintain conventions around your keys, it becomes easy to gather all log lines related to a particular concept. - + - **Structured logs allow better dimensions of filtering**: if you have structure to your logs, you've got more precise control over how much information is logged -- you might choose in a particular configuration to log certain keys but not others, only log lines where a certain key - matches a certain value, etc, instead of just having v-levels and names + matches a certain value, etc., instead of just having v-levels and names to key off of. - **Structured logs better represent structured data**: sometimes, the data that you want to log is inherently structured (think tuple-link - objects). Structured logs allow you to preserve that structure when + objects.) Structured logs allow you to preserve that structure when outputting. -## Why V-levels? +#### Why V-levels? **V-levels give operators an easy way to control the chattiness of log operations**. V-levels provide a way for a given package to distinguish the relative importance or verbosity of a given log message. Then, if a particular logger or package is logging too many messages, the user -of the package can simply change the v-levels for that library. +of the package can simply change the v-levels for that library. -## Why not more named levels, like Warning? +#### Why not named levels, like Info/Warning/Error? Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences from Dave's ideas](#differences-from-daves-ideas). -## Why not allow format strings, too? +#### Why not allow format strings, too? **Format strings negate many of the benefits of structured logs**: - They're not easily searchable without resorting to fuzzy searching, - regular expressions, etc + regular expressions, etc. - They don't store structured data well, since contents are flattened into - a string + a string. -- They're not cross-referencable +- They're not cross-referenceable. -- They don't compress easily, since the message is not constant +- They don't compress easily, since the message is not constant. -(unless you turn positional parameters into key-value pairs with numerical +(Unless you turn positional parameters into key-value pairs with numerical keys, at which point you've gotten key-value logging with meaningless -keys) +keys.) -## Practical +### Practical -## Why key-value pairs, and not a map? +#### Why key-value pairs, and not a map? Key-value pairs are *much* easier to optimize, especially around allocations. Zap (a structured logger that inspired logr's interface) has @@ -117,26 +184,26 @@ While the interface ends up being a little less obvious, you get potentially better performance, plus avoid making users type `map[string]string{}` every time they want to log. -## What if my V-levels differ between libraries? +#### What if my V-levels differ between libraries? That's fine. Control your V-levels on a per-logger basis, and use the -`WithName` function to pass different loggers to different libraries. +`WithName` method to pass different loggers to different libraries. Generally, you should take care to ensure that you have relatively consistent V-levels within a given logger, however, as this makes deciding on what verbosity of logs to request easier. -## But I *really* want to use a format string! 
+#### But I really want to use a format string! That's not actually a question. Assuming your question is "how do I convert my mental model of logging with format strings to logging with constant messages": -1. figure out what the error actually is, as you'd write in a TL;DR style, - and use that as a message +1. Figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message. 2. For every place you'd write a format specifier, look to the word before - it, and add that as a key value pair + it, and add that as a key value pair. For instance, consider the following examples (all taken from spots in the Kubernetes codebase): @@ -150,34 +217,59 @@ Kubernetes codebase): response when requesting url", "attempt", retries, "after seconds", seconds, "url", url)` -If you *really* must use a format string, place it as a key value, and -call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to +If you *really* must use a format string, use it in a key's value, and +call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to reflect over type %T")` becomes `logger.Info("unable to reflect over type", "type", fmt.Sprintf("%T"))`. In general though, the cases where this is necessary should be few and far between. -## How do I choose my V-levels? +#### How do I choose my V-levels? This is basically the only hard constraint: increase V-levels to denote more verbose or more debug-y logs. Otherwise, you can start out with `0` as "you always want to see this", `1` as "common logging that you might *possibly* want to turn off", and -`10` as "I would like to performance-test your log collection stack". +`10` as "I would like to performance-test your log collection stack." Then gradually choose levels in between as you need them, working your way down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs). +info-type logs.) + +#### How do I choose my keys? -## How do I choose my keys +Keys are fairly flexible, and can hold more or less any string +value. For best compatibility with implementations and consistency +with existing code in other projects, there are a few conventions you +should consider. -- make your keys human-readable -- constant keys are generally a good idea -- be consistent across your codebase -- keys should naturally match parts of the message string +- Make your keys human-readable. +- Constant keys are generally a good idea. +- Be consistent across your codebase. +- Keys should naturally match parts of the message string. +- Use lower case for simple keys and + [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for + more complex ones. Kubernetes is one example of a project that has + [adopted that + convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). While key names are mostly unrestricted (and spaces are acceptable), it's generally a good idea to stick to printable ascii characters, or at least match the general character set of your log lines. +#### Why should keys be constant values? + +The point of structured logging is to make later log processing easier. Your +keys are, effectively, the schema of each log message. If you use different +keys across instances of the same log line, you will make your structured logs +much harder to use. `Sprintf()` is for values, not for keys! + +#### Why is this not a pure interface? 
+ +The Logger type is implemented as a struct in order to allow the Go compiler to +optimize things like high-V `Info` logs that are not triggered. Not all of +these implementations are implemented yet, but this structure was suggested as +a way to ensure they *can* be implemented. All of the real work is behind the +`LogSink` interface. + [warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go index 2bafb13d15c..9d92a38f1d7 100644 --- a/vendor/github.com/go-logr/logr/discard.go +++ b/vendor/github.com/go-logr/logr/discard.go @@ -16,36 +16,39 @@ limitations under the License. package logr -// Discard returns a valid Logger that discards all messages logged to it. -// It can be used whenever the caller is not interested in the logs. +// Discard returns a Logger that discards all messages logged to it. It can be +// used whenever the caller is not interested in the logs. Logger instances +// produced by this function always compare as equal. func Discard() Logger { - return DiscardLogger{} + return Logger{ + level: 0, + sink: discardLogSink{}, + } } -// DiscardLogger is a Logger that discards all messages. -type DiscardLogger struct{} +// discardLogSink is a LogSink that discards all messages. +type discardLogSink struct{} -func (l DiscardLogger) Enabled() bool { - return false +// Verify that it actually implements the interface +var _ LogSink = discardLogSink{} + +func (l discardLogSink) Init(RuntimeInfo) { } -func (l DiscardLogger) Info(msg string, keysAndValues ...interface{}) { +func (l discardLogSink) Enabled(int) bool { + return false } -func (l DiscardLogger) Error(err error, msg string, keysAndValues ...interface{}) { +func (l discardLogSink) Info(int, string, ...interface{}) { } -func (l DiscardLogger) V(level int) Logger { - return l +func (l discardLogSink) Error(error, string, ...interface{}) { } -func (l DiscardLogger) WithValues(keysAndValues ...interface{}) Logger { +func (l discardLogSink) WithValues(...interface{}) LogSink { return l } -func (l DiscardLogger) WithName(name string) Logger { +func (l discardLogSink) WithName(string) LogSink { return l } - -// Verify that it actually implements the interface -var _ Logger = DiscardLogger{} diff --git a/vendor/github.com/go-logr/logr/go.mod b/vendor/github.com/go-logr/logr/go.mod index 591884e91f1..7baec9b5707 100644 --- a/vendor/github.com/go-logr/logr/go.mod +++ b/vendor/github.com/go-logr/logr/go.mod @@ -1,3 +1,3 @@ module github.com/go-logr/logr -go 1.14 +go 1.16 diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 842428bd3a3..4e84ab7c8ef 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -16,85 +16,103 @@ limitations under the License. // This design derives from Dave Cheney's blog: // http://dave.cheney.net/2015/11/05/lets-talk-about-logging -// -// This is a BETA grade API. Until there is a significant 2nd implementation, -// I don't really know how it will change. -// Package logr defines abstract interfaces for logging. Packages can depend on -// these interfaces and callers can implement logging in whatever way is -// appropriate. +// Package logr defines a general-purpose logging API and abstract interfaces +// to back that API. Packages in the Go ecosystem can depend on this package, +// while callers can implement logging with whatever backend is appropriate. 
// -// Usage +// # Usage // -// Logging is done using a Logger. Loggers can have name prefixes and named -// values attached, so that all log messages logged with that Logger have some -// base context associated. +// Logging is done using a Logger instance. Logger is a concrete type with +// methods, which defers the actual logging to a LogSink interface. The main +// methods of Logger are Info() and Error(). Arguments to Info() and Error() +// are key/value pairs rather than printf-style formatted strings, emphasizing +// "structured logging". // -// The term "key" is used to refer to the name associated with a particular -// value, to disambiguate it from the general Logger name. +// With Go's standard log package, we might write: +// log.Printf("setting target value %s", targetValue) // -// For instance, suppose we're trying to reconcile the state of an object, and -// we want to log that we've made some decision. +// With logr's structured logging, we'd write: +// logger.Info("setting target", "value", targetValue) // -// With the traditional log package, we might write: -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) +// Errors are much the same. Instead of: +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) // -// With logr's structured logging, we'd write: -// // elsewhere in the file, set up the logger to log with the prefix of -// // "reconcilers", and the named value target-type=Foo, for extra context. -// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") +// We'd write: +// logger.Error(err, "failed to open the pod bay door", "user", user) // -// // later on... -// log.Info("setting foo on object", "value", targetValue, "object", object) +// Info() and Error() are very similar, but they are separate methods so that +// LogSink implementations can choose to do things like attach additional +// information (such as stack traces) on calls to Error(). // -// Depending on our logging implementation, we could then make logging decisions -// based on field values (like only logging such events for objects in a certain -// namespace), or copy the structured information into a structured log store. +// # Verbosity // -// For logging errors, Logger has a method called Error. Suppose we wanted to -// log an error while reconciling. With the traditional log package, we might -// write: -// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) +// Often we want to log information only when the application in "verbose +// mode". To write log lines that are more verbose, Logger has a V() method. +// The higher the V-level of a log line, the less critical it is considered. +// Log-lines with V-levels that are not enabled (as per the LogSink) will not +// be written. Level V(0) is the default, and logger.V(0).Info() has the same +// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). // -// With logr, we'd instead write: -// // assuming the above setup for log -// log.Error(err, "unable to reconcile object", "object", object) +// Where we might have written: +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } // -// This functions similarly to: -// log.Info("unable to reconcile object", "error", err, "object", object) +// We can write: +// logger.V(2).Info("an unusual thing happened") // -// However, it ensures that a standard key for the error value ("error") is used -// across all error logging. 
Furthermore, certain implementations may choose to -// attach additional information (such as stack traces) on calls to Error, so -// it's preferred to use Error to log errors. +// # Logger Names // -// Parts of a log line +// Logger instances can have name strings so that all messages logged through +// that instance have additional context. For example, you might want to add +// a subsystem name: // -// Each log message from a Logger has four types of context: -// logger name, log verbosity, log message, and the named values. +// logger.WithName("compactor").Info("started", "time", time.Now()) // -// The Logger name consists of a series of name "segments" added by successive -// calls to WithName. These name segments will be joined in some way by the -// underlying implementation. It is strongly recommended that name segments -// contain simple identifiers (letters, digits, and hyphen), and do not contain -// characters that could muddle the log output or confuse the joining operation -// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc). +// The WithName() method returns a new Logger, which can be passed to +// constructors or other functions for further use. Repeated use of WithName() +// will accumulate name "segments". These name segments will be joined in some +// way by the LogSink implementation. It is strongly recommended that name +// segments contain simple identifiers (letters, digits, and hyphen), and do +// not contain characters that could muddle the log output or confuse the +// joining operation (e.g. whitespace, commas, periods, slashes, brackets, +// quotes, etc). // -// Log verbosity represents how little a log matters. Level zero, the default, -// matters most. Increasing levels matter less and less. Try to avoid lots of -// different verbosity levels, and instead provide useful keys, logger names, -// and log messages for users to filter on. It's illegal to pass a log level -// below zero. +// # Saved Values +// +// Logger instances can store any number of key/value pairs, which will be +// logged alongside all messages logged through that instance. For example, +// you might want to create a Logger instance per managed object: +// +// With the standard log package, we might write: +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) +// +// With logr we'd write: +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) +// +// # Best Practices +// +// Logger has very few hard rules, with the goal that LogSink implementations +// might have a lot of freedom to differentiate. There are, however, some +// things to consider. // // The log message consists of a constant message attached to the log line. // This should generally be a simple description of what's occurring, and should -// never be a format string. +// never be a format string. Variable information can then be attached using +// named values. // -// Variable information can then be attached using named values (key/value -// pairs). Keys are arbitrary strings, while values may be any Go value. +// Keys are arbitrary strings, but should generally be constant values. Values +// may be any Go value, but how the value is formatted is determined by the +// LogSink implementation. 
// -// Key Naming Conventions +// # Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but // it is recommended that they: @@ -102,6 +120,7 @@ limitations under the License. // * be constant (not dependent on input data) // * contain only printable characters // * not contain whitespace or punctuation +// * use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to @@ -111,20 +130,22 @@ limitations under the License. // generally best to avoid using the following keys, as they're frequently used // by implementations: // -// * `"caller"`: the calling information (file/line) of a particular log line. -// * `"error"`: the underlying error value in the `Error` method. -// * `"level"`: the log level. -// * `"logger"`: the name of the associated logger. -// * `"msg"`: the log message. -// * `"stacktrace"`: the stack trace associated with a particular log line or -// error (often from the `Error` message). -// * `"ts"`: the timestamp for a log line. +// * "caller": the calling information (file/line) of a particular log line. +// * "error": the underlying error value in the `Error` method. +// * "level": the log level. +// * "logger": the name of the associated logger. +// * "msg": the log message. +// * "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message). +// * "ts": the timestamp for a log line. // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). // +// # Break Glass +// // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: // // Underlier exposes access to the underlying logging implementation. @@ -134,81 +155,220 @@ limitations under the License. // type Underlier interface { // GetUnderlying() // } +// +// Logger grants access to the sink to enable type assertions like this: +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink()(impl.Underlier) { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } +// +// Custom `With*` functions can be implemented by copying the complete +// Logger struct and replacing the sink in the copy: +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } +// +// Don't use New to construct a new Logger with a LogSink retrieved from an +// existing Logger. Source code attribution might not work correctly and +// unexported fields in Logger get lost. +// +// Beware that the same LogSink instance may be shared by different logger +// instances. Calling functions that modify the LogSink will affect all of +// those. 
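As a concrete illustration of the v1 `Logger` API documented in the comment block above, here is a minimal, hedged usage sketch; the choice of `logr.Discard()` as the sink and the `doWork` helper are illustrative only:

```go
package main

import (
	"context"

	"github.com/go-logr/logr"
)

func doWork(ctx context.Context) {
	// Retrieve the Logger carried by the context, or a no-op Logger if none.
	logger := logr.FromContextOrDiscard(ctx)
	logger.V(1).Info("starting work", "attempt", 1)
}

func main() {
	// In a real program this would be a concrete implementation such as
	// zapr or stdr; Discard() keeps the sketch dependency-free.
	logger := logr.Discard().WithName("example").WithValues("component", "demo")

	// Store the Logger in a Context and hand it to downstream code.
	ctx := logr.NewContext(context.Background(), logger)
	doWork(ctx)
}
```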
package logr import ( "context" ) -// TODO: consider adding back in format strings if they're really needed -// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects) -// TODO: consider other bits of glog functionality like Flush, OutputStats +// New returns a new Logger instance. This is primarily used by libraries +// implementing LogSink, rather than end users. +func New(sink LogSink) Logger { + logger := Logger{} + logger.setSink(sink) + sink.Init(runtimeInfo) + return logger +} -// Logger represents the ability to log messages, both errors and not. -type Logger interface { - // Enabled tests whether this Logger is enabled. For example, commandline - // flags might be used to set the logging verbosity and disable some info - // logs. - Enabled() bool +// setSink stores the sink and updates any related fields. It mutates the +// logger and thus is only safe to use for loggers that are not currently being +// used concurrently. +func (l *Logger) setSink(sink LogSink) { + l.sink = sink +} - // Info logs a non-error message with the given key/value pairs as context. - // - // The msg argument should be used to add some constant description to - // the log line. The key/value pairs can then be used to add additional - // variable information. The key/value pairs should alternate string - // keys and arbitrary values. - Info(msg string, keysAndValues ...interface{}) - - // Error logs an error, with the given message and key/value pairs as context. - // It functions similarly to calling Info with the "error" named value, but may - // have unique behavior, and should be preferred for logging errors (see the - // package documentations for more information). - // - // The msg field should be used to add context to any underlying error, - // while the err field should be used to attach the actual error that - // triggered this log line, if present. - Error(err error, msg string, keysAndValues ...interface{}) +// GetSink returns the stored sink. +func (l Logger) GetSink() LogSink { + return l.sink +} + +// WithSink returns a copy of the logger with the new sink. +func (l Logger) WithSink(sink LogSink) Logger { + l.setSink(sink) + return l +} + +// Logger is an interface to an abstract logging implementation. This is a +// concrete type for performance reasons, but all the real work is passed on to +// a LogSink. Implementations of LogSink should provide their own constructors +// that return Logger, not LogSink. +// +// The underlying sink can be accessed through GetSink and be modified through +// WithSink. This enables the implementation of custom extensions (see "Break +// Glass" in the package documentation). Normally the sink should be used only +// indirectly. +type Logger struct { + sink LogSink + level int +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info logs. +func (l Logger) Enabled() bool { + return l.sink.Enabled(l.level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to the log +// line. The key/value pairs can then be used to add additional variable +// information. The key/value pairs must alternate string keys and arbitrary +// values. 
+func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.Enabled() { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Info(l.level, msg, keysAndValues...) + } +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to Info, but may have unique behavior, and should be +// preferred for logging errors (see the package documentations for more +// information). +// +// The msg argument should be used to add context to any underlying error, +// while the err argument should be used to attach the actual error that +// triggered this log line, if present. +func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Error(err, msg, keysAndValues...) +} + +// V returns a new Logger instance for a specific verbosity level, relative to +// this Logger. In other words, V-levels are additive. A higher verbosity +// level means a log message is less important. Negative V-levels are treated +// as 0. +func (l Logger) V(level int) Logger { + if level < 0 { + level = 0 + } + l.level += level + return l +} - // V returns an Logger value for a specific verbosity level, relative to - // this Logger. In other words, V values are additive. V higher verbosity - // level means a log message is less important. It's illegal to pass a log - // level less than zero. - V(level int) Logger - - // WithValues adds some key-value pairs of context to a logger. - // See Info for documentation on how key/value pairs work. - WithValues(keysAndValues ...interface{}) Logger - - // WithName adds a new element to the logger's name. - // Successive calls with WithName continue to append - // suffixes to the logger's name. It's strongly recommended - // that name segments contain only letters, digits, and hyphens - // (see the package documentation for more information). - WithName(name string) Logger +// WithValues returns a new Logger instance with additional key/value pairs. +// See Info for documentation on how key/value pairs work. +func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + l.setSink(l.sink.WithValues(keysAndValues...)) + return l } -// InfoLogger provides compatibility with code that relies on the v0.1.0 -// interface. +// WithName returns a new Logger instance with the specified name element added +// to the Logger's name. Successive calls with WithName append additional +// suffixes to the Logger's name. It's strongly recommended that name segments +// contain only letters, digits, and hyphens (see the package documentation for +// more information). +func (l Logger) WithName(name string) Logger { + l.setSink(l.sink.WithName(name)) + return l +} + +// WithCallDepth returns a Logger instance that offsets the call stack by the +// specified number of frames when logging call site information, if possible. +// This is useful for users who have helper functions between the "real" call +// site and the actual calls to Logger methods. If depth is 0 the attribution +// should be to the direct caller of this function. If depth is 1 the +// attribution should skip 1 call frame, and so on. Successive calls to this +// are additive. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// it will be called and the result returned. 
If the implementation does not +// support CallDepthLogSink, the original Logger will be returned. // -// Deprecated: InfoLogger is an artifact of early versions of this API. New -// users should never use it and existing users should use Logger instead. This -// will be removed in a future release. -type InfoLogger = Logger +// To skip one level, WithCallStackHelper() should be used instead of +// WithCallDepth(1) because it works with implementions that support the +// CallDepthLogSink and/or CallStackHelperLogSink interfaces. +func (l Logger) WithCallDepth(depth int) Logger { + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(depth)) + } + return l +} +// WithCallStackHelper returns a new Logger instance that skips the direct +// caller when logging call site information, if possible. This is useful for +// users who have helper functions between the "real" call site and the actual +// calls to Logger methods and want to support loggers which depend on marking +// each individual helper function, like loggers based on testing.T. +// +// In addition to using that new logger instance, callers also must call the +// returned function. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// WithCallDepth(1) will be called to produce a new logger. If it supports a +// WithCallStackHelper() method, that will be also called. If the +// implementation does not support either of these, the original Logger will be +// returned. +func (l Logger) WithCallStackHelper() (func(), Logger) { + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + helper = withHelper.GetCallStackHelper() + } else { + helper = func() {} + } + return helper, l +} + +// contextKey is how we find Loggers in a context.Context. type contextKey struct{} -// FromContext returns a Logger constructed from ctx or nil if no -// logger details are found. -func FromContext(ctx context.Context) Logger { +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v + return v, nil } - return nil + return Logger{}, notFoundError{} } -// FromContextOrDiscard returns a Logger constructed from ctx or a Logger -// that discards all messages if no logger details are found. +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. func FromContextOrDiscard(ctx context.Context) Logger { if v, ok := ctx.Value(contextKey{}).(Logger); ok { return v @@ -217,12 +377,59 @@ func FromContextOrDiscard(ctx context.Context) Logger { return Discard() } -// NewContext returns a new context derived from ctx that embeds the Logger. -func NewContext(ctx context.Context, l Logger) context.Context { - return context.WithValue(ctx, contextKey{}, l) +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. 
+func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) } -// CallDepthLogger represents a Logger that knows how to climb the call stack +// RuntimeInfo holds information that the logr "core" library knows which +// LogSinks might want to know. +type RuntimeInfo struct { + // CallDepth is the number of call frames the logr library adds between the + // end-user and the LogSink. LogSink implementations which choose to print + // the original logging site (e.g. file & line) should climb this many + // additional frames to find it. + CallDepth int +} + +// runtimeInfo is a static global. It must not be changed at run time. +var runtimeInfo = RuntimeInfo{ + CallDepth: 1, +} + +// LogSink represents a logging implementation. End-users will generally not +// interact with this type. +type LogSink interface { + // Init receives optional information about the logr library for LogSink + // implementations that need it. + Init(info RuntimeInfo) + + // Enabled tests whether this LogSink is enabled at the specified V-level. + // For example, commandline flags might be used to set the logging + // verbosity and disable some info logs. + Enabled(level int) bool + + // Info logs a non-error message with the given key/value pairs as context. + // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. + Info(level int, msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. + Error(err error, msg string, keysAndValues ...interface{}) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. + WithValues(keysAndValues ...interface{}) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink +} + +// CallDepthLogSink represents a Logger that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -232,35 +439,41 @@ func NewContext(ctx context.Context, l Logger) context.Context { // // This is an optional interface and implementations are not required to // support it. -type CallDepthLogger interface { - Logger - - // WithCallDepth returns a Logger that will offset the call stack by the - // specified number of frames when logging call site information. If depth - // is 0 the attribution should be to the direct caller of this method. If - // depth is 1 the attribution should skip 1 call frame, and so on. +type CallDepthLogSink interface { + // WithCallDepth returns a LogSink that will offset the call + // stack by the specified number of frames when logging call + // site information. + // + // If depth is 0, the LogSink should skip exactly the number + // of call frames defined in RuntimeInfo.CallDepth when Info + // or Error are called, i.e. the attribution should be to the + // direct caller of Logger.Info or Logger.Error. + // + // If depth is 1 the attribution should skip 1 call frame, and so on. // Successive calls to this are additive. 
- WithCallDepth(depth int) Logger + WithCallDepth(depth int) LogSink } -// WithCallDepth returns a Logger that will offset the call stack by the -// specified number of frames when logging call site information, if possible. -// This is useful for users who have helper functions between the "real" call -// site and the actual calls to Logger methods. If depth is 0 the attribution -// should be to the direct caller of this function. If depth is 1 the -// attribution should skip 1 call frame, and so on. Successive calls to this -// are additive. +// CallStackHelperLogSink represents a Logger that knows how to climb +// the call stack to identify the original call site and can skip +// intermediate helper functions if they mark themselves as +// helper. Go's testing package uses that approach. // -// If the underlying log implementation supports the CallDepthLogger interface, -// the WithCallDepth method will be called and the result returned. If the -// implementation does not support CallDepthLogger, the original Logger will be -// returned. +// This is useful for users who have helper functions between the +// "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as +// file, function, or line) would otherwise log information about the +// intermediate helper functions. // -// Callers which care about whether this was supported or not should test for -// CallDepthLogger support themselves. -func WithCallDepth(logger Logger, depth int) Logger { - if decorator, ok := logger.(CallDepthLogger); ok { - return decorator.WithCallDepth(depth) - } - return logger +// This is an optional interface and implementations are not required +// to support it. Implementations that choose to support this must not +// simply implement it as WithCallDepth(1), because +// Logger.WithCallStackHelper will call both methods if they are +// present. This should only be implemented for LogSinks that actually +// need it, as with testing.T. +type CallStackHelperLogSink interface { + // GetCallStackHelper returns a function that must be called + // to mark the direct caller as helper function when logging + // call site information. 
+ GetCallStackHelper() func() } diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/.gitignore b/vendor/github.com/go-ozzo/ozzo-validation/v4/.gitignore new file mode 100644 index 00000000000..ef6e33fc481 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Idea Directory +.idea + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +.DS_Store diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/.travis.yml b/vendor/github.com/go-ozzo/ozzo-validation/v4/.travis.yml new file mode 100644 index 00000000000..28febbdae1a --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/.travis.yml @@ -0,0 +1,17 @@ +dist: bionic + +language: go + +go: + - 1.13.x + +install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - go get golang.org/x/lint/golint + +script: + - test -z "`gofmt -l -d .`" + - test -z "`golint ./...`" + - go test -v -covermode=count -coverprofile=coverage.out + - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/LICENSE b/vendor/github.com/go-ozzo/ozzo-validation/v4/LICENSE new file mode 100644 index 00000000000..d235be9cc6d --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/LICENSE @@ -0,0 +1,17 @@ +The MIT License (MIT) +Copyright (c) 2016, Qiang Xue + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software +and associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
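To complement the Logger-side sketch earlier, the implementer side of the go-logr v1 API introduced above is the `LogSink` interface. A minimal, hedged implementation sketch follows; `minimalSink` and its behavior are illustrative assumptions, not code from this patch:

```go
package main

import (
	"errors"
	"log"

	"github.com/go-logr/logr"
)

// minimalSink is a hypothetical LogSink backed by the standard library
// logger. Key/value handling in WithValues is deliberately omitted.
type minimalSink struct {
	name string
}

func (s minimalSink) Init(info logr.RuntimeInfo) {}

// Enabled drops anything more verbose than V(1).
func (s minimalSink) Enabled(level int) bool { return level <= 1 }

func (s minimalSink) Info(level int, msg string, kv ...interface{}) {
	log.Println(append([]interface{}{s.name, msg}, kv...)...)
}

func (s minimalSink) Error(err error, msg string, kv ...interface{}) {
	log.Println(append([]interface{}{s.name, "error:", err, msg}, kv...)...)
}

func (s minimalSink) WithValues(kv ...interface{}) logr.LogSink { return s }

func (s minimalSink) WithName(name string) logr.LogSink {
	s.name = s.name + "/" + name
	return s
}

func main() {
	logger := logr.New(minimalSink{name: "demo"})
	logger.Info("hello", "answer", 42)
	logger.V(2).Info("too verbose, dropped by Enabled")
	logger.Error(errors.New("boom"), "something failed")
}
```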
diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/README.md b/vendor/github.com/go-ozzo/ozzo-validation/v4/README.md new file mode 100644 index 00000000000..6c14bbf7f02 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/README.md @@ -0,0 +1,703 @@ +# ozzo-validation + +[![GoDoc](https://godoc.org/github.com/go-ozzo/ozzo-validation?status.png)](http://godoc.org/github.com/go-ozzo/ozzo-validation) +[![Build Status](https://travis-ci.org/go-ozzo/ozzo-validation.svg?branch=master)](https://travis-ci.org/go-ozzo/ozzo-validation) +[![Coverage Status](https://coveralls.io/repos/github/go-ozzo/ozzo-validation/badge.svg?branch=master)](https://coveralls.io/github/go-ozzo/ozzo-validation?branch=master) +[![Go Report](https://goreportcard.com/badge/github.com/go-ozzo/ozzo-validation)](https://goreportcard.com/report/github.com/go-ozzo/ozzo-validation) + +## Description + +ozzo-validation is a Go package that provides configurable and extensible data validation capabilities. +It has the following features: + +* use normal programming constructs rather than error-prone struct tags to specify how data should be validated. +* can validate data of different types, e.g., structs, strings, byte slices, slices, maps, arrays. +* can validate custom data types as long as they implement the `Validatable` interface. +* can validate data types that implement the `sql.Valuer` interface (e.g. `sql.NullString`). +* customizable and well-formatted validation errors. +* error code and message translation support. +* provide a rich set of validation rules right out of box. +* extremely easy to create and use custom validation rules. + +For an example on how this library is used in an application, please refer to [go-rest-api](https://github.com/qiangxue/go-rest-api) which is a starter kit for building RESTful APIs in Go. + +## Requirements + +Go 1.13 or above. + + +## Getting Started + +The ozzo-validation package mainly includes a set of validation rules and two validation methods. You use +validation rules to describe how a value should be considered valid, and you call either `validation.Validate()` +or `validation.ValidateStruct()` to validate the value. + + +### Installation + +Run the following command to install the package: + +``` +go get github.com/go-ozzo/ozzo-validation +``` + +### Validating a Simple Value + +For a simple value, such as a string or an integer, you may use `validation.Validate()` to validate it. For example, + +```go +package main + +import ( + "fmt" + + "github.com/go-ozzo/ozzo-validation/v4" + "github.com/go-ozzo/ozzo-validation/v4/is" +) + +func main() { + data := "example" + err := validation.Validate(data, + validation.Required, // not empty + validation.Length(5, 100), // length between 5 and 100 + is.URL, // is a valid URL + ) + fmt.Println(err) + // Output: + // must be a valid URL +} +``` + +The method `validation.Validate()` will run through the rules in the order that they are listed. If a rule fails +the validation, the method will return the corresponding error and skip the rest of the rules. The method will +return nil if the value passes all validation rules. + + +### Validating a Struct + +For a struct value, you usually want to check if its fields are valid. For example, in a RESTful application, you +may unmarshal the request payload into a struct and then validate the struct fields. If one or multiple fields +are invalid, you may want to get an error describing which fields are invalid. 
You can use `validation.ValidateStruct()` +to achieve this purpose. A single struct can have rules for multiple fields, and a field can be associated with multiple +rules. For example, + +```go +type Address struct { + Street string + City string + State string + Zip string +} + +func (a Address) Validate() error { + return validation.ValidateStruct(&a, + // Street cannot be empty, and the length must between 5 and 50 + validation.Field(&a.Street, validation.Required, validation.Length(5, 50)), + // City cannot be empty, and the length must between 5 and 50 + validation.Field(&a.City, validation.Required, validation.Length(5, 50)), + // State cannot be empty, and must be a string consisting of two letters in upper case + validation.Field(&a.State, validation.Required, validation.Match(regexp.MustCompile("^[A-Z]{2}$"))), + // State cannot be empty, and must be a string consisting of five digits + validation.Field(&a.Zip, validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), + ) +} + +a := Address{ + Street: "123", + City: "Unknown", + State: "Virginia", + Zip: "12345", +} + +err := a.Validate() +fmt.Println(err) +// Output: +// Street: the length must be between 5 and 50; State: must be in a valid format. +``` + +Note that when calling `validation.ValidateStruct` to validate a struct, you should pass to the method a pointer +to the struct instead of the struct itself. Similarly, when calling `validation.Field` to specify the rules +for a struct field, you should use a pointer to the struct field. + +When the struct validation is performed, the fields are validated in the order they are specified in `ValidateStruct`. +And when each field is validated, its rules are also evaluated in the order they are associated with the field. +If a rule fails, an error is recorded for that field, and the validation will continue with the next field. + + +### Validating a Map + +Sometimes you might need to work with dynamic data stored in maps rather than a typed model. You can use `validation.Map()` +in this situation. A single map can have rules for multiple keys, and a key can be associated with multiple +rules. For example, + +```go +c := map[string]interface{}{ + "Name": "Qiang Xue", + "Email": "q", + "Address": map[string]interface{}{ + "Street": "123", + "City": "Unknown", + "State": "Virginia", + "Zip": "12345", + }, +} + +err := validation.Validate(c, + validation.Map( + // Name cannot be empty, and the length must be between 5 and 20. + validation.Key("Name", validation.Required, validation.Length(5, 20)), + // Email cannot be empty and should be in a valid email format. 
+ validation.Key("Email", validation.Required, is.Email), + // Validate Address using its own validation rules + validation.Key("Address", validation.Map( + // Street cannot be empty, and the length must between 5 and 50 + validation.Key("Street", validation.Required, validation.Length(5, 50)), + // City cannot be empty, and the length must between 5 and 50 + validation.Key("City", validation.Required, validation.Length(5, 50)), + // State cannot be empty, and must be a string consisting of two letters in upper case + validation.Key("State", validation.Required, validation.Match(regexp.MustCompile("^[A-Z]{2}$"))), + // State cannot be empty, and must be a string consisting of five digits + validation.Key("Zip", validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), + )), + ), +) +fmt.Println(err) +// Output: +// Address: (State: must be in a valid format; Street: the length must be between 5 and 50.); Email: must be a valid email address. +``` + +When the map validation is performed, the keys are validated in the order they are specified in `Map`. +And when each key is validated, its rules are also evaluated in the order they are associated with the key. +If a rule fails, an error is recorded for that key, and the validation will continue with the next key. + + +### Validation Errors + +The `validation.ValidateStruct` method returns validation errors found in struct fields in terms of `validation.Errors` +which is a map of fields and their corresponding errors. Nil is returned if validation passes. + +By default, `validation.Errors` uses the struct tags named `json` to determine what names should be used to +represent the invalid fields. The type also implements the `json.Marshaler` interface so that it can be marshaled +into a proper JSON object. For example, + +```go +type Address struct { + Street string `json:"street"` + City string `json:"city"` + State string `json:"state"` + Zip string `json:"zip"` +} + +// ...perform validation here... + +err := a.Validate() +b, _ := json.Marshal(err) +fmt.Println(string(b)) +// Output: +// {"street":"the length must be between 5 and 50","state":"must be in a valid format"} +``` + +You may modify `validation.ErrorTag` to use a different struct tag name. + +If you do not like the magic that `ValidateStruct` determines error keys based on struct field names or corresponding +tag values, you may use the following alternative approach: + +```go +c := Customer{ + Name: "Qiang Xue", + Email: "q", + Address: Address{ + State: "Virginia", + }, +} + +err := validation.Errors{ + "name": validation.Validate(c.Name, validation.Required, validation.Length(5, 20)), + "email": validation.Validate(c.Name, validation.Required, is.Email), + "zip": validation.Validate(c.Address.Zip, validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), +}.Filter() +fmt.Println(err) +// Output: +// email: must be a valid email address; zip: cannot be blank. +``` + +In the above example, we build a `validation.Errors` by a list of names and the corresponding validation results. +At the end we call `Errors.Filter()` to remove from `Errors` all nils which correspond to those successful validation +results. The method will return nil if `Errors` is empty. + +The above approach is very flexible as it allows you to freely build up your validation error structure. You can use +it to validate both struct and non-struct values. 
Compared to using `ValidateStruct` to validate a struct, +it has the drawback that you have to redundantly specify the error keys while `ValidateStruct` can automatically +find them out. + + +### Internal Errors + +Internal errors are different from validation errors in that internal errors are caused by malfunctioning code (e.g. +a validator making a remote call to validate some data when the remote service is down) rather +than the data being validated. When an internal error happens during data validation, you may allow the user to resubmit +the same data to perform validation again, hoping the program resumes functioning. On the other hand, if data validation +fails due to data error, the user should generally not resubmit the same data again. + +To differentiate internal errors from validation errors, when an internal error occurs in a validator, wrap it +into `validation.InternalError` by calling `validation.NewInternalError()`. The user of the validator can then check +if a returned error is an internal error or not. For example, + +```go +if err := a.Validate(); err != nil { + if e, ok := err.(validation.InternalError); ok { + // an internal error happened + fmt.Println(e.InternalError()) + } +} +``` + + +## Validatable Types + +A type is validatable if it implements the `validation.Validatable` interface. + +When `validation.Validate` is used to validate a validatable value, if it does not find any error with the +given validation rules, it will further call the value's `Validate()` method. + +Similarly, when `validation.ValidateStruct` is validating a struct field whose type is validatable, it will call +the field's `Validate` method after it passes the listed rules. + +> Note: When implementing `validation.Validatable`, do not call `validation.Validate()` to validate the value in its +> original type because this will cause infinite loops. For example, if you define a new type `MyString` as `string` +> and implement `validation.Validatable` for `MyString`, within the `Validate()` function you should cast the value +> to `string` first before calling `validation.Validate()` to validate it. + +In the following example, the `Address` field of `Customer` is validatable because `Address` implements +`validation.Validatable`. Therefore, when validating a `Customer` struct with `validation.ValidateStruct`, +validation will "dive" into the `Address` field. + +```go +type Customer struct { + Name string + Gender string + Email string + Address Address +} + +func (c Customer) Validate() error { + return validation.ValidateStruct(&c, + // Name cannot be empty, and the length must be between 5 and 20. + validation.Field(&c.Name, validation.Required, validation.Length(5, 20)), + // Gender is optional, and should be either "Female" or "Male". + validation.Field(&c.Gender, validation.In("Female", "Male")), + // Email cannot be empty and should be in a valid email format. + validation.Field(&c.Email, validation.Required, is.Email), + // Validate Address using its own validation rules + validation.Field(&c.Address), + ) +} + +c := Customer{ + Name: "Qiang Xue", + Email: "q", + Address: Address{ + Street: "123 Main Street", + City: "Unknown", + State: "Virginia", + Zip: "12345", + }, +} + +err := c.Validate() +fmt.Println(err) +// Output: +// Address: (State: must be in a valid format.); Email: must be a valid email address. +``` + +Sometimes, you may want to skip the invocation of a type's `Validate` method. To do so, simply associate +a `validation.Skip` rule with the value being validated. 
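A short, hedged illustration of the `validation.Skip` rule mentioned above, reusing the `Customer` value `c` from the earlier example:

```go
// Name is validated normally, but Address's own Validate() method is
// not invoked because of validation.Skip.
err := validation.ValidateStruct(&c,
	validation.Field(&c.Name, validation.Required, validation.Length(5, 20)),
	validation.Field(&c.Address, validation.Skip),
)
fmt.Println(err)
```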
+
+### Maps/Slices/Arrays of Validatables
+
+When validating an iterable (map, slice, or array) whose element type implements the `validation.Validatable` interface,
+the `validation.Validate` method will call the `Validate` method of every non-nil element.
+The validation errors of the elements will be returned as `validation.Errors` which maps the keys of the
+invalid elements to their corresponding validation errors. For example,
+
+```go
+addresses := []Address{
+ Address{State: "MD", Zip: "12345"},
+ Address{Street: "123 Main St", City: "Vienna", State: "VA", Zip: "12345"},
+ Address{City: "Unknown", State: "NC", Zip: "123"},
+}
+err := validation.Validate(addresses)
+fmt.Println(err)
+// Output:
+// 0: (City: cannot be blank; Street: cannot be blank.); 2: (Street: cannot be blank; Zip: must be in a valid format.).
+```
+
+When using `validation.ValidateStruct` to validate a struct, the above validation procedure also applies to those struct
+fields which are maps/slices/arrays of validatables.
+
+#### Each
+
+The `Each` validation rule allows you to apply a set of rules to each element of an array, slice, or map.
+
+```go
+type Customer struct {
+ Name string
+ Emails []string
+}
+
+func (c Customer) Validate() error {
+ return validation.ValidateStruct(&c,
+ // Name cannot be empty, and the length must be between 5 and 20.
+ validation.Field(&c.Name, validation.Required, validation.Length(5, 20)),
+ // Emails are optional, but if given must be valid.
+ validation.Field(&c.Emails, validation.Each(is.Email)),
+ )
+}
+
+c := Customer{
+ Name: "Qiang Xue",
+ Emails: []string{
+ "valid@example.com",
+ "invalid",
+ },
+}
+
+err := c.Validate()
+fmt.Println(err)
+// Output:
+// Emails: (1: must be a valid email address.).
+```
+
+### Pointers
+
+When a value being validated is a pointer, most validation rules will validate the actual value pointed to by the pointer.
+If the pointer is nil, these rules will skip the validation.
+
+The exceptions are the `validation.Required` and `validation.NotNil` rules. When a pointer is nil, they
+will report a validation error.
+
+
+### Types Implementing `sql.Valuer`
+
+If a data type implements the `sql.Valuer` interface (e.g. `sql.NullString`), the built-in validation rules will handle
+it properly. In particular, when a rule is validating such data, it will call the `Value()` method and validate
+the returned value instead.
+
+
+### Required vs. Not Nil
+
+When validating input values, there are two different scenarios for checking whether input values are provided or not.
+
+In the first scenario, an input value is considered missing if it is not entered or it is entered as a zero value
+(e.g. an empty string, a zero integer). You can use the `validation.Required` rule in this case.
+
+In the second scenario, an input value is considered missing only if it is not entered. A pointer field is usually
+used in this case so that you can detect if a value is entered or not by checking if the pointer is nil or not.
+You can use the `validation.NotNil` rule to ensure a value is entered (even if it is a zero value).
+
+
+### Embedded Structs
+
+The `validation.ValidateStruct` method will properly validate a struct that contains embedded structs. In particular,
+the fields of an embedded struct are treated as if they belong directly to the containing struct.
For example,
+
+```go
+type Employee struct {
+ Name string
+}
+
+type Manager struct {
+ Employee
+ Level int
+}
+
+m := Manager{}
+err := validation.ValidateStruct(&m,
+ validation.Field(&m.Name, validation.Required),
+ validation.Field(&m.Level, validation.Required),
+)
+fmt.Println(err)
+// Output:
+// Level: cannot be blank; Name: cannot be blank.
+```
+
+In the above code, we use `&m.Name` to specify the validation of the `Name` field of the embedded struct `Employee`.
+And the validation error uses `Name` as the key for the error associated with the `Name` field, as if `Name` were a field
+directly belonging to `Manager`.
+
+If `Employee` implements the `validation.Validatable` interface, we can also use the following code to validate
+`Manager`, which generates the same validation result:
+
+```go
+func (e Employee) Validate() error {
+ return validation.ValidateStruct(&e,
+ validation.Field(&e.Name, validation.Required),
+ )
+}
+
+err := validation.ValidateStruct(&m,
+ validation.Field(&m.Employee),
+ validation.Field(&m.Level, validation.Required),
+)
+fmt.Println(err)
+// Output:
+// Level: cannot be blank; Name: cannot be blank.
+```
+
+
+### Conditional Validation
+
+Sometimes, we may want to validate a value only when a certain condition is met. For example, we want to ensure the
+`unit` struct field is not empty only when the `quantity` field is not empty; or we may want to ensure either `email`
+or `phone` is provided. The so-called conditional validation can be achieved with the help of `validation.When`.
+The following code implements the aforementioned examples:
+
+```go
+result := validation.ValidateStruct(&a,
+ validation.Field(&a.Unit, validation.When(a.Quantity != "", validation.Required).Else(validation.Nil)),
+ validation.Field(&a.Phone, validation.When(a.Email == "", validation.Required.Error("Either phone or Email is required."))),
+ validation.Field(&a.Email, validation.When(a.Phone == "", validation.Required.Error("Either phone or Email is required."))),
+)
+```
+
+Note that `validation.When` and `validation.When.Else` can take a list of validation rules. These rules will be executed only when the condition is true (When) or false (Else).
+
+The above code can also be simplified using the shortcut `validation.Required.When`:
+
+```go
+result := validation.ValidateStruct(&a,
+ validation.Field(&a.Unit, validation.Required.When(a.Quantity != ""), validation.Nil.When(a.Quantity == "")),
+ validation.Field(&a.Phone, validation.Required.When(a.Email == "").Error("Either phone or Email is required.")),
+ validation.Field(&a.Email, validation.Required.When(a.Phone == "").Error("Either phone or Email is required.")),
+)
+```
+
+### Customizing Error Messages
+
+All built-in validation rules allow you to customize their error messages. To do so, simply call the `Error()` method
+of the rules. For example,
+
+```go
+data := "2123"
+err := validation.Validate(data,
+ validation.Required.Error("is required"),
+ validation.Match(regexp.MustCompile("^[0-9]{5}$")).Error("must be a string with five digits"),
+)
+fmt.Println(err)
+// Output:
+// must be a string with five digits
+```
+
+You can also customize the pre-defined error(s) of a built-in rule such that the customization applies to *every*
+instance of the rule. For example, the `Required` rule uses the pre-defined error `ErrRequired`.
You can customize it
+during the application initialization:
+```go
+validation.ErrRequired = validation.ErrRequired.SetMessage("the value is required")
+```
+
+### Error Code and Message Translation
+
+The errors returned by the validation rules implement the `Error` interface, which provides the `Code()` method
+to expose the error code information. While the message of a validation error is often customized, the code is immutable.
+You can use the error code to programmatically check a validation error or look up the translation of the corresponding message.
+
+If you are developing your own validation rules, you can use `validation.NewError()` to create a validation error which
+implements the aforementioned `Error` interface.
+
+## Creating Custom Rules
+
+Creating a custom rule is as simple as implementing the `validation.Rule` interface. The interface contains a single
+method as shown below, which should validate the value and return the validation error, if any:
+
+```go
+// Validate validates a value and returns an error if validation fails.
+Validate(value interface{}) error
+```
+
+If you already have a function with the same signature as shown above, you can call `validation.By()` to turn
+it into a validation rule. For example,
+
+```go
+func checkAbc(value interface{}) error {
+ s, _ := value.(string)
+ if s != "abc" {
+ return errors.New("must be abc")
+ }
+ return nil
+}
+
+err := validation.Validate("xyz", validation.By(checkAbc))
+fmt.Println(err)
+// Output: must be abc
+```
+
+If your validation function takes additional parameters, you can use the following closure trick:
+
+```go
+func stringEquals(str string) validation.RuleFunc {
+ return func(value interface{}) error {
+ s, _ := value.(string)
+ if s != str {
+ return errors.New("unexpected string")
+ }
+ return nil
+ }
+}
+
+err := validation.Validate("xyz", validation.By(stringEquals("abc")))
+fmt.Println(err)
+// Output: unexpected string
+```
+
+
+### Rule Groups
+
+When a combination of several rules is used in multiple places, you may use the following trick to create a
+rule group so that your code is more maintainable.
+
+```go
+var NameRule = []validation.Rule{
+ validation.Required,
+ validation.Length(5, 20),
+}
+
+type User struct {
+ FirstName string
+ LastName string
+}
+
+func (u User) Validate() error {
+ return validation.ValidateStruct(&u,
+ validation.Field(&u.FirstName, NameRule...),
+ validation.Field(&u.LastName, NameRule...),
+ )
+}
+```
+
+In the above example, we create a rule group `NameRule` which consists of two validation rules. We then use this rule
+group to validate both `FirstName` and `LastName`.
+
+
+## Context-aware Validation
+
+While most validation rules are self-contained, some rules may depend dynamically on a context. A rule may implement the
+`validation.RuleWithContext` interface to support the so-called context-aware validation.
+
+To validate an arbitrary value with a context, call `validation.ValidateWithContext()`. The `context.Context` parameter
+will be passed along to those rules that implement `validation.RuleWithContext`.
+
+To validate the fields of a struct with a context, call `validation.ValidateStructWithContext()`.
+
+You can define a context-aware rule from scratch by implementing both `validation.Rule` and `validation.RuleWithContext`.
+You can also use `validation.WithContext()` to turn a function into a context-aware rule.
For example,
+
+
+```go
+rule := validation.WithContext(func(ctx context.Context, value interface{}) error {
+ if ctx.Value("secret") == value.(string) {
+ return nil
+ }
+ return errors.New("value incorrect")
+})
+value := "xyz"
+ctx := context.WithValue(context.Background(), "secret", "example")
+err := validation.ValidateWithContext(ctx, value, rule)
+fmt.Println(err)
+// Output: value incorrect
+```
+
+When performing context-aware validation, if a rule does not implement `validation.RuleWithContext`, its
+`Validate()` method will be called instead.
+
+
+## Built-in Validation Rules
+
+The following rules are provided in the `validation` package:
+
+* `In(...interface{})`: checks if a value can be found in the given list of values.
+* `NotIn(...interface{})`: checks if a value is NOT among the given list of values.
+* `Length(min, max int)`: checks if the length of a value is within the specified range.
+ This rule should only be used for validating strings, slices, maps, and arrays.
+* `RuneLength(min, max int)`: checks if the length of a string is within the specified range.
+ This rule is similar to `Length` except that when the value being validated is a string, it checks
+ its rune length instead of byte length.
+* `Min(min interface{})` and `Max(max interface{})`: check if a value is within the specified range.
+ These two rules should only be used for validating int, uint, float and time.Time types.
+* `Match(*regexp.Regexp)`: checks if a value matches the specified regular expression.
+ This rule should only be used for strings and byte slices.
+* `Date(layout string)`: checks if a string value is a date whose format is specified by the layout.
+ By calling `Min()` and/or `Max()`, you can additionally check if the date is within the specified range.
+* `Required`: checks if a value is not empty (neither nil nor zero).
+* `NotNil`: checks if a pointer value is not nil. Non-pointer values are considered valid.
+* `NilOrNotEmpty`: checks if a value is a nil pointer or a non-empty value. This differs from `Required` in that it treats a nil pointer as valid.
+* `Nil`: checks if a value is a nil pointer.
+* `Empty`: checks if a value is empty. Nil pointers are considered valid.
+* `Skip`: this is a special rule used to indicate that all rules following it should be skipped (including the nested ones).
+* `MultipleOf`: checks if the value is a multiple of the specified base value.
+* `Each(rules ...Rule)`: checks the elements within an iterable (map/slice/array) with the specified rules.
+* `When(condition, rules ...Rule)`: validates with the specified rules only when the condition is true.
+* `Else(rules ...Rule)`: must be used with `When(condition, rules ...Rule)`, validates with the specified rules only when the condition is false.
+
+The `is` sub-package provides a list of commonly used string validation rules that can be used to check if the format
+of a value satisfies certain requirements. Note that these rules only handle strings and byte slices, and if a string
+or byte slice is empty, it is considered valid. You may use a `Required` rule to ensure a value is not empty.
+Below is the whole list of the rules provided by the `is` package:
+
+* `Email`: validates if a string is an email or not. It also checks if the MX record exists for the email domain.
+* `EmailFormat`: validates if a string is an email or not. It does NOT check the existence of the MX record.
+* `URL`: validates if a string is a valid URL
+* `RequestURL`: validates if a string is a valid request URL
+* `RequestURI`: validates if a string is a valid request URI
+* `Alpha`: validates if a string contains English letters only (a-zA-Z)
+* `Digit`: validates if a string contains digits only (0-9)
+* `Alphanumeric`: validates if a string contains English letters and digits only (a-zA-Z0-9)
+* `UTFLetter`: validates if a string contains unicode letters only
+* `UTFDigit`: validates if a string contains unicode decimal digits only
+* `UTFLetterNumeric`: validates if a string contains unicode letters and numbers only
+* `UTFNumeric`: validates if a string contains unicode number characters (category N) only
+* `LowerCase`: validates if a string contains lower case unicode letters only
+* `UpperCase`: validates if a string contains upper case unicode letters only
+* `Hexadecimal`: validates if a string is a valid hexadecimal number
+* `HexColor`: validates if a string is a valid hexadecimal color code
+* `RGBColor`: validates if a string is a valid RGB color in the form of rgb(R, G, B)
+* `Int`: validates if a string is a valid integer number
+* `Float`: validates if a string is a floating point number
+* `UUIDv3`: validates if a string is a valid version 3 UUID
+* `UUIDv4`: validates if a string is a valid version 4 UUID
+* `UUIDv5`: validates if a string is a valid version 5 UUID
+* `UUID`: validates if a string is a valid UUID
+* `CreditCard`: validates if a string is a valid credit card number
+* `ISBN10`: validates if a string is an ISBN version 10
+* `ISBN13`: validates if a string is an ISBN version 13
+* `ISBN`: validates if a string is an ISBN (either version 10 or 13)
+* `JSON`: validates if a string is in valid JSON format
+* `ASCII`: validates if a string contains ASCII characters only
+* `PrintableASCII`: validates if a string contains printable ASCII characters only
+* `Multibyte`: validates if a string contains multibyte characters
+* `FullWidth`: validates if a string contains full-width characters
+* `HalfWidth`: validates if a string contains half-width characters
+* `VariableWidth`: validates if a string contains both full-width and half-width characters
+* `Base64`: validates if a string is encoded in Base64
+* `DataURI`: validates if a string is a valid base64-encoded data URI
+* `E164`: validates if a string is a valid E164 phone number (+19251232233)
+* `CountryCode2`: validates if a string is a valid ISO3166 Alpha 2 country code
+* `CountryCode3`: validates if a string is a valid ISO3166 Alpha 3 country code
+* `DialString`: validates if a string is a valid dial string that can be passed to Dial()
+* `MAC`: validates if a string is a MAC address
+* `IP`: validates if a string is a valid IP address (either version 4 or 6)
+* `IPv4`: validates if a string is a valid version 4 IP address
+* `IPv6`: validates if a string is a valid version 6 IP address
+* `Subdomain`: validates if a string is a valid subdomain
+* `Domain`: validates if a string is a valid domain
+* `DNSName`: validates if a string is a valid DNS name
+* `Host`: validates if a string is a valid IP (both v4 and v6) or a valid DNS name
+* `Port`: validates if a string is a valid port number
+* `MongoID`: validates if a string is a valid Mongo ID
+* `Latitude`: validates if a string is a valid latitude
+* `Longitude`: validates if a string is a valid longitude
+* `SSN`: validates if a string is a social security number (SSN)
+* `Semver`: validates if a string is a valid semantic version
+
+## Credits
+
+The
`is` sub-package wraps the excellent validators provided by the [govalidator](https://github.com/asaskevich/govalidator) package. diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/UPGRADE.md b/vendor/github.com/go-ozzo/ozzo-validation/v4/UPGRADE.md new file mode 100644 index 00000000000..73bd222a7c3 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/UPGRADE.md @@ -0,0 +1,64 @@ +# Upgrade Instructions + +## Upgrade from 3.x to 4.x + +If you are customizing the error messages of the following built-in validation rules, you may need to +change the parameter placeholders from `%v` to the Go template variable placeholders. +* `Length`: use `{{.min}}` and `{{.max}}` in the error message to refer to the minimum and maximum lengths. +* `Min`: use `{{.threshold}}` in the error message to refer to the lower bound. +* `Max`: use `{{.threshold}}` in the error message to refer to the upper bound +* `MultipleOf`: use `{{.base}}` in the error message to refer to the base of the multiples. + +For example, + ```go +// 3.x +lengthRule := validation.Length(2,10).Error("the length must be between %v and %v") + +// 4.x +lengthRule := validation.Length(2,10).Error("the length must be between {{.min}} and {{.max}}") +``` + +## Upgrade from 2.x to 3.x + +* Instead of using `StructRules` to define struct validation rules, use `ValidateStruct()` to declare and perform + struct validation. The following code snippet shows how to modify your code: +```go +// 2.x usage +err := validation.StructRules{}. + Add("Street", validation.Required, validation.Length(5, 50)). + Add("City", validation.Required, validation.Length(5, 50)). + Add("State", validation.Required, validation.Match(regexp.MustCompile("^[A-Z]{2}$"))). + Add("Zip", validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))). + Validate(a) + +// 3.x usage +err := validation.ValidateStruct(&a, + validation.Field(&a.Street, validation.Required, validation.Length(5, 50)), + validation.Field(&a.City, validation.Required, validation.Length(5, 50)), + validation.Field(&a.State, validation.Required, validation.Match(regexp.MustCompile("^[A-Z]{2}$"))), + validation.Field(&a.Zip, validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), +) +``` + +* Instead of using `Rules` to declare a rule list and use it to validate a value, call `Validate()` with the rules directly. +```go +data := "example" + +// 2.x usage +rules := validation.Rules{ + validation.Required, + validation.Length(5, 100), + is.URL, +} +err := rules.Validate(data) + +// 3.x usage +err := validation.Validate(data, + validation.Required, + validation.Length(5, 100), + is.URL, +) +``` + +* The default struct tags used for determining error keys is changed from `validation` to `json`. You may modify + `validation.ErrorTag` to change it back. \ No newline at end of file diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/absent.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/absent.go new file mode 100644 index 00000000000..91fefe1364b --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/absent.go @@ -0,0 +1,67 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +var ( + // ErrNil is the error that returns when a value is not nil. + ErrNil = NewError("validation_nil", "must be blank") + // ErrEmpty is the error that returns when a not nil value is not empty. 
+ ErrEmpty = NewError("validation_empty", "must be blank") +) + +// Nil is a validation rule that checks if a value is nil. +// It is the opposite of NotNil rule +var Nil = absentRule{condition: true, skipNil: false} + +// Empty checks if a not nil value is empty. +var Empty = absentRule{condition: true, skipNil: true} + +type absentRule struct { + condition bool + err Error + skipNil bool +} + +// Validate checks if the given value is valid or not. +func (r absentRule) Validate(value interface{}) error { + if r.condition { + value, isNil := Indirect(value) + if !r.skipNil && !isNil || r.skipNil && !isNil && !IsEmpty(value) { + if r.err != nil { + return r.err + } + if r.skipNil { + return ErrEmpty + } + return ErrNil + } + } + return nil +} + +// When sets the condition that determines if the validation should be performed. +func (r absentRule) When(condition bool) absentRule { + r.condition = condition + return r +} + +// Error sets the error message for the rule. +func (r absentRule) Error(message string) absentRule { + if r.err == nil { + if r.skipNil { + r.err = ErrEmpty + } else { + r.err = ErrNil + } + } + r.err = r.err.SetMessage(message) + return r +} + +// ErrorObject sets the error struct for the rule. +func (r absentRule) ErrorObject(err Error) absentRule { + r.err = err + return r +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/date.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/date.go new file mode 100644 index 00000000000..31da0e32789 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/date.go @@ -0,0 +1,102 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "time" +) + +var ( + // ErrDateInvalid is the error that returns in case of an invalid date. + ErrDateInvalid = NewError("validation_date_invalid", "must be a valid date") + // ErrDateOutOfRange is the error that returns in case of an invalid date. + ErrDateOutOfRange = NewError("validation_date_out_of_range", "the date is out of range") +) + +// DateRule is a validation rule that validates date/time string values. +type DateRule struct { + layout string + min, max time.Time + err, rangeErr Error +} + +// Date returns a validation rule that checks if a string value is in a format that can be parsed into a date. +// The format of the date should be specified as the layout parameter which accepts the same value as that for time.Parse. +// For example, +// validation.Date(time.ANSIC) +// validation.Date("02 Jan 06 15:04 MST") +// validation.Date("2006-01-02") +// +// By calling Min() and/or Max(), you can let the Date rule to check if a parsed date value is within +// the specified date range. +// +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +func Date(layout string) DateRule { + return DateRule{ + layout: layout, + err: ErrDateInvalid, + rangeErr: ErrDateOutOfRange, + } +} + +// Error sets the error message that is used when the value being validated is not a valid date. +func (r DateRule) Error(message string) DateRule { + r.err = r.err.SetMessage(message) + return r +} + +// ErrorObject sets the error struct that is used when the value being validated is not a valid date.. +func (r DateRule) ErrorObject(err Error) DateRule { + r.err = err + return r +} + +// RangeError sets the error message that is used when the value being validated is out of the specified Min/Max date range. 
+func (r DateRule) RangeError(message string) DateRule { + r.rangeErr = r.rangeErr.SetMessage(message) + return r +} + +// RangeErrorObject sets the error struct that is used when the value being validated is out of the specified Min/Max date range. +func (r DateRule) RangeErrorObject(err Error) DateRule { + r.rangeErr = err + return r +} + +// Min sets the minimum date range. A zero value means skipping the minimum range validation. +func (r DateRule) Min(min time.Time) DateRule { + r.min = min + return r +} + +// Max sets the maximum date range. A zero value means skipping the maximum range validation. +func (r DateRule) Max(max time.Time) DateRule { + r.max = max + return r +} + +// Validate checks if the given value is a valid date. +func (r DateRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + str, err := EnsureString(value) + if err != nil { + return err + } + + date, err := time.Parse(r.layout, str) + if err != nil { + return r.err + } + + if !r.min.IsZero() && r.min.After(date) || !r.max.IsZero() && date.After(r.max) { + return r.rangeErr + } + + return nil +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/each.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/each.go new file mode 100644 index 00000000000..2cc72534700 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/each.go @@ -0,0 +1,97 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "context" + "errors" + "reflect" + "strconv" +) + +// Each returns a validation rule that loops through an iterable (map, slice or array) +// and validates each value inside with the provided rules. +// An empty iterable is considered valid. Use the Required rule to make sure the iterable is not empty. +func Each(rules ...Rule) EachRule { + return EachRule{ + rules: rules, + } +} + +// EachRule is a validation rule that validates elements in a map/slice/array using the specified list of rules. +type EachRule struct { + rules []Rule +} + +// Validate loops through the given iterable and calls the Ozzo Validate() method for each value. +func (r EachRule) Validate(value interface{}) error { + return r.ValidateWithContext(nil, value) +} + +// ValidateWithContext loops through the given iterable and calls the Ozzo ValidateWithContext() method for each value. +func (r EachRule) ValidateWithContext(ctx context.Context, value interface{}) error { + errs := Errors{} + + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.Map: + for _, k := range v.MapKeys() { + val := r.getInterface(v.MapIndex(k)) + var err error + if ctx == nil { + err = Validate(val, r.rules...) + } else { + err = ValidateWithContext(ctx, val, r.rules...) + } + if err != nil { + errs[r.getString(k)] = err + } + } + case reflect.Slice, reflect.Array: + for i := 0; i < v.Len(); i++ { + val := r.getInterface(v.Index(i)) + var err error + if ctx == nil { + err = Validate(val, r.rules...) + } else { + err = ValidateWithContext(ctx, val, r.rules...) 
+ } + if err != nil { + errs[strconv.Itoa(i)] = err + } + } + default: + return errors.New("must be an iterable (map, slice or array)") + } + + if len(errs) > 0 { + return errs + } + return nil +} + +func (r EachRule) getInterface(value reflect.Value) interface{} { + switch value.Kind() { + case reflect.Ptr, reflect.Interface: + if value.IsNil() { + return nil + } + return value.Elem().Interface() + default: + return value.Interface() + } +} + +func (r EachRule) getString(value reflect.Value) string { + switch value.Kind() { + case reflect.Ptr, reflect.Interface: + if value.IsNil() { + return "" + } + return value.Elem().String() + default: + return value.String() + } +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/error.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/error.go new file mode 100644 index 00000000000..e75b1de870d --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/error.go @@ -0,0 +1,180 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + "text/template" +) + +type ( + // Error interface represents an validation error + Error interface { + Error() string + Code() string + Message() string + SetMessage(string) Error + Params() map[string]interface{} + SetParams(map[string]interface{}) Error + } + + // ErrorObject is the default validation error + // that implements the Error interface. + ErrorObject struct { + code string + message string + params map[string]interface{} + } + + // Errors represents the validation errors that are indexed by struct field names, map or slice keys. + // values are Error or Errors (for map, slice and array error value is Errors). + Errors map[string]error + + // InternalError represents an error that should NOT be treated as a validation error. + InternalError interface { + error + InternalError() error + } + + internalError struct { + error + } +) + +// NewInternalError wraps a given error into an InternalError. +func NewInternalError(err error) InternalError { + return internalError{error: err} +} + +// InternalError returns the actual error that it wraps around. +func (e internalError) InternalError() error { + return e.error +} + +// SetCode set the error's translation code. +func (e ErrorObject) SetCode(code string) Error { + e.code = code + return e +} + +// Code get the error's translation code. +func (e ErrorObject) Code() string { + return e.code +} + +// SetParams set the error's params. +func (e ErrorObject) SetParams(params map[string]interface{}) Error { + e.params = params + return e +} + +// AddParam add parameter to the error's parameters. +func (e ErrorObject) AddParam(name string, value interface{}) Error { + if e.params == nil { + e.params = make(map[string]interface{}) + } + + e.params[name] = value + return e +} + +// Params returns the error's params. +func (e ErrorObject) Params() map[string]interface{} { + return e.params +} + +// SetMessage set the error's message. +func (e ErrorObject) SetMessage(message string) Error { + e.message = message + return e +} + +// Message return the error's message. +func (e ErrorObject) Message() string { + return e.message +} + +// Error returns the error message. 
+func (e ErrorObject) Error() string { + if len(e.params) == 0 { + return e.message + } + + res := bytes.Buffer{} + _ = template.Must(template.New("err").Parse(e.message)).Execute(&res, e.params) + + return res.String() +} + +// Error returns the error string of Errors. +func (es Errors) Error() string { + if len(es) == 0 { + return "" + } + + keys := make([]string, len(es)) + i := 0 + for key := range es { + keys[i] = key + i++ + } + sort.Strings(keys) + + var s strings.Builder + for i, key := range keys { + if i > 0 { + s.WriteString("; ") + } + if errs, ok := es[key].(Errors); ok { + _, _ = fmt.Fprintf(&s, "%v: (%v)", key, errs) + } else { + _, _ = fmt.Fprintf(&s, "%v: %v", key, es[key].Error()) + } + } + s.WriteString(".") + return s.String() +} + +// MarshalJSON converts the Errors into a valid JSON. +func (es Errors) MarshalJSON() ([]byte, error) { + errs := map[string]interface{}{} + for key, err := range es { + if ms, ok := err.(json.Marshaler); ok { + errs[key] = ms + } else { + errs[key] = err.Error() + } + } + return json.Marshal(errs) +} + +// Filter removes all nils from Errors and returns back the updated Errors as an error. +// If the length of Errors becomes 0, it will return nil. +func (es Errors) Filter() error { + for key, value := range es { + if value == nil { + delete(es, key) + } + } + if len(es) == 0 { + return nil + } + return es +} + +// NewError create new validation error. +func NewError(code, message string) Error { + return ErrorObject{ + code: code, + message: message, + } +} + +// Assert that our ErrorObject implements the Error interface. +var _ Error = ErrorObject{} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/go.mod b/vendor/github.com/go-ozzo/ozzo-validation/v4/go.mod new file mode 100644 index 00000000000..359af9418fd --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/go.mod @@ -0,0 +1,8 @@ +module github.com/go-ozzo/ozzo-validation/v4 + +go 1.13 + +require ( + github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 + github.com/stretchr/testify v1.4.0 +) diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/go.sum b/vendor/github.com/go-ozzo/ozzo-validation/v4/go.sum new file mode 100644 index 00000000000..7e52201b7f7 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/go.sum @@ -0,0 +1,14 @@ +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= 
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/in.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/in.go new file mode 100644 index 00000000000..97e17ffddeb --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/in.go @@ -0,0 +1,57 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "reflect" +) + +// ErrInInvalid is the error that returns in case of an invalid value for "in" rule. +var ErrInInvalid = NewError("validation_in_invalid", "must be a valid value") + +// In returns a validation rule that checks if a value can be found in the given list of values. +// reflect.DeepEqual() will be used to determine if two values are equal. +// For more details please refer to https://golang.org/pkg/reflect/#DeepEqual +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +func In(values ...interface{}) InRule { + return InRule{ + elements: values, + err: ErrInInvalid, + } +} + +// InRule is a validation rule that validates if a value can be found in the given list of values. +type InRule struct { + elements []interface{} + err Error +} + +// Validate checks if the given value is valid or not. +func (r InRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + for _, e := range r.elements { + if reflect.DeepEqual(e, value) { + return nil + } + } + + return r.err +} + +// Error sets the error message for the rule. +func (r InRule) Error(message string) InRule { + r.err = r.err.SetMessage(message) + return r +} + +// ErrorObject sets the error struct for the rule. +func (r InRule) ErrorObject(err Error) InRule { + r.err = err + return r +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/is/rules.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/is/rules.go new file mode 100644 index 00000000000..3cad2155ba7 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/is/rules.go @@ -0,0 +1,283 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package is provides a list of commonly used string validation rules. +package is + +import ( + "regexp" + "unicode" + + "github.com/asaskevich/govalidator" + validation "github.com/go-ozzo/ozzo-validation/v4" +) + +var ( + // ErrEmail is the error that returns in case of an invalid email. + ErrEmail = validation.NewError("validation_is_email", "must be a valid email address") + // ErrURL is the error that returns in case of an invalid URL. + ErrURL = validation.NewError("validation_is_url", "must be a valid URL") + // ErrRequestURL is the error that returns in case of an invalid request URL. + ErrRequestURL = validation.NewError("validation_is_request_url", "must be a valid request URL") + // ErrRequestURI is the error that returns in case of an invalid request URI. + ErrRequestURI = validation.NewError("validation_request_is_request_uri", "must be a valid request URI") + // ErrAlpha is the error that returns in case of an invalid alpha value. + ErrAlpha = validation.NewError("validation_is_alpha", "must contain English letters only") + // ErrDigit is the error that returns in case of an invalid digit value. 
+ ErrDigit = validation.NewError("validation_is_digit", "must contain digits only") + // ErrAlphanumeric is the error that returns in case of an invalid alphanumeric value. + ErrAlphanumeric = validation.NewError("validation_is_alphanumeric", "must contain English letters and digits only") + // ErrUTFLetter is the error that returns in case of an invalid utf letter value. + ErrUTFLetter = validation.NewError("validation_is_utf_letter", "must contain unicode letter characters only") + // ErrUTFDigit is the error that returns in case of an invalid utf digit value. + ErrUTFDigit = validation.NewError("validation_is_utf_digit", "must contain unicode decimal digits only") + // ErrUTFLetterNumeric is the error that returns in case of an invalid utf numeric or letter value. + ErrUTFLetterNumeric = validation.NewError("validation_is utf_letter_numeric", "must contain unicode letters and numbers only") + // ErrUTFNumeric is the error that returns in case of an invalid utf numeric value. + ErrUTFNumeric = validation.NewError("validation_is_utf_numeric", "must contain unicode number characters only") + // ErrLowerCase is the error that returns in case of an invalid lower case value. + ErrLowerCase = validation.NewError("validation_is_lower_case", "must be in lower case") + // ErrUpperCase is the error that returns in case of an invalid upper case value. + ErrUpperCase = validation.NewError("validation_is_upper_case", "must be in upper case") + // ErrHexadecimal is the error that returns in case of an invalid hexadecimal number. + ErrHexadecimal = validation.NewError("validation_is_hexadecimal", "must be a valid hexadecimal number") + // ErrHexColor is the error that returns in case of an invalid hexadecimal color code. + ErrHexColor = validation.NewError("validation_is_hex_color", "must be a valid hexadecimal color code") + // ErrRGBColor is the error that returns in case of an invalid RGB color code. + ErrRGBColor = validation.NewError("validation_is_rgb_color", "must be a valid RGB color code") + // ErrInt is the error that returns in case of an invalid integer value. + ErrInt = validation.NewError("validation_is_int", "must be an integer number") + // ErrFloat is the error that returns in case of an invalid float value. + ErrFloat = validation.NewError("validation_is_float", "must be a floating point number") + // ErrUUIDv3 is the error that returns in case of an invalid UUIDv3 value. + ErrUUIDv3 = validation.NewError("validation_is_uuid_v3", "must be a valid UUID v3") + // ErrUUIDv4 is the error that returns in case of an invalid UUIDv4 value. + ErrUUIDv4 = validation.NewError("validation_is_uuid_v4", "must be a valid UUID v4") + // ErrUUIDv5 is the error that returns in case of an invalid UUIDv5 value. + ErrUUIDv5 = validation.NewError("validation_is_uuid_v5", "must be a valid UUID v5") + // ErrUUID is the error that returns in case of an invalid UUID value. + ErrUUID = validation.NewError("validation_is_uuid", "must be a valid UUID") + // ErrCreditCard is the error that returns in case of an invalid credit card number. + ErrCreditCard = validation.NewError("validation_is_credit_card", "must be a valid credit card number") + // ErrISBN10 is the error that returns in case of an invalid ISBN-10 value. + ErrISBN10 = validation.NewError("validation_is_isbn_10", "must be a valid ISBN-10") + // ErrISBN13 is the error that returns in case of an invalid ISBN-13 value. 
+ ErrISBN13 = validation.NewError("validation_is_isbn_13", "must be a valid ISBN-13") + // ErrISBN is the error that returns in case of an invalid ISBN value. + ErrISBN = validation.NewError("validation_is_isbn", "must be a valid ISBN") + // ErrJSON is the error that returns in case of an invalid JSON. + ErrJSON = validation.NewError("validation_is_json", "must be in valid JSON format") + // ErrASCII is the error that returns in case of an invalid ASCII. + ErrASCII = validation.NewError("validation_is_ascii", "must contain ASCII characters only") + // ErrPrintableASCII is the error that returns in case of an invalid printable ASCII value. + ErrPrintableASCII = validation.NewError("validation_is_printable_ascii", "must contain printable ASCII characters only") + // ErrMultibyte is the error that returns in case of an invalid multibyte value. + ErrMultibyte = validation.NewError("validation_is_multibyte", "must contain multibyte characters") + // ErrFullWidth is the error that returns in case of an invalid full-width value. + ErrFullWidth = validation.NewError("validation_is_full_width", "must contain full-width characters") + // ErrHalfWidth is the error that returns in case of an invalid half-width value. + ErrHalfWidth = validation.NewError("validation_is_half_width", "must contain half-width characters") + // ErrVariableWidth is the error that returns in case of an invalid variable width value. + ErrVariableWidth = validation.NewError("validation_is_variable_width", "must contain both full-width and half-width characters") + // ErrBase64 is the error that returns in case of an invalid base54 value. + ErrBase64 = validation.NewError("validation_is_base64", "must be encoded in Base64") + // ErrDataURI is the error that returns in case of an invalid data URI. + ErrDataURI = validation.NewError("validation_is_data_uri", "must be a Base64-encoded data URI") + // ErrE164 is the error that returns in case of an invalid e165. + ErrE164 = validation.NewError("validation_is_e164_number", "must be a valid E164 number") + // ErrCountryCode2 is the error that returns in case of an invalid two-letter country code. + ErrCountryCode2 = validation.NewError("validation_is_country_code_2_letter", "must be a valid two-letter country code") + // ErrCountryCode3 is the error that returns in case of an invalid three-letter country code. + ErrCountryCode3 = validation.NewError("validation_is_country_code_3_letter", "must be a valid three-letter country code") + // ErrCurrencyCode is the error that returns in case of an invalid currency code. + ErrCurrencyCode = validation.NewError("validation_is_currency_code", "must be valid ISO 4217 currency code") + // ErrDialString is the error that returns in case of an invalid string. + ErrDialString = validation.NewError("validation_is_dial_string", "must be a valid dial string") + // ErrMac is the error that returns in case of an invalid mac address. + ErrMac = validation.NewError("validation_is_mac_address", "must be a valid MAC address") + // ErrIP is the error that returns in case of an invalid IP. + ErrIP = validation.NewError("validation_is_ip", "must be a valid IP address") + // ErrIPv4 is the error that returns in case of an invalid IPv4. + ErrIPv4 = validation.NewError("validation_is_ipv4", "must be a valid IPv4 address") + // ErrIPv6 is the error that returns in case of an invalid IPv6. + ErrIPv6 = validation.NewError("validation_is_ipv6", "must be a valid IPv6 address") + // ErrSubdomain is the error that returns in case of an invalid subdomain. 
+ ErrSubdomain = validation.NewError("validation_is_sub_domain", "must be a valid subdomain") + // ErrDomain is the error that returns in case of an invalid domain. + ErrDomain = validation.NewError("validation_is_domain", "must be a valid domain") + // ErrDNSName is the error that returns in case of an invalid DNS name. + ErrDNSName = validation.NewError("validation_is_dns_name", "must be a valid DNS name") + // ErrHost is the error that returns in case of an invalid host. + ErrHost = validation.NewError("validation_is_host", "must be a valid IP address or DNS name") + // ErrPort is the error that returns in case of an invalid port. + ErrPort = validation.NewError("validation_is_port", "must be a valid port number") + // ErrMongoID is the error that returns in case of an invalid MongoID. + ErrMongoID = validation.NewError("validation_is_mongo_id", "must be a valid hex-encoded MongoDB ObjectId") + // ErrLatitude is the error that returns in case of an invalid latitude. + ErrLatitude = validation.NewError("validation_is_latitude", "must be a valid latitude") + // ErrLongitude is the error that returns in case of an invalid longitude. + ErrLongitude = validation.NewError("validation_is_longitude", "must be a valid longitude") + // ErrSSN is the error that returns in case of an invalid SSN. + ErrSSN = validation.NewError("validation_is_ssn", "must be a valid social security number") + // ErrSemver is the error that returns in case of an invalid semver. + ErrSemver = validation.NewError("validation_is_semver", "must be a valid semantic version") +) + +var ( + // Email validates if a string is an email or not. It also checks if the MX record exists for the email domain. + Email = validation.NewStringRuleWithError(govalidator.IsExistingEmail, ErrEmail) + // EmailFormat validates if a string is an email or not. Note that it does NOT check if the MX record exists or not. 
+ EmailFormat = validation.NewStringRuleWithError(govalidator.IsEmail, ErrEmail) + // URL validates if a string is a valid URL + URL = validation.NewStringRuleWithError(govalidator.IsURL, ErrURL) + // RequestURL validates if a string is a valid request URL + RequestURL = validation.NewStringRuleWithError(govalidator.IsRequestURL, ErrRequestURL) + // RequestURI validates if a string is a valid request URI + RequestURI = validation.NewStringRuleWithError(govalidator.IsRequestURI, ErrRequestURI) + // Alpha validates if a string contains English letters only (a-zA-Z) + Alpha = validation.NewStringRuleWithError(govalidator.IsAlpha, ErrAlpha) + // Digit validates if a string contains digits only (0-9) + Digit = validation.NewStringRuleWithError(isDigit, ErrDigit) + // Alphanumeric validates if a string contains English letters and digits only (a-zA-Z0-9) + Alphanumeric = validation.NewStringRuleWithError(govalidator.IsAlphanumeric, ErrAlphanumeric) + // UTFLetter validates if a string contains unicode letters only + UTFLetter = validation.NewStringRuleWithError(govalidator.IsUTFLetter, ErrUTFLetter) + // UTFDigit validates if a string contains unicode decimal digits only + UTFDigit = validation.NewStringRuleWithError(govalidator.IsUTFDigit, ErrUTFDigit) + // UTFLetterNumeric validates if a string contains unicode letters and numbers only + UTFLetterNumeric = validation.NewStringRuleWithError(govalidator.IsUTFLetterNumeric, ErrUTFLetterNumeric) + // UTFNumeric validates if a string contains unicode number characters (category N) only + UTFNumeric = validation.NewStringRuleWithError(isUTFNumeric, ErrUTFNumeric) + // LowerCase validates if a string contains lower case unicode letters only + LowerCase = validation.NewStringRuleWithError(govalidator.IsLowerCase, ErrLowerCase) + // UpperCase validates if a string contains upper case unicode letters only + UpperCase = validation.NewStringRuleWithError(govalidator.IsUpperCase, ErrUpperCase) + // Hexadecimal validates if a string is a valid hexadecimal number + Hexadecimal = validation.NewStringRuleWithError(govalidator.IsHexadecimal, ErrHexadecimal) + // HexColor validates if a string is a valid hexadecimal color code + HexColor = validation.NewStringRuleWithError(govalidator.IsHexcolor, ErrHexColor) + // RGBColor validates if a string is a valid RGB color in the form of rgb(R, G, B) + RGBColor = validation.NewStringRuleWithError(govalidator.IsRGBcolor, ErrRGBColor) + // Int validates if a string is a valid integer number + Int = validation.NewStringRuleWithError(govalidator.IsInt, ErrInt) + // Float validates if a string is a floating point number + Float = validation.NewStringRuleWithError(govalidator.IsFloat, ErrFloat) + // UUIDv3 validates if a string is a valid version 3 UUID + UUIDv3 = validation.NewStringRuleWithError(govalidator.IsUUIDv3, ErrUUIDv3) + // UUIDv4 validates if a string is a valid version 4 UUID + UUIDv4 = validation.NewStringRuleWithError(govalidator.IsUUIDv4, ErrUUIDv4) + // UUIDv5 validates if a string is a valid version 5 UUID + UUIDv5 = validation.NewStringRuleWithError(govalidator.IsUUIDv5, ErrUUIDv5) + // UUID validates if a string is a valid UUID + UUID = validation.NewStringRuleWithError(govalidator.IsUUID, ErrUUID) + // CreditCard validates if a string is a valid credit card number + CreditCard = validation.NewStringRuleWithError(govalidator.IsCreditCard, ErrCreditCard) + // ISBN10 validates if a string is an ISBN version 10 + ISBN10 = validation.NewStringRuleWithError(govalidator.IsISBN10, ErrISBN10) + // ISBN13 validates 
if a string is an ISBN version 13 + ISBN13 = validation.NewStringRuleWithError(govalidator.IsISBN13, ErrISBN13) + // ISBN validates if a string is an ISBN (either version 10 or 13) + ISBN = validation.NewStringRuleWithError(isISBN, ErrISBN) + // JSON validates if a string is in valid JSON format + JSON = validation.NewStringRuleWithError(govalidator.IsJSON, ErrJSON) + // ASCII validates if a string contains ASCII characters only + ASCII = validation.NewStringRuleWithError(govalidator.IsASCII, ErrASCII) + // PrintableASCII validates if a string contains printable ASCII characters only + PrintableASCII = validation.NewStringRuleWithError(govalidator.IsPrintableASCII, ErrPrintableASCII) + // Multibyte validates if a string contains multibyte characters + Multibyte = validation.NewStringRuleWithError(govalidator.IsMultibyte, ErrMultibyte) + // FullWidth validates if a string contains full-width characters + FullWidth = validation.NewStringRuleWithError(govalidator.IsFullWidth, ErrFullWidth) + // HalfWidth validates if a string contains half-width characters + HalfWidth = validation.NewStringRuleWithError(govalidator.IsHalfWidth, ErrHalfWidth) + // VariableWidth validates if a string contains both full-width and half-width characters + VariableWidth = validation.NewStringRuleWithError(govalidator.IsVariableWidth, ErrVariableWidth) + // Base64 validates if a string is encoded in Base64 + Base64 = validation.NewStringRuleWithError(govalidator.IsBase64, ErrBase64) + // DataURI validates if a string is a valid base64-encoded data URI + DataURI = validation.NewStringRuleWithError(govalidator.IsDataURI, ErrDataURI) + // E164 validates if a string is a valid ISO3166 Alpha 2 country code + E164 = validation.NewStringRuleWithError(isE164Number, ErrE164) + // CountryCode2 validates if a string is a valid ISO3166 Alpha 2 country code + CountryCode2 = validation.NewStringRuleWithError(govalidator.IsISO3166Alpha2, ErrCountryCode2) + // CountryCode3 validates if a string is a valid ISO3166 Alpha 3 country code + CountryCode3 = validation.NewStringRuleWithError(govalidator.IsISO3166Alpha3, ErrCountryCode3) + // CurrencyCode validates if a string is a valid IsISO4217 currency code. 
+ CurrencyCode = validation.NewStringRuleWithError(govalidator.IsISO4217, ErrCurrencyCode) + // DialString validates if a string is a valid dial string that can be passed to Dial() + DialString = validation.NewStringRuleWithError(govalidator.IsDialString, ErrDialString) + // MAC validates if a string is a MAC address + MAC = validation.NewStringRuleWithError(govalidator.IsMAC, ErrMac) + // IP validates if a string is a valid IP address (either version 4 or 6) + IP = validation.NewStringRuleWithError(govalidator.IsIP, ErrIP) + // IPv4 validates if a string is a valid version 4 IP address + IPv4 = validation.NewStringRuleWithError(govalidator.IsIPv4, ErrIPv4) + // IPv6 validates if a string is a valid version 6 IP address + IPv6 = validation.NewStringRuleWithError(govalidator.IsIPv6, ErrIPv6) + // Subdomain validates if a string is valid subdomain + Subdomain = validation.NewStringRuleWithError(isSubdomain, ErrSubdomain) + // Domain validates if a string is valid domain + Domain = validation.NewStringRuleWithError(isDomain, ErrDomain) + // DNSName validates if a string is valid DNS name + DNSName = validation.NewStringRuleWithError(govalidator.IsDNSName, ErrDNSName) + // Host validates if a string is a valid IP (both v4 and v6) or a valid DNS name + Host = validation.NewStringRuleWithError(govalidator.IsHost, ErrHost) + // Port validates if a string is a valid port number + Port = validation.NewStringRuleWithError(govalidator.IsPort, ErrPort) + // MongoID validates if a string is a valid Mongo ID + MongoID = validation.NewStringRuleWithError(govalidator.IsMongoID, ErrMongoID) + // Latitude validates if a string is a valid latitude + Latitude = validation.NewStringRuleWithError(govalidator.IsLatitude, ErrLatitude) + // Longitude validates if a string is a valid longitude + Longitude = validation.NewStringRuleWithError(govalidator.IsLongitude, ErrLongitude) + // SSN validates if a string is a social security number (SSN) + SSN = validation.NewStringRuleWithError(govalidator.IsSSN, ErrSSN) + // Semver validates if a string is a valid semantic version + Semver = validation.NewStringRuleWithError(govalidator.IsSemver, ErrSemver) +) + +var ( + reDigit = regexp.MustCompile("^[0-9]+$") + // Subdomain regex source: https://stackoverflow.com/a/7933253 + reSubdomain = regexp.MustCompile(`^[A-Za-z0-9](?:[A-Za-z0-9\-]{0,61}[A-Za-z0-9])?$`) + // E164 regex source: https://stackoverflow.com/a/23299989 + reE164 = regexp.MustCompile(`^\+?[1-9]\d{1,14}$`) + // Domain regex source: https://stackoverflow.com/a/7933253 + // Slightly modified: Removed 255 max length validation since Go regex does not + // support lookarounds. 
More info: https://stackoverflow.com/a/38935027 + reDomain = regexp.MustCompile(`^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-z0-9])?\.)+(?:[a-zA-Z]{1,63}| xn--[a-z0-9]{1,59})$`) +) + +func isISBN(value string) bool { + return govalidator.IsISBN(value, 10) || govalidator.IsISBN(value, 13) +} + +func isDigit(value string) bool { + return reDigit.MatchString(value) +} + +func isE164Number(value string) bool { + return reE164.MatchString(value) +} + +func isSubdomain(value string) bool { + return reSubdomain.MatchString(value) +} + +func isDomain(value string) bool { + if len(value) > 255 { + return false + } + + return reDomain.MatchString(value) +} + +func isUTFNumeric(value string) bool { + for _, c := range value { + if unicode.IsNumber(c) == false { + return false + } + } + return true +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/length.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/length.go new file mode 100644 index 00000000000..b26e1fa2851 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/length.go @@ -0,0 +1,104 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "unicode/utf8" +) + +var ( + // ErrLengthTooLong is the error that returns in case of too long length. + ErrLengthTooLong = NewError("validation_length_too_long", "the length must be no more than {{.max}}") + // ErrLengthTooShort is the error that returns in case of too short length. + ErrLengthTooShort = NewError("validation_length_too_short", "the length must be no less than {{.min}}") + // ErrLengthInvalid is the error that returns in case of an invalid length. + ErrLengthInvalid = NewError("validation_length_invalid", "the length must be exactly {{.min}}") + // ErrLengthOutOfRange is the error that returns in case of out of range length. + ErrLengthOutOfRange = NewError("validation_length_out_of_range", "the length must be between {{.min}} and {{.max}}") + // ErrLengthEmptyRequired is the error that returns in case of non-empty value. + ErrLengthEmptyRequired = NewError("validation_length_empty_required", "the value must be empty") +) + +// Length returns a validation rule that checks if a value's length is within the specified range. +// If max is 0, it means there is no upper bound for the length. +// This rule should only be used for validating strings, slices, maps, and arrays. +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +func Length(min, max int) LengthRule { + return LengthRule{min: min, max: max, err: buildLengthRuleError(min, max)} +} + +// RuneLength returns a validation rule that checks if a string's rune length is within the specified range. +// If max is 0, it means there is no upper bound for the length. +// This rule should only be used for validating strings, slices, maps, and arrays. +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +// If the value being validated is not a string, the rule works the same as Length. +func RuneLength(min, max int) LengthRule { + r := Length(min, max) + r.rune = true + + return r +} + +// LengthRule is a validation rule that checks if a value's length is within the specified range. +type LengthRule struct { + err Error + + min, max int + rune bool +} + +// Validate checks if the given value is valid or not. 
+func (r LengthRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + var ( + l int + err error + ) + if s, ok := value.(string); ok && r.rune { + l = utf8.RuneCountInString(s) + } else if l, err = LengthOfValue(value); err != nil { + return err + } + + if r.min > 0 && l < r.min || r.max > 0 && l > r.max || r.min == 0 && r.max == 0 && l > 0 { + return r.err + } + + return nil +} + +// Error sets the error message for the rule. +func (r LengthRule) Error(message string) LengthRule { + r.err = r.err.SetMessage(message) + return r +} + +// ErrorObject sets the error struct for the rule. +func (r LengthRule) ErrorObject(err Error) LengthRule { + r.err = err + return r +} + +func buildLengthRuleError(min, max int) (err Error) { + if min == 0 && max > 0 { + err = ErrLengthTooLong + } else if min > 0 && max == 0 { + err = ErrLengthTooShort + } else if min > 0 && max > 0 { + if min == max { + err = ErrLengthInvalid + } else { + err = ErrLengthOutOfRange + } + } else { + err = ErrLengthEmptyRequired + } + + return err.SetParams(map[string]interface{}{"min": min, "max": max}) +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/map.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/map.go new file mode 100644 index 00000000000..106b6d80a6e --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/map.go @@ -0,0 +1,144 @@ +package validation + +import ( + "context" + "errors" + "fmt" + "reflect" +) + +var ( + // ErrNotMap is the error that the value being validated is not a map. + ErrNotMap = errors.New("only a map can be validated") + + // ErrKeyWrongType is the error returned in case of an incorrect key type. + ErrKeyWrongType = NewError("validation_key_wrong_type", "key not the correct type") + + // ErrKeyMissing is the error returned in case of a missing key. + ErrKeyMissing = NewError("validation_key_missing", "required key is missing") + + // ErrKeyUnexpected is the error returned in case of an unexpected key. + ErrKeyUnexpected = NewError("validation_key_unexpected", "key not expected") +) + +type ( + // MapRule represents a rule set associated with a map. + MapRule struct { + keys []*KeyRules + allowExtraKeys bool + } + + // KeyRules represents a rule set associated with a map key. + KeyRules struct { + key interface{} + optional bool + rules []Rule + } +) + +// Map returns a validation rule that checks the keys and values of a map. +// This rule should only be used for validating maps, or a validation error will be reported. +// Use Key() to specify map keys that need to be validated. Each Key() call specifies a single key which can +// be associated with multiple rules. +// For example, +// validation.Map( +// validation.Key("Name", validation.Required), +// validation.Key("Value", validation.Required, validation.Length(5, 10)), +// ) +// +// A nil value is considered valid. Use the Required rule to make sure a map value is present. +func Map(keys ...*KeyRules) MapRule { + return MapRule{keys: keys} +} + +// AllowExtraKeys configures the rule to ignore extra keys. +func (r MapRule) AllowExtraKeys() MapRule { + r.allowExtraKeys = true + return r +} + +// Validate checks if the given value is valid or not. +func (r MapRule) Validate(m interface{}) error { + return r.ValidateWithContext(nil, m) +} + +// ValidateWithContext checks if the given value is valid or not. 
+func (r MapRule) ValidateWithContext(ctx context.Context, m interface{}) error { + value := reflect.ValueOf(m) + if value.Kind() == reflect.Ptr { + value = value.Elem() + } + if value.Kind() != reflect.Map { + // must be a map + return NewInternalError(ErrNotMap) + } + if value.IsNil() { + // treat a nil map as valid + return nil + } + + errs := Errors{} + kt := value.Type().Key() + + var extraKeys map[interface{}]bool + if !r.allowExtraKeys { + extraKeys = make(map[interface{}]bool, value.Len()) + for _, k := range value.MapKeys() { + extraKeys[k.Interface()] = true + } + } + + for _, kr := range r.keys { + var err error + if kv := reflect.ValueOf(kr.key); !kt.AssignableTo(kv.Type()) { + err = ErrKeyWrongType + } else if vv := value.MapIndex(kv); !vv.IsValid() { + if !kr.optional { + err = ErrKeyMissing + } + } else if ctx == nil { + err = Validate(vv.Interface(), kr.rules...) + } else { + err = ValidateWithContext(ctx, vv.Interface(), kr.rules...) + } + if err != nil { + if ie, ok := err.(InternalError); ok && ie.InternalError() != nil { + return err + } + errs[getErrorKeyName(kr.key)] = err + } + if !r.allowExtraKeys { + delete(extraKeys, kr.key) + } + } + + if !r.allowExtraKeys { + for key := range extraKeys { + errs[getErrorKeyName(key)] = ErrKeyUnexpected + } + } + + if len(errs) > 0 { + return errs + } + return nil +} + +// Key specifies a map key and the corresponding validation rules. +func Key(key interface{}, rules ...Rule) *KeyRules { + return &KeyRules{ + key: key, + rules: rules, + } +} + +// Optional configures the rule to ignore the key if missing. +func (r *KeyRules) Optional() *KeyRules { + r.optional = true + return r +} + +// getErrorKeyName returns the name that should be used to represent the validation error of a map key. +func getErrorKeyName(key interface{}) string { + return fmt.Sprintf("%v", key) +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/match.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/match.go new file mode 100644 index 00000000000..e399bcc0c7a --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/match.go @@ -0,0 +1,56 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "regexp" +) + +// ErrMatchInvalid is the error that returns in case of invalid format. +var ErrMatchInvalid = NewError("validation_match_invalid", "must be in a valid format") + +// Match returns a validation rule that checks if a value matches the specified regular expression. +// This rule should only be used for validating strings and byte slices, or a validation error will be reported. +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +func Match(re *regexp.Regexp) MatchRule { + return MatchRule{ + re: re, + err: ErrMatchInvalid, + } +} + +// MatchRule is a validation rule that checks if a value matches the specified regular expression. +type MatchRule struct { + re *regexp.Regexp + err Error +} + +// Validate checks if the given value is valid or not. +func (r MatchRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil { + return nil + } + + isString, str, isBytes, bs := StringOrBytes(value) + if isString && (str == "" || r.re.MatchString(str)) { + return nil + } else if isBytes && (len(bs) == 0 || r.re.Match(bs)) { + return nil + } + return r.err +} + +// Error sets the error message for the rule. 
+func (r MatchRule) Error(message string) MatchRule { + r.err = r.err.SetMessage(message) + return r +} + +// ErrorObject sets the error struct for the rule. +func (r MatchRule) ErrorObject(err Error) MatchRule { + r.err = err + return r +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/minmax.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/minmax.go new file mode 100644 index 00000000000..f2a119f9bb1 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/minmax.go @@ -0,0 +1,195 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "fmt" + "reflect" + "time" +) + +var ( + // ErrMinGreaterEqualThanRequired is the error that returns when a value is less than a specified threshold. + ErrMinGreaterEqualThanRequired = NewError("validation_min_greater_equal_than_required", "must be no less than {{.threshold}}") + // ErrMaxLessEqualThanRequired is the error that returns when a value is greater than a specified threshold. + ErrMaxLessEqualThanRequired = NewError("validation_max_less_equal_than_required", "must be no greater than {{.threshold}}") + // ErrMinGreaterThanRequired is the error that returns when a value is less than or equal to a specified threshold. + ErrMinGreaterThanRequired = NewError("validation_min_greater_than_required", "must be greater than {{.threshold}}") + // ErrMaxLessThanRequired is the error that returns when a value is greater than or equal to a specified threshold. + ErrMaxLessThanRequired = NewError("validation_max_less_than_required", "must be less than {{.threshold}}") +) + +// ThresholdRule is a validation rule that checks if a value satisfies the specified threshold requirement. +type ThresholdRule struct { + threshold interface{} + operator int + err Error +} + +const ( + greaterThan = iota + greaterEqualThan + lessThan + lessEqualThan +) + +// Min returns a validation rule that checks if a value is greater or equal than the specified value. +// By calling Exclusive, the rule will check if the value is strictly greater than the specified value. +// Note that the value being checked and the threshold value must be of the same type. +// Only int, uint, float and time.Time types are supported. +// An empty value is considered valid. Please use the Required rule to make sure a value is not empty. +func Min(min interface{}) ThresholdRule { + return ThresholdRule{ + threshold: min, + operator: greaterEqualThan, + err: ErrMinGreaterEqualThanRequired, + } + +} + +// Max returns a validation rule that checks if a value is less or equal than the specified value. +// By calling Exclusive, the rule will check if the value is strictly less than the specified value. +// Note that the value being checked and the threshold value must be of the same type. +// Only int, uint, float and time.Time types are supported. +// An empty value is considered valid. Please use the Required rule to make sure a value is not empty. +func Max(max interface{}) ThresholdRule { + return ThresholdRule{ + threshold: max, + operator: lessEqualThan, + err: ErrMaxLessEqualThanRequired, + } +} + +// Exclusive sets the comparison to exclude the boundary value. 
+func (r ThresholdRule) Exclusive() ThresholdRule { + if r.operator == greaterEqualThan { + r.operator = greaterThan + r.err = ErrMinGreaterThanRequired + } else if r.operator == lessEqualThan { + r.operator = lessThan + r.err = ErrMaxLessThanRequired + } + return r +} + +// Validate checks if the given value is valid or not. +func (r ThresholdRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + rv := reflect.ValueOf(r.threshold) + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v, err := ToInt(value) + if err != nil { + return err + } + if r.compareInt(rv.Int(), v) { + return nil + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + v, err := ToUint(value) + if err != nil { + return err + } + if r.compareUint(rv.Uint(), v) { + return nil + } + + case reflect.Float32, reflect.Float64: + v, err := ToFloat(value) + if err != nil { + return err + } + if r.compareFloat(rv.Float(), v) { + return nil + } + + case reflect.Struct: + t, ok := r.threshold.(time.Time) + if !ok { + return fmt.Errorf("type not supported: %v", rv.Type()) + } + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("cannot convert %v to time.Time", reflect.TypeOf(value)) + } + if v.IsZero() || r.compareTime(t, v) { + return nil + } + + default: + return fmt.Errorf("type not supported: %v", rv.Type()) + } + + return r.err.SetParams(map[string]interface{}{"threshold": r.threshold}) +} + +// Error sets the error message for the rule. +func (r ThresholdRule) Error(message string) ThresholdRule { + r.err = r.err.SetMessage(message) + return r +} + +// ErrorObject sets the error struct for the rule. +func (r ThresholdRule) ErrorObject(err Error) ThresholdRule { + r.err = err + return r +} + +func (r ThresholdRule) compareInt(threshold, value int64) bool { + switch r.operator { + case greaterThan: + return value > threshold + case greaterEqualThan: + return value >= threshold + case lessThan: + return value < threshold + default: + return value <= threshold + } +} + +func (r ThresholdRule) compareUint(threshold, value uint64) bool { + switch r.operator { + case greaterThan: + return value > threshold + case greaterEqualThan: + return value >= threshold + case lessThan: + return value < threshold + default: + return value <= threshold + } +} + +func (r ThresholdRule) compareFloat(threshold, value float64) bool { + switch r.operator { + case greaterThan: + return value > threshold + case greaterEqualThan: + return value >= threshold + case lessThan: + return value < threshold + default: + return value <= threshold + } +} + +func (r ThresholdRule) compareTime(threshold, value time.Time) bool { + switch r.operator { + case greaterThan: + return value.After(threshold) + case greaterEqualThan: + return value.After(threshold) || value.Equal(threshold) + case lessThan: + return value.Before(threshold) + default: + return value.Before(threshold) || value.Equal(threshold) + } +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/multipleof.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/multipleof.go new file mode 100644 index 00000000000..864b8b75308 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/multipleof.go @@ -0,0 +1,65 @@ +package validation + +import ( + "fmt" + "reflect" +) + +// ErrMultipleOfInvalid is the error that returns when a value is not multiple of a base. 
+var ErrMultipleOfInvalid = NewError("validation_multiple_of_invalid", "must be multiple of {{.base}}") + +// MultipleOf returns a validation rule that checks if a value is a multiple of the "base" value. +// Note that "base" should be of integer type. +func MultipleOf(base interface{}) MultipleOfRule { + return MultipleOfRule{ + base: base, + err: ErrMultipleOfInvalid, + } +} + +// MultipleOfRule is a validation rule that checks if a value is a multiple of the "base" value. +type MultipleOfRule struct { + base interface{} + err Error +} + +// Error sets the error message for the rule. +func (r MultipleOfRule) Error(message string) MultipleOfRule { + r.err = r.err.SetMessage(message) + return r +} + +// ErrorObject sets the error struct for the rule. +func (r MultipleOfRule) ErrorObject(err Error) MultipleOfRule { + r.err = err + return r +} + +// Validate checks if the value is a multiple of the "base" value. +func (r MultipleOfRule) Validate(value interface{}) error { + rv := reflect.ValueOf(r.base) + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v, err := ToInt(value) + if err != nil { + return err + } + if v%rv.Int() == 0 { + return nil + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + v, err := ToUint(value) + if err != nil { + return err + } + + if v%rv.Uint() == 0 { + return nil + } + default: + return fmt.Errorf("type not supported: %v", rv.Type()) + } + + return r.err.SetParams(map[string]interface{}{"base": r.base}) +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/not_in.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/not_in.go new file mode 100644 index 00000000000..6e35d754284 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/not_in.go @@ -0,0 +1,51 @@ +// Copyright 2018 Qiang Xue, Google LLC. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +// ErrNotInInvalid is the error that returns when a value is in a list. +var ErrNotInInvalid = NewError("validation_not_in_invalid", "must not be in list") + +// NotIn returns a validation rule that checks if a value is absent from the given list of values. +// Note that the value being checked and the possible range of values must be of the same type. +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +func NotIn(values ...interface{}) NotInRule { + return NotInRule{ + elements: values, + err: ErrNotInInvalid, + } +} + +// NotInRule is a validation rule that checks if a value is absent from the given list of values. +type NotInRule struct { + elements []interface{} + err Error +} + +// Validate checks if the given value is valid or not. +func (r NotInRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + for _, e := range r.elements { + if e == value { + return r.err + } + } + return nil +} + +// Error sets the error message for the rule. +func (r NotInRule) Error(message string) NotInRule { + r.err = r.err.SetMessage(message) + return r +} + +// ErrorObject sets the error struct for the rule. 
+func (r NotInRule) ErrorObject(err Error) NotInRule { + r.err = err + return r +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/not_nil.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/not_nil.go new file mode 100644 index 00000000000..aaeb067e324 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/not_nil.go @@ -0,0 +1,44 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +// ErrNotNilRequired is the error that returns when a value is Nil. +var ErrNotNilRequired = NewError("validation_not_nil_required", "is required") + +// NotNil is a validation rule that checks if a value is not nil. +// NotNil only handles types including interface, pointer, slice, and map. +// All other types are considered valid. +var NotNil = notNilRule{} + +type notNilRule struct { + err Error +} + +// Validate checks if the given value is valid or not. +func (r notNilRule) Validate(value interface{}) error { + _, isNil := Indirect(value) + if isNil { + if r.err != nil { + return r.err + } + return ErrNotNilRequired + } + return nil +} + +// Error sets the error message for the rule. +func (r notNilRule) Error(message string) notNilRule { + if r.err == nil { + r.err = ErrNotNilRequired + } + r.err = r.err.SetMessage(message) + return r +} + +// ErrorObject sets the error struct for the rule. +func (r notNilRule) ErrorObject(err Error) notNilRule { + r.err = err + return r +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/required.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/required.go new file mode 100644 index 00000000000..61b888bf470 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/required.go @@ -0,0 +1,74 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +var ( + // ErrRequired is the error that returns when a value is required. + ErrRequired = NewError("validation_required", "cannot be blank") + // ErrNilOrNotEmpty is the error that returns when a value is not nil and is empty. + ErrNilOrNotEmpty = NewError("validation_nil_or_not_empty_required", "cannot be blank") +) + +// Required is a validation rule that checks if a value is not empty. +// A value is considered not empty if +// - integer, float: not zero +// - bool: true +// - string, array, slice, map: len() > 0 +// - interface, pointer: not nil and the referenced value is not empty +// - any other types +var Required = RequiredRule{skipNil: false, condition: true} + +// NilOrNotEmpty checks if a value is a nil pointer or a value that is not empty. +// NilOrNotEmpty differs from Required in that it treats a nil pointer as valid. +var NilOrNotEmpty = RequiredRule{skipNil: true, condition: true} + +// RequiredRule is a rule that checks if a value is not empty. +type RequiredRule struct { + condition bool + skipNil bool + err Error +} + +// Validate checks if the given value is valid or not. +func (r RequiredRule) Validate(value interface{}) error { + if r.condition { + value, isNil := Indirect(value) + if r.skipNil && !isNil && IsEmpty(value) || !r.skipNil && (isNil || IsEmpty(value)) { + if r.err != nil { + return r.err + } + if r.skipNil { + return ErrNilOrNotEmpty + } + return ErrRequired + } + } + return nil +} + +// When sets the condition that determines if the validation should be performed. 
+func (r RequiredRule) When(condition bool) RequiredRule { + r.condition = condition + return r +} + +// Error sets the error message for the rule. +func (r RequiredRule) Error(message string) RequiredRule { + if r.err == nil { + if r.skipNil { + r.err = ErrNilOrNotEmpty + } else { + r.err = ErrRequired + } + } + r.err = r.err.SetMessage(message) + return r +} + +// ErrorObject sets the error struct for the rule. +func (r RequiredRule) ErrorObject(err Error) RequiredRule { + r.err = err + return r +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/string.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/string.go new file mode 100644 index 00000000000..d6d45d89d08 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/string.go @@ -0,0 +1,64 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +type stringValidator func(string) bool + +// StringRule is a rule that checks a string variable using a specified stringValidator. +type StringRule struct { + validate stringValidator + err Error +} + +// NewStringRule creates a new validation rule using a function that takes a string value and returns a bool. +// The rule returned will use the function to check if a given string or byte slice is valid or not. +// An empty value is considered to be valid. Please use the Required rule to make sure a value is not empty. +func NewStringRule(validator stringValidator, message string) StringRule { + return StringRule{ + validate: validator, + err: NewError("", message), + } +} + +// NewStringRuleWithError creates a new validation rule using a function that takes a string value and returns a bool. +// The rule returned will use the function to check if a given string or byte slice is valid or not. +// An empty value is considered to be valid. Please use the Required rule to make sure a value is not empty. +func NewStringRuleWithError(validator stringValidator, err Error) StringRule { + return StringRule{ + validate: validator, + err: err, + } +} + +// Error sets the error message for the rule. +func (r StringRule) Error(message string) StringRule { + r.err = r.err.SetMessage(message) + return r +} + +// ErrorObject sets the error struct for the rule. +func (r StringRule) ErrorObject(err Error) StringRule { + r.err = err + return r +} + +// Validate checks if the given value is valid or not. +func (r StringRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + str, err := EnsureString(value) + if err != nil { + return err + } + + if r.validate(str) { + return nil + } + + return r.err +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/struct.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/struct.go new file mode 100644 index 00000000000..d63619dae60 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/struct.go @@ -0,0 +1,169 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" +) + +var ( + // ErrStructPointer is the error that a struct being validated is not specified as a pointer. + ErrStructPointer = errors.New("only a pointer to a struct can be validated") +) + +type ( + // ErrFieldPointer is the error that a field is not specified as a pointer. 
+ ErrFieldPointer int + + // ErrFieldNotFound is the error that a field cannot be found in the struct. + ErrFieldNotFound int + + // FieldRules represents a rule set associated with a struct field. + FieldRules struct { + fieldPtr interface{} + rules []Rule + } +) + +// Error returns the error string of ErrFieldPointer. +func (e ErrFieldPointer) Error() string { + return fmt.Sprintf("field #%v must be specified as a pointer", int(e)) +} + +// Error returns the error string of ErrFieldNotFound. +func (e ErrFieldNotFound) Error() string { + return fmt.Sprintf("field #%v cannot be found in the struct", int(e)) +} + +// ValidateStruct validates a struct by checking the specified struct fields against the corresponding validation rules. +// Note that the struct being validated must be specified as a pointer to it. If the pointer is nil, it is considered valid. +// Use Field() to specify struct fields that need to be validated. Each Field() call specifies a single field which +// should be specified as a pointer to the field. A field can be associated with multiple rules. +// For example, +// +// value := struct { +// Name string +// Value string +// }{"name", "demo"} +// err := validation.ValidateStruct(&value, +// validation.Field(&a.Name, validation.Required), +// validation.Field(&a.Value, validation.Required, validation.Length(5, 10)), +// ) +// fmt.Println(err) +// // Value: the length must be between 5 and 10. +// +// An error will be returned if validation fails. +func ValidateStruct(structPtr interface{}, fields ...*FieldRules) error { + return ValidateStructWithContext(nil, structPtr, fields...) +} + +// ValidateStructWithContext validates a struct with the given context. +// The only difference between ValidateStructWithContext and ValidateStruct is that the former will +// validate struct fields with the provided context. +// Please refer to ValidateStruct for the detailed instructions on how to use this function. +func ValidateStructWithContext(ctx context.Context, structPtr interface{}, fields ...*FieldRules) error { + value := reflect.ValueOf(structPtr) + if value.Kind() != reflect.Ptr || !value.IsNil() && value.Elem().Kind() != reflect.Struct { + // must be a pointer to a struct + return NewInternalError(ErrStructPointer) + } + if value.IsNil() { + // treat a nil struct pointer as valid + return nil + } + value = value.Elem() + + errs := Errors{} + + for i, fr := range fields { + fv := reflect.ValueOf(fr.fieldPtr) + if fv.Kind() != reflect.Ptr { + return NewInternalError(ErrFieldPointer(i)) + } + ft := findStructField(value, fv) + if ft == nil { + return NewInternalError(ErrFieldNotFound(i)) + } + var err error + if ctx == nil { + err = Validate(fv.Elem().Interface(), fr.rules...) + } else { + err = ValidateWithContext(ctx, fv.Elem().Interface(), fr.rules...) + } + if err != nil { + if ie, ok := err.(InternalError); ok && ie.InternalError() != nil { + return err + } + if ft.Anonymous { + // merge errors from anonymous struct field + if es, ok := err.(Errors); ok { + for name, value := range es { + errs[name] = value + } + continue + } + } + errs[getErrorFieldName(ft)] = err + } + } + + if len(errs) > 0 { + return errs + } + return nil +} + +// Field specifies a struct field and the corresponding validation rules. +// The struct field must be specified as a pointer to it. +func Field(fieldPtr interface{}, rules ...Rule) *FieldRules { + return &FieldRules{ + fieldPtr: fieldPtr, + rules: rules, + } +} + +// findStructField looks for a field in the given struct. 
+// The field being looked for should be a pointer to the actual struct field. +// If found, the field info will be returned. Otherwise, nil will be returned. +func findStructField(structValue reflect.Value, fieldValue reflect.Value) *reflect.StructField { + ptr := fieldValue.Pointer() + for i := structValue.NumField() - 1; i >= 0; i-- { + sf := structValue.Type().Field(i) + if ptr == structValue.Field(i).UnsafeAddr() { + // do additional type comparison because it's possible that the address of + // an embedded struct is the same as the first field of the embedded struct + if sf.Type == fieldValue.Elem().Type() { + return &sf + } + } + if sf.Anonymous { + // delve into anonymous struct to look for the field + fi := structValue.Field(i) + if sf.Type.Kind() == reflect.Ptr { + fi = fi.Elem() + } + if fi.Kind() == reflect.Struct { + if f := findStructField(fi, fieldValue); f != nil { + return f + } + } + } + } + return nil +} + +// getErrorFieldName returns the name that should be used to represent the validation error of a struct field. +func getErrorFieldName(f *reflect.StructField) string { + if tag := f.Tag.Get(ErrorTag); tag != "" && tag != "-" { + if cps := strings.SplitN(tag, ",", 2); cps[0] != "" { + return cps[0] + } + } + return f.Name +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/util.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/util.go new file mode 100644 index 00000000000..b15fd9a2979 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/util.go @@ -0,0 +1,163 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "database/sql/driver" + "errors" + "fmt" + "reflect" + "time" +) + +var ( + bytesType = reflect.TypeOf([]byte(nil)) + valuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() +) + +// EnsureString ensures the given value is a string. +// If the value is a byte slice, it will be typecast into a string. +// An error is returned otherwise. +func EnsureString(value interface{}) (string, error) { + v := reflect.ValueOf(value) + if v.Kind() == reflect.String { + return v.String(), nil + } + if v.Type() == bytesType { + return string(v.Interface().([]byte)), nil + } + return "", errors.New("must be either a string or byte slice") +} + +// StringOrBytes typecasts a value into a string or byte slice. +// Boolean flags are returned to indicate if the typecasting succeeds or not. +func StringOrBytes(value interface{}) (isString bool, str string, isBytes bool, bs []byte) { + v := reflect.ValueOf(value) + if v.Kind() == reflect.String { + str = v.String() + isString = true + } else if v.Kind() == reflect.Slice && v.Type() == bytesType { + bs = v.Interface().([]byte) + isBytes = true + } + return +} + +// LengthOfValue returns the length of a value that is a string, slice, map, or array. +// An error is returned for all other types. +func LengthOfValue(value interface{}) (int, error) { + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.String, reflect.Slice, reflect.Map, reflect.Array: + return v.Len(), nil + } + return 0, fmt.Errorf("cannot get the length of %v", v.Kind()) +} + +// ToInt converts the given value to an int64. +// An error is returned for all incompatible types. 
+func ToInt(value interface{}) (int64, error) { + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int(), nil + } + return 0, fmt.Errorf("cannot convert %v to int64", v.Kind()) +} + +// ToUint converts the given value to an uint64. +// An error is returned for all incompatible types. +func ToUint(value interface{}) (uint64, error) { + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint(), nil + } + return 0, fmt.Errorf("cannot convert %v to uint64", v.Kind()) +} + +// ToFloat converts the given value to a float64. +// An error is returned for all incompatible types. +func ToFloat(value interface{}) (float64, error) { + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.Float32, reflect.Float64: + return v.Float(), nil + } + return 0, fmt.Errorf("cannot convert %v to float64", v.Kind()) +} + +// IsEmpty checks if a value is empty or not. +// A value is considered empty if +// - integer, float: zero +// - bool: false +// - string, array: len() == 0 +// - slice, map: nil or len() == 0 +// - interface, pointer: nil or the referenced value is empty +func IsEmpty(value interface{}) bool { + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.String, reflect.Array, reflect.Map, reflect.Slice: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Invalid: + return true + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + return IsEmpty(v.Elem().Interface()) + case reflect.Struct: + v, ok := value.(time.Time) + if ok && v.IsZero() { + return true + } + } + + return false +} + +// Indirect returns the value that the given interface or pointer references to. +// If the value implements driver.Valuer, it will deal with the value returned by +// the Value() method instead. A boolean value is also returned to indicate if +// the value is nil or not (only applicable to interface, pointer, map, and slice). +// If the value is neither an interface nor a pointer, it will be returned back. +func Indirect(value interface{}) (interface{}, bool) { + rv := reflect.ValueOf(value) + kind := rv.Kind() + switch kind { + case reflect.Invalid: + return nil, true + case reflect.Ptr, reflect.Interface: + if rv.IsNil() { + return nil, true + } + return Indirect(rv.Elem().Interface()) + case reflect.Slice, reflect.Map, reflect.Func, reflect.Chan: + if rv.IsNil() { + return nil, true + } + } + + if rv.Type().Implements(valuerType) { + return indirectValuer(value.(driver.Valuer)) + } + + return value, false +} + +func indirectValuer(valuer driver.Valuer) (interface{}, bool) { + if value, err := valuer.Value(); value != nil && err == nil { + return Indirect(value) + } + return nil, true +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/validation.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/validation.go new file mode 100644 index 00000000000..ec7a1615068 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/validation.go @@ -0,0 +1,272 @@ +// Copyright 2016 Qiang Xue. All rights reserved. 
+// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package validation provides configurable and extensible rules for validating data of various types. +package validation + +import ( + "context" + "fmt" + "reflect" + "strconv" +) + +type ( + // Validatable is the interface indicating the type implementing it supports data validation. + Validatable interface { + // Validate validates the data and returns an error if validation fails. + Validate() error + } + + // ValidatableWithContext is the interface indicating the type implementing it supports context-aware data validation. + ValidatableWithContext interface { + // ValidateWithContext validates the data with the given context and returns an error if validation fails. + ValidateWithContext(ctx context.Context) error + } + + // Rule represents a validation rule. + Rule interface { + // Validate validates a value and returns a value if validation fails. + Validate(value interface{}) error + } + + // RuleWithContext represents a context-aware validation rule. + RuleWithContext interface { + // ValidateWithContext validates a value and returns a value if validation fails. + ValidateWithContext(ctx context.Context, value interface{}) error + } + + // RuleFunc represents a validator function. + // You may wrap it as a Rule by calling By(). + RuleFunc func(value interface{}) error + + // RuleWithContextFunc represents a validator function that is context-aware. + // You may wrap it as a Rule by calling WithContext(). + RuleWithContextFunc func(ctx context.Context, value interface{}) error +) + +var ( + // ErrorTag is the struct tag name used to customize the error field name for a struct field. + ErrorTag = "json" + + // Skip is a special validation rule that indicates all rules following it should be skipped. + Skip = skipRule{skip: true} + + validatableType = reflect.TypeOf((*Validatable)(nil)).Elem() + validatableWithContextType = reflect.TypeOf((*ValidatableWithContext)(nil)).Elem() +) + +// Validate validates the given value and returns the validation error, if any. +// +// Validate performs validation using the following steps: +// 1. For each rule, call its `Validate()` to validate the value. Return if any error is found. +// 2. If the value being validated implements `Validatable`, call the value's `Validate()`. +// Return with the validation result. +// 3. If the value being validated is a map/slice/array, and the element type implements `Validatable`, +// for each element call the element value's `Validate()`. Return with the validation result. +func Validate(value interface{}, rules ...Rule) error { + for _, rule := range rules { + if s, ok := rule.(skipRule); ok && s.skip { + return nil + } + if err := rule.Validate(value); err != nil { + return err + } + } + + rv := reflect.ValueOf(value) + if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface) && rv.IsNil() { + return nil + } + + if v, ok := value.(Validatable); ok { + return v.Validate() + } + + switch rv.Kind() { + case reflect.Map: + if rv.Type().Elem().Implements(validatableType) { + return validateMap(rv) + } + case reflect.Slice, reflect.Array: + if rv.Type().Elem().Implements(validatableType) { + return validateSlice(rv) + } + case reflect.Ptr, reflect.Interface: + return Validate(rv.Elem().Interface()) + } + + return nil +} + +// ValidateWithContext validates the given value with the given context and returns the validation error, if any. 
+// +// ValidateWithContext performs validation using the following steps: +// 1. For each rule, call its `ValidateWithContext()` to validate the value if the rule implements `RuleWithContext`. +// Otherwise call `Validate()` of the rule. Return if any error is found. +// 2. If the value being validated implements `ValidatableWithContext`, call the value's `ValidateWithContext()` +// and return with the validation result. +// 3. If the value being validated implements `Validatable`, call the value's `Validate()` +// and return with the validation result. +// 4. If the value being validated is a map/slice/array, and the element type implements `ValidatableWithContext`, +// for each element call the element value's `ValidateWithContext()`. Return with the validation result. +// 5. If the value being validated is a map/slice/array, and the element type implements `Validatable`, +// for each element call the element value's `Validate()`. Return with the validation result. +func ValidateWithContext(ctx context.Context, value interface{}, rules ...Rule) error { + for _, rule := range rules { + if s, ok := rule.(skipRule); ok && s.skip { + return nil + } + if rc, ok := rule.(RuleWithContext); ok { + if err := rc.ValidateWithContext(ctx, value); err != nil { + return err + } + } else if err := rule.Validate(value); err != nil { + return err + } + } + + rv := reflect.ValueOf(value) + if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface) && rv.IsNil() { + return nil + } + + if v, ok := value.(ValidatableWithContext); ok { + return v.ValidateWithContext(ctx) + } + + if v, ok := value.(Validatable); ok { + return v.Validate() + } + + switch rv.Kind() { + case reflect.Map: + if rv.Type().Elem().Implements(validatableWithContextType) { + return validateMapWithContext(ctx, rv) + } + if rv.Type().Elem().Implements(validatableType) { + return validateMap(rv) + } + case reflect.Slice, reflect.Array: + if rv.Type().Elem().Implements(validatableWithContextType) { + return validateSliceWithContext(ctx, rv) + } + if rv.Type().Elem().Implements(validatableType) { + return validateSlice(rv) + } + case reflect.Ptr, reflect.Interface: + return ValidateWithContext(ctx, rv.Elem().Interface()) + } + + return nil +} + +// validateMap validates a map of validatable elements +func validateMap(rv reflect.Value) error { + errs := Errors{} + for _, key := range rv.MapKeys() { + if mv := rv.MapIndex(key).Interface(); mv != nil { + if err := mv.(Validatable).Validate(); err != nil { + errs[fmt.Sprintf("%v", key.Interface())] = err + } + } + } + if len(errs) > 0 { + return errs + } + return nil +} + +// validateMapWithContext validates a map of validatable elements with the given context. 
+func validateMapWithContext(ctx context.Context, rv reflect.Value) error { + errs := Errors{} + for _, key := range rv.MapKeys() { + if mv := rv.MapIndex(key).Interface(); mv != nil { + if err := mv.(ValidatableWithContext).ValidateWithContext(ctx); err != nil { + errs[fmt.Sprintf("%v", key.Interface())] = err + } + } + } + if len(errs) > 0 { + return errs + } + return nil +} + +// validateSlice validates a slice/array of validatable elements +func validateSlice(rv reflect.Value) error { + errs := Errors{} + l := rv.Len() + for i := 0; i < l; i++ { + if ev := rv.Index(i).Interface(); ev != nil { + if err := ev.(Validatable).Validate(); err != nil { + errs[strconv.Itoa(i)] = err + } + } + } + if len(errs) > 0 { + return errs + } + return nil +} + +// validateSliceWithContext validates a slice/array of validatable elements with the given context. +func validateSliceWithContext(ctx context.Context, rv reflect.Value) error { + errs := Errors{} + l := rv.Len() + for i := 0; i < l; i++ { + if ev := rv.Index(i).Interface(); ev != nil { + if err := ev.(ValidatableWithContext).ValidateWithContext(ctx); err != nil { + errs[strconv.Itoa(i)] = err + } + } + } + if len(errs) > 0 { + return errs + } + return nil +} + +type skipRule struct { + skip bool +} + +func (r skipRule) Validate(interface{}) error { + return nil +} + +// When determines if all rules following it should be skipped. +func (r skipRule) When(condition bool) skipRule { + r.skip = condition + return r +} + +type inlineRule struct { + f RuleFunc + fc RuleWithContextFunc +} + +func (r *inlineRule) Validate(value interface{}) error { + if r.f == nil { + return r.fc(context.Background(), value) + } + return r.f(value) +} + +func (r *inlineRule) ValidateWithContext(ctx context.Context, value interface{}) error { + if r.fc == nil { + return r.f(value) + } + return r.fc(ctx, value) +} + +// By wraps a RuleFunc into a Rule. +func By(f RuleFunc) Rule { + return &inlineRule{f: f} +} + +// WithContext wraps a RuleWithContextFunc into a context-aware Rule. +func WithContext(f RuleWithContextFunc) Rule { + return &inlineRule{fc: f} +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/v4/when.go b/vendor/github.com/go-ozzo/ozzo-validation/v4/when.go new file mode 100644 index 00000000000..7bcdff5aec1 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/v4/when.go @@ -0,0 +1,47 @@ +package validation + +import "context" + +// When returns a validation rule that executes the given list of rules when the condition is true. +func When(condition bool, rules ...Rule) WhenRule { + return WhenRule{ + condition: condition, + rules: rules, + elseRules: []Rule{}, + } +} + +// WhenRule is a validation rule that executes the given list of rules when the condition is true. +type WhenRule struct { + condition bool + rules []Rule + elseRules []Rule +} + +// Validate checks if the condition is true and if so, it validates the value using the specified rules. +func (r WhenRule) Validate(value interface{}) error { + return r.ValidateWithContext(nil, value) +} + +// ValidateWithContext checks if the condition is true and if so, it validates the value using the specified rules. +func (r WhenRule) ValidateWithContext(ctx context.Context, value interface{}) error { + if r.condition { + if ctx == nil { + return Validate(value, r.rules...) + } else { + return ValidateWithContext(ctx, value, r.rules...) + } + } + + if ctx == nil { + return Validate(value, r.elseRules...) + } else { + return ValidateWithContext(ctx, value, r.elseRules...) 
+ } +} + +// Else returns a validation rule that executes the given list of rules when the condition is false. +func (r WhenRule) Else(rules ...Rule) WhenRule { + r.elseRules = rules + return r +} diff --git a/vendor/github.com/go-playground/locales/.gitignore b/vendor/github.com/go-playground/locales/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/go-playground/locales/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/go-playground/locales/.travis.yml b/vendor/github.com/go-playground/locales/.travis.yml new file mode 100644 index 00000000000..d50237a6089 --- /dev/null +++ b/vendor/github.com/go-playground/locales/.travis.yml @@ -0,0 +1,26 @@ +language: go +go: + - 1.13.1 + - tip +matrix: + allow_failures: + - go: tip + +notifications: + email: + recipients: dean.karn@gmail.com + on_success: change + on_failure: always + +before_install: + - go install github.com/mattn/goveralls + +# Only clone the most recent commit. +git: + depth: 1 + +script: + - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./... + +after_success: | + goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN \ No newline at end of file diff --git a/vendor/github.com/go-playground/locales/LICENSE b/vendor/github.com/go-playground/locales/LICENSE new file mode 100644 index 00000000000..75854ac4f01 --- /dev/null +++ b/vendor/github.com/go-playground/locales/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/go-playground/locales/README.md b/vendor/github.com/go-playground/locales/README.md new file mode 100644 index 00000000000..5b0694fd19a --- /dev/null +++ b/vendor/github.com/go-playground/locales/README.md @@ -0,0 +1,172 @@ +## locales +![Project status](https://img.shields.io/badge/version-0.14.0-green.svg) +[![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales) +[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales) +![License](https://img.shields.io/dub/l/vibe-d.svg) +[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) + +Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within +an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator). + +Features +-------- +- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere ) + +Full Tests +-------------------- +I could sure use your help adding tests for every locale, it is a huge undertaking and I just don't have the free time to do it all at the moment; +any help would be **greatly appreciated!!!!** please see [issue](https://github.com/go-playground/locales/issues/1) for details. + +Installation +----------- + +Use go get + +```shell +go get github.com/go-playground/locales +``` + +NOTES +-------- +You'll notice most return types are []byte, this is because most of the time the results will be concatenated with a larger body +of text and can avoid some allocations if already appending to a byte array, otherwise just cast as string. + +Usage +------- +```go +package main + +import ( + "fmt" + "time" + + "github.com/go-playground/locales/currency" + "github.com/go-playground/locales/en_CA" +) + +func main() { + + loc, _ := time.LoadLocation("America/Toronto") + datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc) + + l := en_CA.New() + + // Dates + fmt.Println(l.FmtDateFull(datetime)) + fmt.Println(l.FmtDateLong(datetime)) + fmt.Println(l.FmtDateMedium(datetime)) + fmt.Println(l.FmtDateShort(datetime)) + + // Times + fmt.Println(l.FmtTimeFull(datetime)) + fmt.Println(l.FmtTimeLong(datetime)) + fmt.Println(l.FmtTimeMedium(datetime)) + fmt.Println(l.FmtTimeShort(datetime)) + + // Months Wide + fmt.Println(l.MonthWide(time.January)) + fmt.Println(l.MonthWide(time.February)) + fmt.Println(l.MonthWide(time.March)) + // ... + + // Months Abbreviated + fmt.Println(l.MonthAbbreviated(time.January)) + fmt.Println(l.MonthAbbreviated(time.February)) + fmt.Println(l.MonthAbbreviated(time.March)) + // ... 
+ + // Months Narrow + fmt.Println(l.MonthNarrow(time.January)) + fmt.Println(l.MonthNarrow(time.February)) + fmt.Println(l.MonthNarrow(time.March)) + // ... + + // Weekdays Wide + fmt.Println(l.WeekdayWide(time.Sunday)) + fmt.Println(l.WeekdayWide(time.Monday)) + fmt.Println(l.WeekdayWide(time.Tuesday)) + // ... + + // Weekdays Abbreviated + fmt.Println(l.WeekdayAbbreviated(time.Sunday)) + fmt.Println(l.WeekdayAbbreviated(time.Monday)) + fmt.Println(l.WeekdayAbbreviated(time.Tuesday)) + // ... + + // Weekdays Short + fmt.Println(l.WeekdayShort(time.Sunday)) + fmt.Println(l.WeekdayShort(time.Monday)) + fmt.Println(l.WeekdayShort(time.Tuesday)) + // ... + + // Weekdays Narrow + fmt.Println(l.WeekdayNarrow(time.Sunday)) + fmt.Println(l.WeekdayNarrow(time.Monday)) + fmt.Println(l.WeekdayNarrow(time.Tuesday)) + // ... + + var f64 float64 + + f64 = -10356.4523 + + // Number + fmt.Println(l.FmtNumber(f64, 2)) + + // Currency + fmt.Println(l.FmtCurrency(f64, 2, currency.CAD)) + fmt.Println(l.FmtCurrency(f64, 2, currency.USD)) + + // Accounting + fmt.Println(l.FmtAccounting(f64, 2, currency.CAD)) + fmt.Println(l.FmtAccounting(f64, 2, currency.USD)) + + f64 = 78.12 + + // Percent + fmt.Println(l.FmtPercent(f64, 0)) + + // Plural Rules for locale, so you know what rules you must cover + fmt.Println(l.PluralsCardinal()) + fmt.Println(l.PluralsOrdinal()) + + // Cardinal Plural Rules + fmt.Println(l.CardinalPluralRule(1, 0)) + fmt.Println(l.CardinalPluralRule(1.0, 0)) + fmt.Println(l.CardinalPluralRule(1.0, 1)) + fmt.Println(l.CardinalPluralRule(3, 0)) + + // Ordinal Plural Rules + fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st + fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd + fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd + fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th + + // Range Plural Rules + fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1 + fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2 + fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8 +} +``` + +NOTES: +------- +These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/), if you encounter any issues +I strongly encourage contributing to the CLDR project to get the locale information corrected and the next time +these locales are regenerated the fix will come with. + +I do however realize that time constraints are often important and so there are two options: + +1. Create your own locale, copy, paste and modify, and ensure it complies with the `Translator` interface. +2. Add an exception in the locale generation code directly and once regenerated, fix will be in place. + +Please to not make fixes inside the locale files, they WILL get overwritten when the locales are regenerated. + +License +------ +Distributed under MIT License, please see license file in code for more details. 
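Stepping back to the ozzo-validation package vendored earlier in this patch: its rules (Required, Length, Match, Min/Max, When) are meant to be composed through ValidateStruct and Field rather than called one by one. The sketch below is a minimal illustration; the Node struct, its fields, and the regular expression are hypothetical, and only the import path and rule names come from the vendored sources.

```go
package main

import (
	"fmt"
	"regexp"

	validation "github.com/go-ozzo/ozzo-validation/v4"
)

// Node is a hypothetical struct used only to show how the rules compose.
type Node struct {
	Name     string
	Port     int
	Replicas int
}

func main() {
	n := Node{Name: "worker-1", Port: 70000, Replicas: 0}

	err := validation.ValidateStruct(&n,
		// Rules on a field run left to right; the first failure is reported for that field.
		validation.Field(&n.Name,
			validation.Required,
			validation.Length(1, 63),
			validation.Match(regexp.MustCompile(`^[a-z0-9-]+$`))),
		// Min/Max compare against a threshold of the same type; bounds are inclusive
		// unless Exclusive() is chained.
		validation.Field(&n.Port,
			validation.Required,
			validation.Min(1),
			validation.Max(65535)),
		// When applies its rules only while the condition holds
		// (Else, not used here, covers the false branch).
		validation.Field(&n.Replicas,
			validation.When(n.Port != 0, validation.Required, validation.Min(1))),
	)

	// Failures come back as an Errors map keyed by field name
	// (a "json" tag, if present, overrides the name).
	fmt.Println(err)
	// Output (roughly): Port: must be no greater than 65535; Replicas: cannot be blank.
}
```

The same rules also validate standalone values through validation.Validate, which the struct and map helpers call internally for each field or key.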
diff --git a/vendor/github.com/go-playground/locales/currency/currency.go b/vendor/github.com/go-playground/locales/currency/currency.go new file mode 100644 index 00000000000..b5a95fb0743 --- /dev/null +++ b/vendor/github.com/go-playground/locales/currency/currency.go @@ -0,0 +1,311 @@ +package currency + +// Type is the currency type associated with the locales currency enum +type Type int + +// locale currencies +const ( + ADP Type = iota + AED + AFA + AFN + ALK + ALL + AMD + ANG + AOA + AOK + AON + AOR + ARA + ARL + ARM + ARP + ARS + ATS + AUD + AWG + AZM + AZN + BAD + BAM + BAN + BBD + BDT + BEC + BEF + BEL + BGL + BGM + BGN + BGO + BHD + BIF + BMD + BND + BOB + BOL + BOP + BOV + BRB + BRC + BRE + BRL + BRN + BRR + BRZ + BSD + BTN + BUK + BWP + BYB + BYN + BYR + BZD + CAD + CDF + CHE + CHF + CHW + CLE + CLF + CLP + CNH + CNX + CNY + COP + COU + CRC + CSD + CSK + CUC + CUP + CVE + CYP + CZK + DDM + DEM + DJF + DKK + DOP + DZD + ECS + ECV + EEK + EGP + ERN + ESA + ESB + ESP + ETB + EUR + FIM + FJD + FKP + FRF + GBP + GEK + GEL + GHC + GHS + GIP + GMD + GNF + GNS + GQE + GRD + GTQ + GWE + GWP + GYD + HKD + HNL + HRD + HRK + HTG + HUF + IDR + IEP + ILP + ILR + ILS + INR + IQD + IRR + ISJ + ISK + ITL + JMD + JOD + JPY + KES + KGS + KHR + KMF + KPW + KRH + KRO + KRW + KWD + KYD + KZT + LAK + LBP + LKR + LRD + LSL + LTL + LTT + LUC + LUF + LUL + LVL + LVR + LYD + MAD + MAF + MCF + MDC + MDL + MGA + MGF + MKD + MKN + MLF + MMK + MNT + MOP + MRO + MRU + MTL + MTP + MUR + MVP + MVR + MWK + MXN + MXP + MXV + MYR + MZE + MZM + MZN + NAD + NGN + NIC + NIO + NLG + NOK + NPR + NZD + OMR + PAB + PEI + PEN + PES + PGK + PHP + PKR + PLN + PLZ + PTE + PYG + QAR + RHD + ROL + RON + RSD + RUB + RUR + RWF + SAR + SBD + SCR + SDD + SDG + SDP + SEK + SGD + SHP + SIT + SKK + SLL + SOS + SRD + SRG + SSP + STD + STN + SUR + SVC + SYP + SZL + THB + TJR + TJS + TMM + TMT + TND + TOP + TPE + TRL + TRY + TTD + TWD + TZS + UAH + UAK + UGS + UGX + USD + USN + USS + UYI + UYP + UYU + UYW + UZS + VEB + VEF + VES + VND + VNN + VUV + WST + XAF + XAG + XAU + XBA + XBB + XBC + XBD + XCD + XDR + XEU + XFO + XFU + XOF + XPD + XPF + XPT + XRE + XSU + XTS + XUA + XXX + YDD + YER + YUD + YUM + YUN + YUR + ZAL + ZAR + ZMK + ZMW + ZRN + ZRZ + ZWD + ZWL + ZWR +) diff --git a/vendor/github.com/go-playground/locales/go.mod b/vendor/github.com/go-playground/locales/go.mod new file mode 100644 index 00000000000..41b8dc1048e --- /dev/null +++ b/vendor/github.com/go-playground/locales/go.mod @@ -0,0 +1,5 @@ +module github.com/go-playground/locales + +go 1.13 + +require golang.org/x/text v0.3.6 diff --git a/vendor/github.com/go-playground/locales/go.sum b/vendor/github.com/go-playground/locales/go.sum new file mode 100644 index 00000000000..3c12b4ff2a3 --- /dev/null +++ b/vendor/github.com/go-playground/locales/go.sum @@ -0,0 +1,3 @@ +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/go-playground/locales/logo.png b/vendor/github.com/go-playground/locales/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..3038276e6873076ecd542099e95b25037b1c0c34 GIT binary patch literal 37360 zcmV(-K-|BHP)ysx*JK zMpBX=dbK}5gAy}y3R{{beYQe$t~_(EI9HY=983!yQ4BbH2t9fee6m7kr7b^x7%_4X zJ9`gfp(=T?K~s?>F>nbtbqz;?9&D*HLWLAVf)p1<2qIzzh`mlUaSo2aQfQ+xioj4u 
zI1_Q3DVsLgSl}66j7WD;=s}?$S-cy80-8+ch&XVIL_$si1Og)D2z>-dZ~}Q}gudOS z{YC!;=wbbGwMO6f$MWLF!zUv*^b3aRq_o%5)MaV%TD9TEjVBK$z}XWMPr&V3fMaC& zNitQXET?ibsZw%?4@3kXjd%$<&8M|WVQ~XgB2I0b)j^zh4yp}Cn*GdE&~LRUd`SPow+4*`|gHqVR$=w@X_Oi6YY;hr3xNXL}?J20+TtEm8CF9r}LIQyWdSD+i^Z0Ip zh{57;(m5glB7lk?3Fx~R+MNDz;B949efHFprWaApu8fS_U!ttNxUgoJrH{9M1;ASD zduj0_7w$Zqm;tX(47^(0))&SvOY1*=Z`|{2R?cLsS~_jt?qF?+-R!Z|0fR}gx3Tnc zv87N*pfJV|8L6R7VJ8w`M@6-1AqdnqA5mgd-(kJEH(eAeM1&%=u?g60(a-&6^KyOk zvHF!+Jx8Hgm^IFuW(^O~QGMDlAOCJ6=x5$BXu;plC*bR0+`z=ZlZ7YaGPJ6rxjmZC zg^y84dQ~MLRg%$GZi9qd2@F*HSnc%MoI)o?Ezg(8%F04K_N|aa|M~KJ;lYU8V4JjbM8Sq z+i-V}Dy#5p=V&UZrl|I|7%I-5LUE!{d~p=Q9yTDmO$F#{B_~d>VUUdo>J$k2MkeRx z^u5xZ@*Ti3&@*a zn0|s*Hz;#Z-Pz+y@BS^7E?wsLr1b!umQ>jQaC919C6Tp$wXyM6Qa-4pK`sgFTAyxM zqf20m&9gk7VRCZy%%A&tHDB(p0JF3FOWr6c>4vkdySt?1_n%I`QF1yftK0uf$-UdR z=lW8EiK&q5(AbaeVK` zwXdsdVu25fLlEv3!bS}1Famw8RI$8r?%cUajY6R?r^6g4>>GO8JEdRg{rTtBOT2Hx zr(Q%1z9>|xwZ^rzsc|JBuWREV2~r;#n;+LgVMjy$`OMWDzb$QyFC=lf@4+bZOa}gj zI9Ewqfp<}D{GooN|4X{7^<_Rlv6ti1?rV*Z>|(`QJ#Tf2cmCA9Q~OV!^4||9YoGrc zZ)}5m8$5xMH_mP=DLGqm<;vOan>YP$-aOM`rqL69`byK&n~RGB1E~&FFDlj1+SwO4 zCwLjc*4fU%5>OkAGsa@eQ5$Q##byegQHLj!@u6fIA(Vx#f=3YsEkV$Su=M(>qAFh1 z$#ds;9V`6`p+dR5I<>qyq*U}j=(yC;vGREJk-}is=+flI1%pw$b_ckIrKEQcjlHim zY@q&zA_snlxPLe@`Vv~2tS5(is3Ephs)F!GOZ?LGqx&;6kH#h3*tFK##@ct@7Iz8} zv(l^>=GClroUAzo4nJSI4N0@-~&5Ot|!i)Me4|u$jypwSAfLEA;UFEiy=YwO-ZFZ3YBk9c=WLC0w5Qrp z9lX7>v#C|#ftXycz})6&-*6Ho5I}3+OgfH32qyRiGt!d_qkf+3Eln?NnLBm<`h(Sb3;XDv?N%%BAI%5@_-83ebk~>T@RGwG$Vw-qJSiT3FEP z{#Y<5hu1E7dCh&>aqq#?@6i9~pM^Sj+nZ<4g5Ccg_z|o=y92(TJqw=i2It?*I&<^P znKL)De(3)+)xtKnnOfxJbk4IV+Z$w(ILqzO-0d*dHh60W-qj6{PjPh2ZT=|g)2No_ z^y@M7QXjDS%8>m&PR7_^Yku>hp;K?Z#+B&Ms9CEa_iee(l||{MSG&#gBc) z?-{>y_2I<8KzsWPkhJTy+Tq2qmHK~~xc0Cn>nJ?#7ZE`q!4v^?6v3Kgk&i_*77q~? z1414GvI#^YY($PGd!~aJw}%aJlb914EH26#A><%Q0^=HD$W1U*KoLO!#qe6a-xk%` zw+s7&Z|`@`dC%o{eqVjQl*OV7cpP5yjq4%?557+E6f-?NJ)m?T)vEp5Z=w3_+wsI_ zToWg!k|OB-B;dRog z^%m>bS-ks6tc8~)3IV!0fnG>g(}z4yitqQPoLion17apFIR(%OA+ABPNDSN}9#5*| zm7x*SpFJ+QIl6TGvj5r9{@RAl+B**)jOE5OE;gcEJJoaIX=SKiT>8Vyqn(`#8SyW7 zd>q*E#es;7^!8n?Wp60POO>Xl>gDsJrMG^6dQc!AJg8(j7c)34*--N}c$g=X$t2jk z#K{BLR@_{Ncm+B%|IgP&S}94Me!a+JNK>6d02lK4QMLj48#W@wZQZ+-MCC(rl6=!_ z;Ok+uFmh5fndTmy6&)Q`m@t29h%(iCZECv2(-W$O1OYle>t4h`GKM$^k(MQv>K2)n znwP$Q<@n{kHiWm%*2k4`s=O-`NMaWoXCFUJeURJU9@p8{x%l=(MY`V>|9DIg#O1|w zv@Q0JOU66fuF7lce;dD3Cn-*1v4nhNFd{L9fru>@F=QY(oF2b|IJu7Db>abcCxGi< z!}y8ILvX@L&*}R^w@Q|WI0agf<>VLh6MdbjW|2lHTS==(_N&&}SeTeZ3E6CFrjW}P z(%5Viz%+FNw;`vu6eTx3^p17uXjKK%y<*D)_ z`nv0DC5pPbHiB+1X0@pM5PkYRE#GvYS@HQ?zKuKj=m&{t%|3JBAm zo_eIA%-t)1ax^gdf}F&}99!E>k(Q=wEDbjxs$1!Y2LwdLCdI}^Me*4o!mO;!LOMHv zoxtU*s|(b<%|GX8TBeFTa84MymSk`cdWZXg{onJuB%{}6iieEduCqct^(np179=wy{~ zCAH_2Wh|*&AlNI&AJKr^f`DI4sGPZ?@a_M(I^K2AbymM(?Fe>|=|^)6j#yKm)p}wM z#MU=5U{k;beLGV-QfRfE`B2|4^N&ZHP>}goJe*yu z?H|1|q34kHNf1` z+|tZo(}p)m&dyXMdd8&i@HC6(9gV}t#$FQ;mgub=xMj_5m z=pFVCJA^(!bTXCzYl}VKb)5R>c*FUg%iV2bJ$)FLscd?XSNRxCn#W(nv`3_>o?(M_ zQ{4Lpki@9c|CsSle^A+1qmY-Dw$}ArJyj!}tdZ|kyeU;4%vWM0qnO8Gh{Uk?AVgFc zJiJYd9l#+vl>(vTy<0?J7H*=!i1yUZ@GOtnJ67_dOWnm zCx$(8cnl4)yxzLw!{2=K<1cq^7D{@$D^*A1;`*AZ;+j-#zy5kW%K+#CUB@ZJB)Z?xm}8IZIlm+$$*K9Fua2M~-oh!PF@Pui*ekK)Aoj zbV`Qawce&fOvsALaS%e7`nMh}_m*$n$5+eusyP~srXYu3Pz~^W-$be}_BfesG_}R* zi%p;YkG|L1HlvxuQk}eFr6Lp~K{~M5FU{ zoz+8hA9N`Vk<>Z0+@=h*OwSF?wJ-=L*FK53&bOuVsR3q2tMyl}F)}hU+(@!=TW9yK z)oM&xJ^$qkn6uj!wQY~hTE~#AtOW49kSpX?bGa9qmy?}}TAGJ?h3cPsZxlV+?g86N zCW>ZM6$LB-kLBz#_|q>xb{wBQ|JLQYhFZm$)GMQ%ss#-FS5)OLl*hEkS1sh_rm6xD zRpf>4%d1fNEq=Oq>HP6$7ZtUF`cmcmhZ9S!1IlWp;2=*a6CcbM33x%GW60QHZn}_4 
zq!U_((-HehNMB+1`;Q(?FE2m3Q6ReaX#QHkg&d(!2nWX-4t}I%W@Lbao`Igdsl5To zj*4tf-_lH)BXXc)59zW66$5

^J3SC{O=CWxCY&tzNKiT7Ed?5&{U3B&KGl|Qi^kMOQv-cdv&|b# zHv+SZnT-wAe#7(YZP%L^B$3jDwEgJu(xPef1bPBj%~dC;>1=x8>E5aNpVdk9wEbN6 znfaFOK%PuQ9ON7bZ#)?~79(fcCXQqA`D`Z^Rgu8`diJp@{pdoRYN4v4B4!~qJ-xju zz9IuzeC7N8f2iUny2fVW&qlO$b@fXohTE2YkVtuxm^Lfs2*8}h`MmELTD7An)$top z>)iW4A+RHqf!yXsIQ;4yji&n6bje&nP9`W_hdwjbH)&I*Wh8ZLl=GX0)U9S4oXxg6 zZ{12VGBnnQ1(;YF+gRC~?6xwY(n4ZEb7U-Fc{#E{b-*pmQcv9y{)P@%LPCS`v-JaK zT8IV_uYiNZn#0rZdHL1O0GRqoE^pN`*{DwiyuB!#aE=KF62E}$cw>RQ+gAg zsYe&`UP`U3`0(qQqqLS-DIYLd2FV&Z7OZ9beu-$Fy5@4Pb z6%`PyADnHMZKoHEky+BJHLEtSH`ZHcVG(Ar7F!#4+W1&I9@$}i^N1`Z}+u zyi=Qw=EXF5SA_bhQsc@)(<>q}u&VxP-`GS?*T*03`fjmreyMNxr#i(*y+SY|FB7<- zD&PpDJk~K?2M(O7O=WZpM`W%ZI&sp{EfdLxXo|~@txkZj&oI8f*6T%cbPRVShlPZM z#D<5PS=s3u8DSG|fYF-ZZ2e%+oP!-UfH)X$UQIF%U!9HeWsmU-pG8@J7J`j}&tbIQ z+S<{5tvflT?#xdObSDT|o*GbB3%T4RuJ9Z)`NCCN zY)F{#>hap_GezrY3Wg~|+7OjT3*K0kle_;}|XMc1yy z$2TV>jf%nkGC}`HjgpA$0<@fqWrFW{_|MQg1>#9aGQhu16cF2;{t<%wz=60#_pdGA zQYR#Ex%{7)qSI_)Y)E7XEs7QyYn7N}a?r%~O%u#hSUOW(Tx@r5^^M)@>+4KfV_;-p zVn{O9vr4itNlJ3tZ*K0D7T_2{^9rMxxJSiC(=r>_v}kiWo1c=zEli*{@X2IqA-7<@ zh)L1fBhoDf*e-}C;vkz>3ah^5K?(!o;_$)KVhple11Wk?gIgQTKrF(Zh{Bw!Q8r!I#@`l`!c zi87)Hf^SQ-jfv!w_vWru=OAUhaDO0CSm46;%@k5IU0hBHAa`bPTpq*eA77VFTTbKCQR$>Gl|b{yVw~P=d z3=WBRgDu98<0@Q&37mDX67^AAcaqNd*F>^iooNO>OPEi{rzytxqraRR!ji z@2WTy5$7M1TD7Iz`+zDi=$U9%U8o5M@M-iZal% zBN^G5t}c6CZY*#!^&6N=h@@7dR~*C@GVH9bPHo@K{nFELN^ijCYQ;XLNq^q|ZAdG;UY> z7yj)T54L-~<*q85t>WG@_0-w!Jhz$v$g?NSA(AGJaSUK24kyaZS z6S~$92FY)>vopnp=HNBKo3o9wadS3{FW-IFWY1@M*ztZC8($6|w*Knyw!^q_A0}^K z!IJZ~BZSs>ypnPRAIOfU2cF#Hh|#6wKmeNXc@1bj)45FYd})7AS5Irj<+0)MEA7w9 z;{q!V>}vWvFx6ibxa-0DGgY52e!jR^{_xqK{ffq#$yyYxW&LtQHK|-9Rce$x@cTjM zVsxJwFf3HaI%mff*uFyUcxtuI#N~%=FvaJt-x9KELUuSilg6fj*J&1CW{2BnuUVa~ zA0BL%9gMpX8Ll=Ax4?^Au!Ef$QMo~EoNbc$Nl~_L25xBv_GzT`ULotq`@?tInERwf zoiN$oKqK$ZjHV}?E(|*%bajnAolvd5aPM9+n3u`K`BB*sZi$vHPg9I^zO3eV(s*6P z<+i>jqZzNp9O@`PkP+{HH1yM|R}Y{&^YZ&0!*?@g(2NA;Nu>f?BnFiT=PaItB|G;> z=ES5--_}esVNF_g15yalbVaU?7pNT%0zLHZS;BU$*E=+RI`!3C1&z7ahRpK z>^67a?SlKSP#s*HF&1pR*V64E>CHEd4hGng0vt#VHs(gbdaDuOOh^`MNzd=m!v+HE zD8L;$$e(PpUb_?ffUI4~m`-tb-)a3^iqFny*FrU2-4a9;t#C9k;pssL@;nVoIwGi% zl%lU(+t~fEGriI;9f>>E7xUsUD^%%M?jP^nG1C$8hyTo9fBntVmB%$*#^H@!>>f;H z12$xXjbIXJvXT&*vtSJZW&;HlM}#`=iGi$$h{MWFXAp=MA{BH%fgGiiL(Iz1C`GbE zR8Uu>r9t)Sdxr(_A z0$UfPC8dZcl(ZzN#zR9#%1#wA=?srh4F{cOwo4h4=Bf15d&YWG0|S{`x3a>Y$-p$H z8?=qwpyaoDb@T%dtae+y_h4!S*hBKvy(bTvOs2lsMYIgp4JZ;GI6d@13nO>8Ihnw( zet+?si)Wu)SGw-;xWjSUEtS>#FqvK15w)%B?&_#hKb82;|NZwLf4z06;@zoh7cU(u z1iob+KlIu0fm1$Rst>+3>tN^bI(ha%i^i8c-a}-fMQ_l@CS3MGm}FT zDxgbhRgF$nmkuAN^i~pCUW_u9hD{{YGkFvymndb|OOvE9c%U>C0voo@CbH`JYyqE7 z%S*2CEYRyKWO)@8%8G)#0=-IQsxhgJs9}m~dUI5UT_#zrsl2nc`@0bQVt8IBOba)c zwHV+~a<&1XMEP>;%lS3hn4{W`(k-Q>>(*@BxA}#2G0(>Pw?tJxaO$VvkN*7g?>|33 zG+_Va=g@QZm+}uBXc(M0^6J}fUmE&uMab~b(-&!1cwxpKmO9=0;@?UnC8JJmX1X`b%C`tvlhL}z%z@rEp7uuDZ7 zo<$$Gl;1khlF5Gfo?GK1HdUIjMwb_wuBu7T)bOZ~Tw`L;hv&=lkuZdzzQXkZ$$kOC z$n|O2%Od>He+ zohpx~R0@=t*jO32zFt~Z&u0-Un6P*$5f7P6p=*2yOmdP7kqd<;9*vs97ZN?`PQEOS z0NO0yRiEsu;4|cWb#%HxohH|zLm`Vc%A-{V6Hxle+s-{><48G?%;1a)tMU&70hT^} z43z$k2jSn}*#(yY^;73UVfY7*$tZ=!MTowV<%pH|F1s}$2=HwIR1x^e{OC0 z==YwU>9-rI4wb}6dk-~kZ7eEWud(0*sOyS8&; zc&>Ms5jMW7Hm9%#P*g$kGGWj%1|tX;-jz!ANJ3nPfAb)+rED@Y$q7btLenH^X(>V- zEmT0GS75%AEa3B4L{GN2CoO|U#ZW=`{o?SzFtIpHrVtzC^1KK|pFwX@mk(aXnP+7! 
z8oRT6vox9u^V{1bMypCAsl8zxw9* z$7fHUzH+7K`1Y^H9{#jzwEEgW|MluLNK{Q8PkB*QTBEsab^$1(E8@Ar7 zF7ND~xl@e5>WnXlzl~NL(oVkvRRz~ZbB}@73dU#flck&A*xUi>(3tHVTJ5%HgCE_p zCU?X3(x{_x4}NcSH*R!K8P!F3=#l2h#C*M{ zx4_$r=HM<%Wf?kF*Kfs@hPs9Xj83f#Uj=x8$lpTJ#r-uU-IhES#r%?&+mEGiN| zt>f3^g$h-n1)&8TPSG+lmm~r^)Pv#bL4iUxh0l~S$xNavmCW^^Q?sF=>cU~r1+FQc z$pWf`&vj)$Zr9h7%kbjyyr_JyVCyMF7E3_jQRyB8 zj#Nlbqthidz8Z-rNuuG1D3Jkv=)TmG>nRjAJBdiFFJqL&l9BV)ds0$(z-TEzZorgx z7evP@#6^4cM!8xXT`up_^_lAA>T&~6!2Y2d-|g7lT$nxS_JG^Ty$2t<*l=Rgqo+3H zLOc|rO3w!G>(Kfi-Wa@YyW_!9d=bQjhaD%6o~-`%D#l&gfW#eWKh)6NFol=@L@7@<{w?&jLA*2zBB*-$ey~||AJ@Q%<>a}Q@s8j`f z9XNn|0fEKl)t9lU0u~oyYAm8hJ(Ec!N{P7pJ`9dX6tFXtK$cK_m?SQbz~zH4D@mb+ z(geb?ydZairy$5PnZyesxeJ1}dMcFqt%|%%gDgxPW&rg-A(NS8261oi$c^K(0H*v# znwkzaVI1(Tu^wnQtSxyorg}@qzK(71aSKmY#xHE{S}18bdb1__e77nA1hADi`t7&u!?%Zj`)z6KO9|{!E|=v5EvO0AUu!;2abZ;J!%oR}qy-Fz*@Pb{W6cI&AVUi?h!Dy01 zDH`aA)9C3W&U(6VXAXTmQzFbGpwZ$*Lr2Q%{&4SSFbR(_st9-3y4p=1CYQ;>6nTIu z4Jz9=#|JL?9Y`Md;;Ge7J2T$Fm#b791N(tK;HhP91BF zIu_pAbk zS-S(1U)(zGyJgmX&TgtxtI-lx$z;Hv-Q9Ii5@oV@JcKS^Ave;6;RE;0b0zWp7#hEH zjt~*tg(&rK@%8g1Joz^3n4M9Yxc$keSe!`>p3!Pa!G>b%cH>%*z1Klykpcl7wEe`);MN3~k)xMMy( zc)oODzO*Z5Avf27F%E^};Dsq66dVdIW~+H;L%U^vW1@L~R^oe!AuC?lvGSKshgYCF zFK%3kt3F<3o|+wAdHc0R@csJS+}vybb~=ZLC&zmSdu^S%U9~!tl{$4@C?b#&Ghc#0 z&sdr_i-+9Yg9y41)rUl<`-;+bl6(S0bQcL-D9TyyD4&vr z18HC8cCGOr{p63cXECSTv1NXrzr!C9E!P3HfyYj@mUJmnqocY88qB*7Ou^dQ3lSX5 z2~#0p<~3XoNf<0nFyF8@6gHb18bcCS+SQkm{W{N#&)vK7<$LbAetUXwdUz#V-1U%g z`|qIqcXroxmm6yhIw?(H5HLGL2_)6*Vb5Nmm=IvvHZTh zvIp*49vybyeXHd!rrwo$R~X3Rtvv%6Xd9R~5_D|MT~TWHJVWZ?f+${KVeVR?mfl7 z-0piizZ~=@>l{q{-a9kXgPnVZ?u0l~8vB_cm1muL$X;%<88P>x(&_6|>FBla^}?JW zjy{0z?Lz|-6z9z#ijc=ZFx`bK(ulYaX(15n^VyhgBqed!n0;Y08H^n0!=q7^EQu5_ z2xU|VchTxuWs5P+J2o%Y9kC-EZ3%qEPIOqRTeL#av^ua!X=oAyyMZXj@hZu;S3cQv zSIdP*$4+gC(w1ob7fRwC{>Qc>Y0q6a)%EBCUe(Q~4t~*}xU;#c%C8;mR15m{K-aAA zCA`^aS&`Lvz4+c6{j0Ke?Hbc2 zd5&>-&X}SuLI$f=DBLqMV#B>-Gh&&sEEXY!$q|yXef@Sy0wmmJWQGQOp>!$R!wJxc z=yZG`0D;S)`vzpY?i7+Dd0bHtTObq&7+!2HE;|Dv2g%7n1VNb~7AZZ-S2(<|5t-HlSzI(7e0Zj5c*b;o$Y_mWDxVqO}v=Zv56rL1F*J)No^O?Ik0Y ze(0A~*2(gkBDL6PFdR<+tqZQs}x>hCBz5#V8BBqIu=AgDw{1J z2v})Mz9^K#$qrZ+8Q?34qz3u1T^NxJczcnb$d%3^Xeb^O3Q-^-5;+7egnCFkBC1va zgF$2D=yM8WLRyeOuY?;>gsJ6jXhALCyF4t_xd7Zm;N#xtgO4=zoE~W(Go?QA(Abx! 
z$DWFUf@pk8+@_lg59gLdJ#yjZrlx~y@4HK{?;0rF{dDvG#=`61Wi&VMPq2c3idWE> zXjy?9Uwr%Wg#F}QDRSE`paQtiioV&91f;A6gTZ$2xs!8tOLOD6EmIa~f>Odcl|C<1 zta(Ps$gHSvqn9aJK`8`2bhU&OK7~Z(feR~QQlxBbkEhZ2Nz-<6BeNs3G3Vy$=i=*u zP(X!NIFm>&i*@zlk6i_B}5 zZR&JE#YByX|9p=6t-B>8@SXF1-+OzW=l%GjZ_nKG^k2|+$3Tnji;p~X@k@L3;>FtP z?aCs$YYT7qR-a#f7e4K-xh|aY+*DT=>e;yQE*%fc`2JSKoTVw+fw3cr^jfAZCa^X6)+U@~PL)O)#Ma@_ zk-BMk#wIB@7&nPzav9!lP!c4N$Tum(0Rxg*x0UOz#uw6zVCQt=}b3M%P<#DO}=^wwQJyno{aG_O#npRTrR>&@&f{mYzm+QQUoCqb{!~0gC_;(m9y-x@ zGIj!GjuVlcW*yquNi=Ex;gtW~ptjZngsWi%$)zqL6%*92e@$1MtIBvKBE3Y}7+ zCirS2m?6;8KiP1<=>E3G@m7(8GE$-Vc;Yk$jo3gW>%@4ol@>5WAP5zW7CYKcf$jR< z`|n>?UHKmCHSVjQzSn*G@!O9)a(t@2_srzo4}?FYQq|1xam)5XR}N_pkn1CJBW!SX z$Jx2{vj7|eQZWVCec=mNd-J)np8?;hx)|;Fd~d5vQRQ}~W1iaHDOZ?gv(8@$``_O> z+M6~Hj`lmu;nOM2#2pE>O{(_DiMk@Xf=_1Lne}RlWv*C5R__yBoFl(Er)B; zb7iDNA{O(SR6=7=frD<8RuNSpOPt4%fF5O;4w1YtkEp3T-Ij3H&euS(7L6zf8iL(# z@AO0;=vcy;pLf~jPfhmT-G1uMjkmrRqpR}PYu~^2^N-&>c;>Hrx{n{ZxN>}E1yVoJ zUVAtyDpq!{?OxtL+}!y6@7vfqHXuWN=q(qLR~9l?rg96Ffhawi_GY7C8HPNQ2?lh& zbP1u<#`fl=3XLWxy?yCM_1<2^6LP@0)j1RmpmZk3MV3epGE?NKD71?S4#`((b*+fDY-!8hMZ_H9$>L@8byigVrD+5X!j{F_-7#D>IgePfb`w5sTcy0i;qQ zwtxrr#A$XaL@UI+TE}ZdI%S;)o{cGbI%aEGQdsR0v^71Mk!tV6Q1#~RNlL>SE?CkPMOhGRU+#i zf1)ma;=~ET6DRrnNN^|}% zzVo%_9w8BSG>juMktjF{u22BBu8OA$$^t=?P~-FYjP8DGM9kyFd_t*If+lmos_vKh zcy1F3B0pgjNM)ibC~K=ZgqSbm%k&n#$xBA+rS-7;L}NWVG7c!9u%gO#mI!9+)X`BL ztxI`yOgW^}v5=#$fQuONx!;(Ih{YDu>o2@H}ZlV-Ij@E;vLpK`idVg2^v{@m5g z%Kq8)&CM@DDF< zz7YUvOcFgup8*vH-XFIjuocFj%EaXw_;LfGSF4js(l8z&2sW7!iR#deX@-O{jd=s% z5XGy(ejT=U8&t%jo+xhkB31M->GbB=97sZi!qfs&w7q47ce%x`!c-=Au(zfRhs!RHISMnfN9|AvSh9QI zLgv9kqv*u;SC3WV8{1oz-QRv-KL7J@^S4_+EiNx_?e=*0;>}M8>c&ZZBx!ZKNrN!h zUlr6=Nn?DmSR*!wtsF(hs}>uR7>FPxs>mQuC2!)gLmdrLq0GW}TS%Y9q>fFitvqDA zb&wuB(a_WIgrm+F4iTY72jy^eOt#dr2H#G3pvxCFqjZ7d6?$>m&9fsJ@PRTu!|>G0 z5yR_OED&gcud96B-Tihi#@;=!vNQqN@kw(hV{U0~on^ju8*nWQxu*;Zxi=&W8*SlY zI28@iblhz2cbOF~nxb3`?SWt{)9xyJvfG=0bou>`neDAn@3hY`xVw3U zh$x;sp*kT|#mSzWAy6=glPW$J!<_^w+;MV~Po|N;)=2ak^!Y#Uo!dvFWbrwa9G{W~^2>PbSlv zFw_i6n;ENtE0kmQlTfLbgX_!H2o*+!+ejq&GB=QB5v8RLvsxS5+A!Fyrg6BgXFMFH zU1oD_2Oa_>rEN?qA5P^%-n@5g?B?phr9PGn$9AJY5EFSmy&iOE|&5hx!Mn#pR3Uv!a@LI&J zq%tWnka{am&*A6=$YGGjYr*d3Sw-X4NMtBtGVzjDiJAl2A4e#CMmP@VhMSHjs>7(a zfdOZ=4TBRCE(-4VD*z# zNh(HQII#jfxCV$lxmp3EqFpFWQeJ%0*)b-aH-jFBrw{!67eD=9C&%mW>RMmyLSDYx zGU~E7uF&UuYdh|S;_p39XKxz!cadt_e!P%;ys$X3Siq-{dv|Z`^R4UKlv5Y=z%1+@ zT=GtEO=+`E_iR4xW&D0Bn{tHE6v&R2E?+vlus*jxT%P;=8c3=`R_hG-OECfjW_3T= zPg;3$4PWj9S6>^G2@3&;uJJi?0}p3i9MniPY$iYC)9}bDkya|#i?t@ToG;`^)uxmI z_r0}0E>h|e=s%+8UN_JTpo+*G3h7{s9Xe-iEtO5{vi|YWHI_Q?EVjRYlif4BmPgLQ z>T!jYoavH1)lq^(R`gDUeRmf<6J}Q%sy+8#&5c}n90w1sFlQL~>ae)GKeyxYhIQVW zM5?;lHjhcXY#3p4MYAsa-w~oxgIO5BaN_eDxeR=1;b>>`X#JL;rw2bO&{FOs8Plo+ zQf{0b0zk%MrkFMoAZ0k}Ds99>YV=Jp#5QhU@|=Xv#ZCq6h2Y}~v{g#uI38C9Gd+1? 
zxC$ZScSMNwpty_b@C;hpXh@0M=J8`B9OfFjCdJSc8gY=zSpDkxRd9?B{(kH~FyAZa z!!CB+(RY0VCw3lF*H)a+s_dP(yBjiYQM>0QWGBDP}(GzkarYYh?rCjHA)TGl%*)e+wBj>RD%j*Ti5V^v7;VTf=%W*<20$3;%Gm3?J zlOzc}HNCV-5@Z{VVu{9@F&K@Sq!{pE=*Y_50dQfE$BVg|02I@OZaG4kq+6^}6Odaa z;N4*RBDFMvo&-k$qj$7n?r~y(mA!#SccmE@n(Z@r?@Sh0#npd5V}rQr{*_Z;0anYb zz`~^xqOu~w6uZvjY_q#uEr4TE@DPxe!51$Be#O$n3)lA2X5IF}@-CCWX`*d57l!%R z;Z1A22%siHc0@IfaJ7wULx_KQe-0;v-TgX#Eza%iPWOA+R(gyinvjheG#rglD}f(Z z3%R(^3`%2wi@24S(aVw?+yN%ORm_)T+8GQ&#w!ScAhd{^xTFlqd>kbqtJ4}S2Eu9) zHPqF&wY56f$(V^SokFQ1p{VxK9@-YxnE@;UGI@0sTW8mP9~wi;iw|FTcyS%OpWDha zB!GE}Lbj|Mt%P_5DQ1P#h&Nt)6 zYsY*Wlq$jrue!#I`b1^n@eAu%7ykc^KexUDqw85c*)LNf<(4b8e6%5CVw7xf)$xg> z*raWeD0qA$8NgeYsPr5XnuBhAl2m3C3Tu$i5M;Qi0Dw7Z4`kr zK!kKcI^5`V=&EaR--XjUGt{nXY&Is%i~`fK%C3X!7w)*w*N2}5O!7PSE8BU*qzLzm z9yTy#7d(}{$fQeYJThz@3r4@(_tSxYaOTE~*AMbuzc;R<<90e89S*r@q_1=~)la(+ zEhBjy1Z437yYMX*3+osDTd%;wtv-A1Szl&I4q(PF zEfa`Qs?cOepMfMn`inQ25X%Y`G9^c*!iZ|fOrsnrsy_+8CQ_?J^}@Qk`V&esWcS0T z?}tq7zBe1wRm3u|$%YjJCjVH|9;3N5W-KqF8E42=ZL`tE!E6Yj>*y#N ztOxrnIe#7d1ylY1H~-jeET0hs<)NYB3<->wMr?v`a!^22srWeQZiyRmb9_~VgtnxZ zBr{grX2{mH@*vO`$tGEHIO*o|2m&U-=gZ|nk@}=sWR#?o1kQS6Rr6_J>RN3;*PzMc zBv6-z;+amD_QO>FJ?>bWM}3RzT3*Lz%j-{%tx;YxlP(YX(_z*@+lpx_no6g!^|7(t zo9ve^!?7*w>ZZf~)w184&Fa9i)PaXVowqyB!=WO4s2U& z`}haeZNGrI%5t+oc4C7%nM}q<1xm<^LtwEd5X1$57p=~iK;qZCwI;ci-^7>mtuk&V z7AwRJG6fF0M#fVK|EZ!VJQA7~*Am03C)o*upc9*$){oeO`@BQBUy>GX__i|0_qxU@A z`VA0BaO}62r>6&Z{`>$Zf~e+$r#`s&q;ws83sR6>2q=JcnBErJd+5`V>8+qI`1Dl_ z*f{||eGb%Xgsqhn-L_CnHaWyFU0(N^YZFz~xnwL((xgBg6131ZdDYSl70^e|Z~^jp8?99- zf+o;bJW~dCD#F$h5hBYll9cI)w3yu^67@um1tC;_QQQT8bOYyu(^yQQB0NG5>GIQC zPle|QpnLVzPjSd)tqT9Q6S7TL@bj8eeFwdl;kI#8rmG>0H)Q1Re7XuZcm@DCo(4-) zj~klk^ma<#uG89G-MDeH1!KQC4FUD&;%Dp?JHEH)XYoB;e?Y|w1?$mx0Y#^vrX?fQ zqtl!*Xu9KTa-8C66UjQsg?C-g4=l?9&M7uW>frFAgi2F-(J;6HYLNs{iKeJYSgK9T zr8&iBscd$mg9nth#gMSt9-+@-G+Cmz;TXmYls`pZ%h~y@5ASUpD(BB{?BLwo)vkxQ z{~c{-YZvRC(#}XL4@2>2Yk4+Z{sKMp;mLPj;_rTIhz$nA<8XdJ8Bn^l9$qe|wKZ*M zr)Y?T(sp?YY4*jR&qA*K=dZv1_1Di2W9#AGANT_&9{xUB9FEa2D;Rn^D&PT(SI`_T zHwRsKZK@c>u2E#hT}PFfE5y=dJgz25lgiJefdM~=dr{z%JV;$?u5*CIL%bP=}=a;KtaNQrRra(og&9(q=AOpm(RTIf#V|n{_C_%XR z`Qbl*{R`#Sqn|GR{tGQ;5cB>T+WP@8eu3Yz{Y-O4xs;NQPnuA1Lh(UyLScZ?qQOwa z0OfT)Miyz3)77F%(O?+T_sbt};bLcGGDFtfT6E1_JCKFSmu+1pR;XDd5=H&d&L zcqs*s5a2dyVZHZYxI8*IJy?Eq=ZQC-1E+rZ%iS-ukIPsq*gLrU^_^YxnqS3XLR!)< zjR1|SRqICS?6Z$P`R0@BH~L$3q z7r*`S=bx`09o@e5+fP3}5N`eFyN@5e6>c8eKmBD|w2;NP$X{Zr8Rf50vTWwlF(xnB zU~CD#kkl#FQuRE*zI{lMvo@ut^-!PndX7}{r`J#6M}Toh_aw@% zzVPaE&mBGYI5a3vUHlztxc_+Y=--V$fsrJ4ep)WnisfZvYt#vXdL6<(I$bmisv-%D zO`uoOR%C@UY+)eKFGb^AnpTTNGuE`oLDW!r-t&??i;@h-`f`QGu5-`lG8y!gAF4DjR0kRMZr@qvaw;kP1C+&JGI({(3E~=Skg5 z+l(=;*n%2Mb_oflPMh5Ugn-#ZHkFOU3n9$0IjK`ACjmQXGaKY z?%etiQT62)UmxGRQG&TwtDS1Kj7Je+?8x+9Tn|rk2aOq>P6q)&38~61Dwp^k{;)O* zJ=(}&=JAcncCNjJ(q65f(`q%xA32bghorz0w{hKHeeRxVFYxE>7cMU^PoKb%W9!XO zzx>6szug{>Ay=llZ9bXL<5n}Vg22rXttxSvXdVs@JsIu-`&)a=O}WRIeOAR@%9G<+1wx4{g0&-FdHH#U3>@gCLH<8@TB09RkftxYpb7 zgIDWULwRX+jz;BNVN%KE_P6KPM-V9ew@y!C?>Of%_USGU4(@)t7#}T;76%6t*!x%C zo_~D6T#HeraN>K>sMl*-oJw9Jl>##`b(0Vj+6Kz2a*QWwb<7E~cH50c3rT{@>*z2{ z32KVryL8BNVwwp@Nlm_zVY8vZ1L{Yt?>iPhvCzC#^H$EvIreV`r@vof>p$LUVAe}( zRc^pi{mzbtG;F<&Q-1{M>mztD?S2ptRdBiZyc)p3N4K164UoBAV-P1rH}{BoW@BHh z?>!x^{mnW?=P8r9+^~Eg%OYF{;TkuWFQKb;arVbwf#tdjkW<{@?Ci&jPY)Kcq9CPa zi8c~5j`P4srf6N^lAK&Y=4Oz#>n16~hO(P#Mp>av%5BC-n+j!CKzD$++Cgh>D{SRyg&fsye_-o(ZbBW_DpxnJS7GpF$SF{| zz&pR#`Mrf_xe%OUVaRhCugtu>D?(az72qkNBO8GuRhQ;_Jm#T_Vou!KM1?a zH;}Z!WLJ~c)L#r2i_vl<24F1Ce|qEHJ70h}L_=G@-$DK{*N|?n=N{X{E8&lBJ-WO3 
zc7N+JaZ7_veib;Hd!<&p;y7Zb-`^CsH|n_^dMTx2e32K}!R{H+()Oe}DPz-F6(zYjeEdl^$kpscN30$e0ZZPK^~6 z@a)21aOfL4MX8(_L);S?FwL1ODAyz!t>e1dmQB_~>PrhsC6$%3y`CZa_C!HYlj-O7 zYAlE_ZwklO3be)u*wn%}-%o%1;KOG>Z2-A%0LXTyqsAy0l}BQ~ysm9uspFZ7^7>Wa zeHf(y=5WzR>)Ya^b@3r>Tg-@&TneEb7&2$F?ztd$!MF?VcLq``uvZ}K%uAEWVm=8-l#|wg*Yin&Mq?daA$SEE=()TW4@=J343&{s#}A~& zy*URCu(wlNIhug%?Y9r#ci$gU9pz58+YW^{pz9 zZY7Gl;GH&iVC$Rv;W$jhii(&KuB2Fz&y3g&tlUOve}6yq{A99x#-A0erWN?hMH2=O zg_kWi7nUx&NYavwKpFn{`?E3K_GDfj$B9^)_GgwW@LW3S@j8*`p?5L~({!i&6CNl3^kx<8S0G8(+tSyD5tbJO3o%3)HK>lmIX^kWjQ-;rVwesJ@p$Sr{@hV z%UL!cL74`~C*M6EPMzbDgj5SI`8q~-Yfwf$(g(5i8s3lYzo*sj7eVH?MlH+Hq$^@) zd*oo-`?>u(n9pr7ThHw5Z{*P2WP%Epn7zzC5Z;-%!D0f7p9PDtbvTADbul*l1@8Dj zE)-0@?dyUfnZ7Rw7G&y#AMd$QQgta$p4f8CZO0-M4JEUU!c(!yH3x<+I|2h>i^i@+ zT+e_JA!ubN5*2xp(IpTWN=%_BHu1_N5*bj2HJ{AUNvCzfn5SV4{YFsr8h!9aD25!I zfB4~#A0Hq0!)~_Px`&X|Z(Q;EAl2AXsfM=t{^pKWeOj!C>WY@^^u6`|_KV`))4e(l zoXPPWB#C^*scndxn|pxwM7GNoC`rhyIlW%n_bssOiohEK|73h}B+H9yNnQ!8=FIYK zo}keY>bgwNx5m~$kldcsW?WTJx?IJkRLbNqx}i&OwhrqVOhDe|i*YVmk-PFtYQqtN zsRY(jN>>$BicC^0#8Ujk=sv>}1S`D~N#IXuUYX2&WNniMme+Dtojzn{%ex=k`gU=1 zQR7>UPNy^ywd*Sw{Q7IJq1_M*I#Iy;I$lq|TGcQKM%+U6w!B%@#Lj#)8cCyQB1ejQ z<0=pyA=MngBL?Tx_xBPKV@$?lO844|tEdJ#95GGokSom0@R^4q9CKkjwr$oX;+*Tc zV>Vjwe2Dv8zg|=2gE+Mq>V7e z9;mk4uf5ja-0EW(hSq3iOrGjA_@**rT$s~2i$z~T`8hdku`i&%`+tj|qYpaNs3l z(WE3PNLyzzqY|?w*-Yk(BnS+l;M+akgwiz&}U@7Q?QWT}=tit7$bRukmqULN90$RTVB`UY5qa*s9xR2McYi$hM&i~E4prOsJ0oRM5gWb! zQ1qOtr-hFfR2!YGRJDUlESQc&shUf?oQXV?%E61XsZ=(D>ZVuQ$Pl@DCYS3}x*&^y zJM3PD@dR`XFrEk5`Qga{R2*cIv7&83m>HB2c6}FJANu7&P#6pONbwbD9nxxEiI{Vl z7u+_vM)6UBJ0y}S?z#}|mG31xwKz`?j_4Pw@qY7=BM-ELyZUMi9&72RN?G;3QrPtGA&PU8d(y&|)MAk$G2 zF+6AH2b4AI>M=3~^(Xq~6nM;J3^NU4#U#yBf=#FbB(Efcep*U5Q{z;HsnF0&xILnB z)?9{Kai1NHY8`3R8_mzgH-jTopT219=YVIuvM81T@uNBb3_)2AacXTlA?|fHLt0Y| z|1&4To!7TSQ9~gohkvqr8{sUpST04Z3E?MSsjv)~;l%SE%AMB))~o3 z)-nhtN)A!?Ry~2|^dzmsd$qZg%T+p(S7n_lsLhc!!jPY^bej}wUO9s?{<#_gyfu@Ev4vb2#*4xizdX^%)ETV1Nvd5URB1~u=FVlvTqmOFeA;7=$78UE*t=saXJmvTiqJp-X$Z}_G!jXXl|a!J2(nN{ZxIp&$jvK3 z;@@(d1q$v<#(BRM?|Uh~`ugQ-WGQePH-yKC`)cBbLf^fISFq3S zKKb~=4-wF-xI`MU_3CgJsn2;9l{m9`KA?=uuAhGWvic(eA{ z4`~#gyc>2eEpbw8IVmg==ii33iFD+kj^Yg`d3-Q#vqmp}e0=(H_@SvDpK*!$c>nNz zh8t7-^2he~@1MVTzxd?-^ZK(-KF2L+K8nAEJ|jgTPi1$X!kb@bHU56T5I%#fe+YxN ze}fpf`?`4fVe`@)p0^vm(S?l*Nc1>rV;*pQAlqKjJ~}=cGdbp4pYt>NuH3f?lZb%J z3e^rg{5tCZRIa%~A{qg|47p`BCO^PkhIAV!b;IiESt99qq_zzp4`Fn9x+I zBp)J%|3%MQVoz1N9iH|?NGqt`pi0c2t2CyjE{`v@c}e;G;qcStx9#)OSMMG_e;41Y z_?!aJx8HvI?pG-IVfUZjEr6Gsqd(l;;u_Fv4Bvc%d&OsqmaXa)%H-@NTiLJc`h^yX z zdbo>>9#4yxG~&4x-VdcTG>^dWq?!z+~;V$UdQhi!-Wo9r-Q9N z|K=NP$SRHs1XK|(zomaMvVy;bXh;#`Js$4{{v)cqyEvoHw#p{)x^MRVunsPlEZ@E9 zFYA-s zxqI)E?-n7i@38^cgazBb&$zCSXoK*jpab>QT)(qU@V(=;y3OG+0&9J{2~IX|hm(b2=8MRE(@kPy}(F2o+qxmnupIO&6Evx6WT-~E0yI-hH7RWf#`y1iEx%+g9`5Vta1?`uUU<6zYuh;Qnw5~VD zto}61ZeB+&*%U>gNfbp$sFR6`JO#aXsdYq-Y3}n&yM7Z`+f`MUKW%@!ti^K@#UVJG zd$TJ@KTW}Cn>_EDB%=92g^=?#@x2(V?a{bJt(hvK8|l!YCqxG+o#*SOqq2n`8_C#^ z4;>_iYzXHdSUf-k8$u!~j`N(YNKKE$0v*5S<3qDRN2wL5&ft9CKeZ5|dgpws_GGhEoPujA;bY?P0 zZOf`I?(He@e!n3ifBW@f{vaML1N{f7;7 ziu|@ui+gm|85j+;48AnRtTg-zS3hK*!o#nYEite>Rtwt^)DZ}@i6V68*a|$x<0BDV zD99Y8)(Xb#X~LXVhQO?)v>;4*-(8}^o^>6rR$O+;d8kTBtvh(p(H(JvGlw~Q+*Kay zpAtnFE=CM*8YKNKKSM1g`nKiL#g(?2vVv4mCq_x7c0nPuP$rzfJkljq|3+{wMhA^0 zR+54ja3C)!jpGP@B~QasTGUH$-!1GOfnkM5gkiDct3lu5qBbToc(2G_zvfuP$7MHT zQDC6Q@vCKS!U)WAVxfGeDMElF6s5C>b!j347WJPp4L##x_sSWaw>=dHvAUeeFD*#TiIR&DsL3(3mJNkic0@xc9BCP=5XOSI@|Z z1-2^QC4PhylQ=sG`=)l0<~(4#4v;J3P}2B4BE;?U{%i)FwrpnE^-Z%oWV4$NIiZTU z&@wtp>ilk3@`85~6`Y0?Zo@sEqi*DS7+f@~s9 zSLGu2uvK=2-7Uk6C+y)qzQ5@}nSxFNpbL@RUBMO}ixH>Tg48uiC8|`jF%#p81veQ= 
[git binary patch data omitted]
diff --git a/vendor/github.com/go-playground/locales/rules.go b/vendor/github.com/go-playground/locales/rules.go new file mode 100644 index 00000000000..92029001496 --- /dev/null +++ b/vendor/github.com/go-playground/locales/rules.go @@ -0,0 +1,293 @@ +package locales + +import ( + "strconv" + "time" + + "github.com/go-playground/locales/currency" +) + +// // ErrBadNumberValue is returned when the number passed for +// // plural rule determination cannot be parsed +// type ErrBadNumberValue struct { +// NumberValue string +// InnerError error +// } + +// // Error returns ErrBadNumberValue error string +// func (e *ErrBadNumberValue) Error() string { +// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError) +// } + +// var _ error = new(ErrBadNumberValue) + +// PluralRule denotes the type of plural rules +type PluralRule int + +// PluralRule's +const ( + PluralRuleUnknown PluralRule = iota + PluralRuleZero // zero + PluralRuleOne // one - singular + PluralRuleTwo // two - dual + PluralRuleFew // few - paucal + PluralRuleMany // many - also used for fractions if they have a separate class + PluralRuleOther // other - required—general plural form—also used if the language only has a single form +) + +const ( + pluralsString = "UnknownZeroOneTwoFewManyOther" +) + +// Translator encapsulates an instance of a locale +// NOTE: some values are returned as a []byte just in case the caller +// wishes to add more and can help avoid allocations; otherwise just cast as string +type Translator interface { + + // The following Functions are for overriding, debugging or developing + // with a Translator Locale + + // Locale returns the string value of the translator + Locale() string + + // returns an array of cardinal plural rules associated + // with this translator + PluralsCardinal() []PluralRule + + //
returns an array of ordinal plural rules associated + // with this translator + PluralsOrdinal() []PluralRule + + // returns an array of range plural rules associated + // with this translator + PluralsRange() []PluralRule + + // returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale + CardinalPluralRule(num float64, v uint64) PluralRule + + // returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale + OrdinalPluralRule(num float64, v uint64) PluralRule + + // returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale + RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule + + // returns the locales abbreviated month given the 'month' provided + MonthAbbreviated(month time.Month) string + + // returns the locales abbreviated months + MonthsAbbreviated() []string + + // returns the locales narrow month given the 'month' provided + MonthNarrow(month time.Month) string + + // returns the locales narrow months + MonthsNarrow() []string + + // returns the locales wide month given the 'month' provided + MonthWide(month time.Month) string + + // returns the locales wide months + MonthsWide() []string + + // returns the locales abbreviated weekday given the 'weekday' provided + WeekdayAbbreviated(weekday time.Weekday) string + + // returns the locales abbreviated weekdays + WeekdaysAbbreviated() []string + + // returns the locales narrow weekday given the 'weekday' provided + WeekdayNarrow(weekday time.Weekday) string + + // WeekdaysNarrowreturns the locales narrow weekdays + WeekdaysNarrow() []string + + // returns the locales short weekday given the 'weekday' provided + WeekdayShort(weekday time.Weekday) string + + // returns the locales short weekdays + WeekdaysShort() []string + + // returns the locales wide weekday given the 'weekday' provided + WeekdayWide(weekday time.Weekday) string + + // returns the locales wide weekdays + WeekdaysWide() []string + + // The following Functions are common Formatting functionsfor the Translator's Locale + + // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v' + FmtNumber(num float64, v uint64) string + + // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v' + // NOTE: 'num' passed into FmtPercent is assumed to be in percent already + FmtPercent(num float64, v uint64) string + + // returns the currency representation of 'num' with digits/precision of 'v' for locale + FmtCurrency(num float64, v uint64, currency currency.Type) string + + // returns the currency representation of 'num' with digits/precision of 'v' for locale + // in accounting notation. 
+ FmtAccounting(num float64, v uint64, currency currency.Type) string + + // returns the short date representation of 't' for locale + FmtDateShort(t time.Time) string + + // returns the medium date representation of 't' for locale + FmtDateMedium(t time.Time) string + + // returns the long date representation of 't' for locale + FmtDateLong(t time.Time) string + + // returns the full date representation of 't' for locale + FmtDateFull(t time.Time) string + + // returns the short time representation of 't' for locale + FmtTimeShort(t time.Time) string + + // returns the medium time representation of 't' for locale + FmtTimeMedium(t time.Time) string + + // returns the long time representation of 't' for locale + FmtTimeLong(t time.Time) string + + // returns the full time representation of 't' for locale + FmtTimeFull(t time.Time) string +} + +// String returns the string value of PluralRule +func (p PluralRule) String() string { + + switch p { + case PluralRuleZero: + return pluralsString[7:11] + case PluralRuleOne: + return pluralsString[11:14] + case PluralRuleTwo: + return pluralsString[14:17] + case PluralRuleFew: + return pluralsString[17:20] + case PluralRuleMany: + return pluralsString[20:24] + case PluralRuleOther: + return pluralsString[24:] + default: + return pluralsString[:7] + } +} + +// +// Precision Notes: +// +// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh +// +// v := float64(3.141) +// i := float64(int64(v)) +// +// fmt.Println(v - i) +// +// or +// +// s := strconv.FormatFloat(v-i, 'f', -1, 64) +// fmt.Println(s) +// +// these will not print what you'd expect: 0.14100000000000001 +// and so this library requires a precision to be specified, or +// inaccurate plural rules could be applied. +// +// +// +// n - absolute value of the source number (integer and decimals). +// i - integer digits of n. +// v - number of visible fraction digits in n, with trailing zeros. +// w - number of visible fraction digits in n, without trailing zeros. +// f - visible fractional digits in n, with trailing zeros. +// t - visible fractional digits in n, without trailing zeros. +// +// +// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision note's above. +// +// n := math.Abs(num) +// i := int64(n) +// v := v +// +// +// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on string until no more zero's.... +// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after decimal into an int64 +// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on string until no more zero's.... +// +// +// +// General Inclusion Rules +// - v will always be available inherently +// - all require n +// - w requires i +// + +// W returns the number of visible fraction digits in N, without trailing zeros. +func W(n float64, v uint64) (w int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then w will be zero + // otherwise need to parse + if len(s) != 1 { + + s = s[2:] + end := len(s) + 1 + + for i := end; i >= 0; i-- { + if s[i] != '0' { + end = i + 1 + break + } + } + + w = int64(len(s[:end])) + } + + return +} + +// F returns the visible fractional digits in N, with trailing zeros. 
+func F(n float64, v uint64) (f int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then f will be zero + // otherwise need to parse + if len(s) != 1 { + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + f, _ = strconv.ParseInt(s[2:], 10, 64) + } + + return +} + +// T returns the visible fractional digits in N, without trailing zeros. +func T(n float64, v uint64) (t int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then t will be zero + // otherwise need to parse + if len(s) != 1 { + + s = s[2:] + end := len(s) + 1 + + for i := end; i >= 0; i-- { + if s[i] != '0' { + end = i + 1 + break + } + } + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + t, _ = strconv.ParseInt(s[:end], 10, 64) + } + + return +} diff --git a/vendor/github.com/go-playground/universal-translator/.gitignore b/vendor/github.com/go-playground/universal-translator/.gitignore new file mode 100644 index 00000000000..bc4e07f34ec --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.coverprofile \ No newline at end of file diff --git a/vendor/github.com/go-playground/universal-translator/.travis.yml b/vendor/github.com/go-playground/universal-translator/.travis.yml new file mode 100644 index 00000000000..39b8b923e4b --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/.travis.yml @@ -0,0 +1,27 @@ +language: go +go: + - 1.13.4 + - tip +matrix: + allow_failures: + - go: tip + +notifications: + email: + recipients: dean.karn@gmail.com + on_success: change + on_failure: always + +before_install: + - go install github.com/mattn/goveralls + +# Only clone the most recent commit. +git: + depth: 1 + +script: + - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./... + +after_success: | + [ $TRAVIS_GO_VERSION = 1.13.4 ] && + goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN \ No newline at end of file diff --git a/vendor/github.com/go-playground/universal-translator/LICENSE b/vendor/github.com/go-playground/universal-translator/LICENSE new file mode 100644 index 00000000000..8d8aba15bab --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-playground/universal-translator/Makefile b/vendor/github.com/go-playground/universal-translator/Makefile new file mode 100644 index 00000000000..ec3455bd59c --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/Makefile @@ -0,0 +1,18 @@ +GOCMD=GO111MODULE=on go + +linters-install: + @golangci-lint --version >/dev/null 2>&1 || { \ + echo "installing linting tools..."; \ + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.41.1; \ + } + +lint: linters-install + golangci-lint run + +test: + $(GOCMD) test -cover -race ./... + +bench: + $(GOCMD) test -bench=. -benchmem ./... + +.PHONY: test lint linters-install \ No newline at end of file diff --git a/vendor/github.com/go-playground/universal-translator/README.md b/vendor/github.com/go-playground/universal-translator/README.md new file mode 100644 index 00000000000..46dec6d2b23 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/README.md @@ -0,0 +1,89 @@ +## universal-translator +![Project status](https://img.shields.io/badge/version-0.18.0-green.svg) +[![Build Status](https://travis-ci.org/go-playground/universal-translator.svg?branch=master)](https://travis-ci.org/go-playground/universal-translator) +[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator) +[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator) +![License](https://img.shields.io/dub/l/vibe-d.svg) +[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) + +Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules + +Why another i18n library? +-------------------------- +Because none of the plural rules seem to be correct out there, including the previous implementation of this package, +so I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package +is a thin wrapper around [locales](https://github.com/go-playground/locales) in order to store and translate text for +use in your applications. 
+ +Features +-------- +- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere ) +- [x] Support loading translations from files +- [x] Exporting translations to file(s), mainly for getting them professionally translated +- [ ] Code Generation for translation files -> Go code.. i.e. after it has been professionally translated +- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1) + +Installation +----------- + +Use go get + +```shell +go get github.com/go-playground/universal-translator +``` + +Usage & Documentation +------- + +Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs + +##### Examples: + +- [Basic](https://github.com/go-playground/universal-translator/tree/master/_examples/basic) +- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-no-files) +- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-with-files) + +File formatting +-------------- +All types, Plain substitution, Cardinal, Ordinal and Range translations can all be contained within the same file(s); +they are only separated for easy viewing. + +##### Examples: + +- [Formats](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats) + +##### Basic Makeup +NOTE: not all fields are needed for all translation types, see [examples](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats) +```json +{ + "locale": "en", + "key": "days-left", + "trans": "You have {0} day left.", + "type": "Cardinal", + "rule": "One", + "override": false +} +``` +|Field|Description| +|---|---| +|locale|The locale for which the translation is for.| +|key|The translation key that will be used to store and lookup each translation; normally it is a string or integer.| +|trans|The actual translation text.| +|type|The type of translation Cardinal, Ordinal, Range or "" for a plain substitution(not required to be defined if plain used)| +|rule|The plural rule for which the translation is for eg. One, Two, Few, Many or Other.(not required to be defined if plain used)| +|override|If you wish to override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.| + +Help With Tests +--------------- +To anyone interesting in helping or contributing, I sure could use some help creating tests for each language. +Please see issue [here](https://github.com/go-playground/locales/issues/1) for details. + +License +------ +Distributed under MIT License, please see license file in code for more details. 
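As a rough, illustrative sketch of how the translation files described above are consumed (not part of the vendored sources): the snippet below wires up a `UniversalTranslator`, registers one plain substitution translation, and loads JSON files via the `Import` method that appears later in this patch. `ut.New`, `GetTranslator`, `Add` and `T` are assumed to carry their usual signatures from the rest of the package; the `welcome-user` key and the `./translations` path are invented for the example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	// English acts as both the fallback and the only registered locale here.
	enLoc := en.New()
	uni := ut.New(enLoc, enLoc)

	trans, _ := uni.GetTranslator("en")

	// Register a plain substitution translation directly in code...
	if err := trans.Add("welcome-user", "Welcome {0}", false); err != nil {
		log.Fatal(err)
	}

	// ...or load files shaped like the JSON example above from a directory
	// (the path is illustrative; Import errors if it does not exist).
	if err := uni.Import(ut.FormatJSON, "./translations"); err != nil {
		log.Fatal(err)
	}

	msg, _ := trans.T("welcome-user", "Joe")
	fmt.Println(msg) // Welcome Joe
}
```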
diff --git a/vendor/github.com/go-playground/universal-translator/errors.go b/vendor/github.com/go-playground/universal-translator/errors.go new file mode 100644 index 00000000000..38b163b6269 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/errors.go @@ -0,0 +1,148 @@ +package ut + +import ( + "errors" + "fmt" + + "github.com/go-playground/locales" +) + +var ( + // ErrUnknowTranslation indicates the translation could not be found + ErrUnknowTranslation = errors.New("Unknown Translation") +) + +var _ error = new(ErrConflictingTranslation) +var _ error = new(ErrRangeTranslation) +var _ error = new(ErrOrdinalTranslation) +var _ error = new(ErrCardinalTranslation) +var _ error = new(ErrMissingPluralTranslation) +var _ error = new(ErrExistingTranslator) + +// ErrExistingTranslator is the error representing a conflicting translator +type ErrExistingTranslator struct { + locale string +} + +// Error returns ErrExistingTranslator's internal error text +func (e *ErrExistingTranslator) Error() string { + return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale) +} + +// ErrConflictingTranslation is the error representing a conflicting translation +type ErrConflictingTranslation struct { + locale string + key interface{} + rule locales.PluralRule + text string +} + +// Error returns ErrConflictingTranslation's internal error text +func (e *ErrConflictingTranslation) Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) + } + + return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) +} + +// ErrRangeTranslation is the error representing a range translation error +type ErrRangeTranslation struct { + text string +} + +// Error returns ErrRangeTranslation's internal error text +func (e *ErrRangeTranslation) Error() string { + return e.text +} + +// ErrOrdinalTranslation is the error representing an ordinal translation error +type ErrOrdinalTranslation struct { + text string +} + +// Error returns ErrOrdinalTranslation's internal error text +func (e *ErrOrdinalTranslation) Error() string { + return e.text +} + +// ErrCardinalTranslation is the error representing a cardinal translation error +type ErrCardinalTranslation struct { + text string +} + +// Error returns ErrCardinalTranslation's internal error text +func (e *ErrCardinalTranslation) Error() string { + return e.text +} + +// ErrMissingPluralTranslation is the error signifying a missing translation given +// the locales plural rules. +type ErrMissingPluralTranslation struct { + locale string + key interface{} + rule locales.PluralRule + translationType string +} + +// Error returns ErrMissingPluralTranslation's internal error text +func (e *ErrMissingPluralTranslation) Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale) + } + + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale) +} + +// ErrMissingBracket is the error representing a missing bracket in a translation +// eg. 
This is a {0 <-- missing ending '}' +type ErrMissingBracket struct { + locale string + key interface{} + text string +} + +// Error returns ErrMissingBracket error message +func (e *ErrMissingBracket) Error() string { + return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text) +} + +// ErrBadParamSyntax is the error representing a bad parameter definition in a translation +// eg. This is a {must-be-int} +type ErrBadParamSyntax struct { + locale string + param string + key interface{} + text string +} + +// Error returns ErrBadParamSyntax error message +func (e *ErrBadParamSyntax) Error() string { + return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text) +} + +// import/export errors + +// ErrMissingLocale is the error representing an expected locale that could +// not be found aka locale not registered with the UniversalTranslator Instance +type ErrMissingLocale struct { + locale string +} + +// Error returns ErrMissingLocale's internal error text +func (e *ErrMissingLocale) Error() string { + return fmt.Sprintf("error: locale '%s' not registered.", e.locale) +} + +// ErrBadPluralDefinition is the error representing an incorrect plural definition +// usually found within translations defined within files during the import process. +type ErrBadPluralDefinition struct { + tl translation +} + +// Error returns ErrBadPluralDefinition's internal error text +func (e *ErrBadPluralDefinition) Error() string { + return fmt.Sprintf("error: bad plural definition '%#v'", e.tl) +} diff --git a/vendor/github.com/go-playground/universal-translator/go.mod b/vendor/github.com/go-playground/universal-translator/go.mod new file mode 100644 index 00000000000..9d086000ce4 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/go.mod @@ -0,0 +1,5 @@ +module github.com/go-playground/universal-translator + +go 1.13 + +require github.com/go-playground/locales v0.14.0 diff --git a/vendor/github.com/go-playground/universal-translator/go.sum b/vendor/github.com/go-playground/universal-translator/go.sum new file mode 100644 index 00000000000..20667ea9848 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/go.sum @@ -0,0 +1,7 @@ +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/go-playground/universal-translator/import_export.go b/vendor/github.com/go-playground/universal-translator/import_export.go new file mode 100644 index 00000000000..1216f192372 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/import_export.go @@ -0,0 +1,276 @@ +package ut + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "io" + + "github.com/go-playground/locales" +) + +type translation struct { + Locale string `json:"locale"` + Key interface{} `json:"key"` // either 
string or integer + Translation string `json:"trans"` + PluralType string `json:"type,omitempty"` + PluralRule string `json:"rule,omitempty"` + OverrideExisting bool `json:"override,omitempty"` +} + +const ( + cardinalType = "Cardinal" + ordinalType = "Ordinal" + rangeType = "Range" +) + +// ImportExportFormat is the format of the file import or export +type ImportExportFormat uint8 + +// supported Export Formats +const ( + FormatJSON ImportExportFormat = iota +) + +// Export writes the translations out to a file on disk. +// +// NOTE: this currently only works with string or int translations keys. +func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error { + + _, err := os.Stat(dirname) + fmt.Println(dirname, err, os.IsNotExist(err)) + if err != nil { + + if !os.IsNotExist(err) { + return err + } + + if err = os.MkdirAll(dirname, 0744); err != nil { + return err + } + } + + // build up translations + var trans []translation + var b []byte + var ext string + + for _, locale := range t.translators { + + for k, v := range locale.(*translator).translations { + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k, + Translation: v.text, + }) + } + + for k, pluralTrans := range locale.(*translator).cardinalTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: cardinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).ordinalTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: ordinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).rangeTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: rangeType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + switch format { + case FormatJSON: + b, err = json.MarshalIndent(trans, "", " ") + ext = ".json" + } + + if err != nil { + return err + } + + err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644) + if err != nil { + return err + } + + trans = trans[0:0] + } + + return nil +} + +// Import reads the translations out of a file or directory on disk. +// +// NOTE: this currently only works with string or int translations keys. 
+func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error { + + fi, err := os.Stat(dirnameOrFilename) + if err != nil { + return err + } + + processFn := func(filename string) error { + + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + + return t.ImportByReader(format, f) + } + + if !fi.IsDir() { + return processFn(dirnameOrFilename) + } + + // recursively go through directory + walker := func(path string, info os.FileInfo, err error) error { + + if info.IsDir() { + return nil + } + + switch format { + case FormatJSON: + // skip non JSON files + if filepath.Ext(info.Name()) != ".json" { + return nil + } + } + + return processFn(path) + } + + return filepath.Walk(dirnameOrFilename, walker) +} + +// ImportByReader imports the the translations found within the contents read from the supplied reader. +// +// NOTE: generally used when assets have been embedded into the binary and are already in memory. +func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error { + + b, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + + var trans []translation + + switch format { + case FormatJSON: + err = json.Unmarshal(b, &trans) + } + + if err != nil { + return err + } + + for _, tl := range trans { + + locale, found := t.FindTranslator(tl.Locale) + if !found { + return &ErrMissingLocale{locale: tl.Locale} + } + + pr := stringToPR(tl.PluralRule) + + if pr == locales.PluralRuleUnknown { + + err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting) + if err != nil { + return err + } + + continue + } + + switch tl.PluralType { + case cardinalType: + err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case ordinalType: + err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case rangeType: + err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting) + default: + return &ErrBadPluralDefinition{tl: tl} + } + + if err != nil { + return err + } + } + + return nil +} + +func stringToPR(s string) locales.PluralRule { + + switch s { + case "Zero": + return locales.PluralRuleZero + case "One": + return locales.PluralRuleOne + case "Two": + return locales.PluralRuleTwo + case "Few": + return locales.PluralRuleFew + case "Many": + return locales.PluralRuleMany + case "Other": + return locales.PluralRuleOther + default: + return locales.PluralRuleUnknown + } + +} diff --git a/vendor/github.com/go-playground/universal-translator/logo.png b/vendor/github.com/go-playground/universal-translator/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..a37aa8c0cd0f6e1b98e0be3eb2531ebc6ac6717b GIT binary patch literal 16598 zcmV(yK*U_<=i=1Q$^7;0v#+eDrK0%p z>%6(N%f`XBv#;pk-N3xJlaY`9`tZ@r#>d0H&B?^Dtf>F`_3`TEoSK)csHK;dl)1LD z{xUb9o}0zMyZALSjf{%1BqI0!{{5jc`1SO`zPq8KpwB5MBz`@`DJHxnB>jUjr=XpP zhJ*ceGX0V>qah&tt}xmxDK3XPA9F;SA|8N$duRs?{%JJ*T{HG>5AcL8I*&O1Ju^KN z1^!Yp@h&apFDzno38}29au*Y6VF#%(B=Pd^`t{{04g#CPRH8PEL>89OH1Y4=wM`?Y+cnsCD|dZGb#-yb zPbYsY6v#U#TTlf1)HKmxCR88|TWkfi;WgJmEa>U!iH1CPzc%zlFUrZIxOpL50SAW2 zHrkRe-Bv8!+}rP-U23*Bp`mB-jzz%dHuKoF($TRq)t%&MEAPFIet}u?$D-;-NTfd? 
[ ~16 KB of base85-encoded binary image data for logo.png omitted ]
+		if len(tarr) > 0 && tarr[rule] != nil && !override {
+			return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
+		}
+
+	} else {
+		tarr = make([]*transText, 7)
+		t.cardinalTanslations[key] = tarr
+	}
+
+	trans := &transText{
+		text:    text,
+		indexes: make([]int, 2),
+	}
+
+	tarr[rule] = trans
+
+	idx := strings.Index(text, paramZero)
+	if idx == -1 {
+		tarr[rule] = nil
+		return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
+	}
+
+	trans.indexes[0] = idx
+	trans.indexes[1] = idx + len(paramZero)
+
+	return nil
+}
+
+// AddOrdinal adds an ordinal plural translation for a particular language/locale
+// {0} is the only replacement type accepted and only one variable is accepted as
+// multiple cannot be used for a plural rule determination, unless it is a range;
+// see AddRange below.
+// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd...
+func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsOrdinal() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.ordinalTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7) + t.ordinalTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 2), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + return nil +} + +// AddRange adds a range plural translation for a particular language/locale +// {0} and {1} are the only replacement types accepted and only these are accepted. +// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left' +func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsRange() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.rangeTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7) + t.rangeTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 4), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + idx = strings.Index(text, paramOne) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)} + } + + trans.indexes[2] = idx + trans.indexes[3] = idx + len(paramOne) + + return nil +} + +// T creates the translation for the locale given the 'key' and params passed in +func (t *translator) T(key interface{}, params ...string) (string, error) { + + trans, ok := t.translations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + b := make([]byte, 0, 64) + + var start, end, count int + + for i := 0; i < len(trans.indexes); i++ { + end = trans.indexes[i] + b = append(b, trans.text[start:end]...) + b = append(b, params[count]...) 
+ i++ + start = trans.indexes[i] + count++ + } + + b = append(b, trans.text[start:]...) + + return string(b), nil +} + +// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.cardinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.CardinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) + + return string(b), nil +} + +// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.ordinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.OrdinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) + + return string(b), nil +} + +// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments +// and 'param1' and 'param2' passed in +func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) { + + tarr, ok := t.rangeTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.RangePluralRule(num1, digits1, num2, digits2) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param1...) + b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...) + b = append(b, param2...) + b = append(b, trans.text[trans.indexes[3]:]...) + + return string(b), nil +} + +// VerifyTranslations checks to ensures that no plural rules have been +// missed within the translations. 
+func (t *translator) VerifyTranslations() error { + + for k, v := range t.cardinalTanslations { + + for _, rule := range t.PluralsCardinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k} + } + } + } + + for k, v := range t.ordinalTanslations { + + for _, rule := range t.PluralsOrdinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k} + } + } + } + + for k, v := range t.rangeTanslations { + + for _, rule := range t.PluralsRange() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k} + } + } + } + + return nil +} diff --git a/vendor/github.com/go-playground/universal-translator/universal_translator.go b/vendor/github.com/go-playground/universal-translator/universal_translator.go new file mode 100644 index 00000000000..dbf707f5c7c --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/universal_translator.go @@ -0,0 +1,113 @@ +package ut + +import ( + "strings" + + "github.com/go-playground/locales" +) + +// UniversalTranslator holds all locale & translation data +type UniversalTranslator struct { + translators map[string]Translator + fallback Translator +} + +// New returns a new UniversalTranslator instance set with +// the fallback locale and locales it should support +func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator { + + t := &UniversalTranslator{ + translators: make(map[string]Translator), + } + + for _, v := range supportedLocales { + + trans := newTranslator(v) + t.translators[strings.ToLower(trans.Locale())] = trans + + if fallback.Locale() == v.Locale() { + t.fallback = trans + } + } + + if t.fallback == nil && fallback != nil { + t.fallback = newTranslator(fallback) + } + + return t +} + +// FindTranslator trys to find a Translator based on an array of locales +// and returns the first one it can find, otherwise returns the +// fallback translator. 
+func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) { + + for _, locale := range locales { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + } + + return t.fallback, false +} + +// GetTranslator returns the specified translator for the given locale, +// or fallback if not found +func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + + return t.fallback, false +} + +// GetFallback returns the fallback locale +func (t *UniversalTranslator) GetFallback() Translator { + return t.fallback +} + +// AddTranslator adds the supplied translator, if it already exists the override param +// will be checked and if false an error will be returned, otherwise the translator will be +// overridden; if the fallback matches the supplied translator it will be overridden as well +// NOTE: this is normally only used when translator is embedded within a library +func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error { + + lc := strings.ToLower(translator.Locale()) + _, ok := t.translators[lc] + if ok && !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + trans := newTranslator(translator) + + if t.fallback.Locale() == translator.Locale() { + + // because it's optional to have a fallback, I don't impose that limitation + // don't know why you wouldn't but... + if !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + t.fallback = trans + } + + t.translators[lc] = trans + + return nil +} + +// VerifyTranslations runs through all locales and identifies any issues +// eg. missing plural rules for a locale +func (t *UniversalTranslator) VerifyTranslations() (err error) { + + for _, trans := range t.translators { + err = trans.VerifyTranslations() + if err != nil { + return + } + } + + return +} diff --git a/vendor/github.com/go-playground/validator/v10/.gitignore b/vendor/github.com/go-playground/validator/v10/.gitignore new file mode 100644 index 00000000000..6e43fac0d5a --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/.gitignore @@ -0,0 +1,30 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +bin + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.test +*.out +*.txt +cover.html +README.html diff --git a/vendor/github.com/go-playground/validator/v10/LICENSE b/vendor/github.com/go-playground/validator/v10/LICENSE new file mode 100644 index 00000000000..6a2ae9aa4da --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Dean Karn + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md b/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md new file mode 100644 index 00000000000..b809c4ce128 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md @@ -0,0 +1,16 @@ +## Maintainers Guide + +### Semantic Versioning +Semantic versioning as defined [here](https://semver.org) must be strictly adhered to. + +### External Dependencies +Any new external dependencies MUST: +- Have a compatible LICENSE present. +- Be actively maintained. +- Be approved by @go-playground/admins + +### PR Merge Requirements +- Up-to-date branch. +- Passing tests and linting. +- CODEOWNERS approval. +- Tests that cover both the Happy and Unhappy paths. \ No newline at end of file diff --git a/vendor/github.com/go-playground/validator/v10/Makefile b/vendor/github.com/go-playground/validator/v10/Makefile new file mode 100644 index 00000000000..ec3455bd59c --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/Makefile @@ -0,0 +1,18 @@ +GOCMD=GO111MODULE=on go + +linters-install: + @golangci-lint --version >/dev/null 2>&1 || { \ + echo "installing linting tools..."; \ + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.41.1; \ + } + +lint: linters-install + golangci-lint run + +test: + $(GOCMD) test -cover -race ./... + +bench: + $(GOCMD) test -bench=. -benchmem ./... + +.PHONY: test lint linters-install \ No newline at end of file diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md new file mode 100644 index 00000000000..f56cff15d96 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -0,0 +1,323 @@ +Package validator +================= +[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +![Project status](https://img.shields.io/badge/version-10.9.0-green.svg) +[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) +[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) +[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://pkg.go.dev/github.com/go-playground/validator/v10) +![License](https://img.shields.io/dub/l/vibe-d.svg) + +Package validator implements value validations for structs and individual fields based on tags. + +It has the following **unique** features: + +- Cross Field and Cross Struct validations by using validation tags or custom validators. 
+- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
+- Ability to dive into both map keys and values for validation
+- Handles type interface by determining its underlying type prior to validation.
+- Handles custom field types such as sql driver Valuer, see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29)
+- Alias validation tags, which allow several validations to be mapped to a single tag for easier definition of validations on structs
+- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError
+- Customizable i18n aware error messages.
+- Default validator for the [gin](https://github.com/gin-gonic/gin) web framework; for upgrading from v8 to v9 in gin see [here](https://github.com/go-playground/validator/tree/master/_examples/gin-upgrading-overriding)
+
+Installation
+------------
+
+Use go get.
+
+	go get github.com/go-playground/validator/v10
+
+Then import the validator package into your own code.
+
+	import "github.com/go-playground/validator/v10"
+
+Error Return Value
+-------
+
+Validation functions return the type error.
+
+They return type error to avoid the issue discussed in the following, where err is always != nil:
+
+* http://stackoverflow.com/a/29138676/3158232
+* https://github.com/go-playground/validator/issues/134
+
+Validator returns only InvalidValidationError for bad validation input, and otherwise nil or ValidationErrors, both as type error. So, in your code, all you need to do is check whether the returned error is nil; if it is not, check whether it is an InvalidValidationError (if necessary; most of the time it isn't) and type-assert it to ValidationErrors like so:
+
+```go
+err := validate.Struct(mystruct)
+validationErrors := err.(validator.ValidationErrors)
+```
+
+Usage and documentation
+------
+
+Please see https://pkg.go.dev/github.com/go-playground/validator/v10 for detailed usage docs.
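The following is a minimal sketch of that pattern; the `User` struct and its tag values are illustrative, not part of the package:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// User is an example struct; the tags map to baked-in validations listed below.
type User struct {
	Name  string `validate:"required"`
	Email string `validate:"required,email"`
	Age   int    `validate:"gte=0,lte=130"`
}

func main() {
	validate := validator.New()

	u := User{Name: "", Email: "not-an-email", Age: 200}

	if err := validate.Struct(u); err != nil {
		// InvalidValidationError is only returned for bad validation input,
		// e.g. passing a nil value to Struct.
		if _, ok := err.(*validator.InvalidValidationError); ok {
			fmt.Println(err)
			return
		}
		// Otherwise the error is a ValidationErrors slice of FieldError values.
		for _, fe := range err.(validator.ValidationErrors) {
			fmt.Println(fe.Namespace(), "failed on the", fe.Tag(), "tag")
		}
	}
}
```

Checking for `*InvalidValidationError` first mirrors the guidance in the Error Return Value section above; in most programs the type assertion to `ValidationErrors` is all that is needed.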
+ +##### Examples: + +- [Simple](https://github.com/go-playground/validator/blob/master/_examples/simple/main.go) +- [Custom Field Types](https://github.com/go-playground/validator/blob/master/_examples/custom/main.go) +- [Struct Level](https://github.com/go-playground/validator/blob/master/_examples/struct-level/main.go) +- [Translations & Custom Errors](https://github.com/go-playground/validator/blob/master/_examples/translations/main.go) +- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding) +- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash) + +Baked-in Validations +------ + +### Fields: + +| Tag | Description | +| - | - | +| eqcsfield | Field Equals Another Field (relative)| +| eqfield | Field Equals Another Field | +| fieldcontains | NOT DOCUMENTED IN doc.go | +| fieldexcludes | NOT DOCUMENTED IN doc.go | +| gtcsfield | Field Greater Than Another Relative Field | +| gtecsfield | Field Greater Than or Equal To Another Relative Field | +| gtefield | Field Greater Than or Equal To Another Field | +| gtfield | Field Greater Than Another Field | +| ltcsfield | Less Than Another Relative Field | +| ltecsfield | Less Than or Equal To Another Relative Field | +| ltefield | Less Than or Equal To Another Field | +| ltfield | Less Than Another Field | +| necsfield | Field Does Not Equal Another Field (relative) | +| nefield | Field Does Not Equal Another Field | + +### Network: + +| Tag | Description | +| - | - | +| cidr | Classless Inter-Domain Routing CIDR | +| cidrv4 | Classless Inter-Domain Routing CIDRv4 | +| cidrv6 | Classless Inter-Domain Routing CIDRv6 | +| datauri | Data URL | +| fqdn | Full Qualified Domain Name (FQDN) | +| hostname | Hostname RFC 952 | +| hostname_port | HostPort | +| hostname_rfc1123 | Hostname RFC 1123 | +| ip | Internet Protocol Address IP | +| ip4_addr | Internet Protocol Address IPv4 | +| ip6_addr | Internet Protocol Address IPv6 | +| ip_addr | Internet Protocol Address IP | +| ipv4 | Internet Protocol Address IPv4 | +| ipv6 | Internet Protocol Address IPv6 | +| mac | Media Access Control Address MAC | +| tcp4_addr | Transmission Control Protocol Address TCPv4 | +| tcp6_addr | Transmission Control Protocol Address TCPv6 | +| tcp_addr | Transmission Control Protocol Address TCP | +| udp4_addr | User Datagram Protocol Address UDPv4 | +| udp6_addr | User Datagram Protocol Address UDPv6 | +| udp_addr | User Datagram Protocol Address UDP | +| unix_addr | Unix domain socket end point Address | +| uri | URI String | +| url | URL String | +| url_encoded | URL Encoded | +| urn_rfc2141 | Urn RFC 2141 String | + +### Strings: + +| Tag | Description | +| - | - | +| alpha | Alpha Only | +| alphanum | Alphanumeric | +| alphanumunicode | Alphanumeric Unicode | +| alphaunicode | Alpha Unicode | +| ascii | ASCII | +| boolean | Boolean | +| contains | Contains | +| containsany | Contains Any | +| containsrune | Contains Rune | +| endsnotwith | Ends With | +| endswith | Ends With | +| excludes | Excludes | +| excludesall | Excludes All | +| excludesrune | Excludes Rune | +| lowercase | Lowercase | +| multibyte | Multi-Byte Characters | +| number | NOT DOCUMENTED IN doc.go | +| numeric | Numeric | +| printascii | Printable ASCII | +| startsnotwith | Starts Not With | +| startswith | Starts With | +| uppercase | Uppercase | + +### Format: +| Tag | Description | +| - | - | +| base64 | Base64 String | +| base64url | Base64URL String | +| bic | Business Identifier Code 
(ISO 9362) | +| bcp47_language_tag | Language tag (BCP 47) | +| btc_addr | Bitcoin Address | +| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) | +| datetime | Datetime | +| e164 | e164 formatted phone number | +| email | E-mail String +| eth_addr | Ethereum Address | +| hexadecimal | Hexadecimal String | +| hexcolor | Hexcolor String | +| hsl | HSL String | +| hsla | HSLA String | +| html | HTML Tags | +| html_encoded | HTML Encoded | +| isbn | International Standard Book Number | +| isbn10 | International Standard Book Number 10 | +| isbn13 | International Standard Book Number 13 | +| iso3166_1_alpha2 | Two-letter country code (ISO 3166-1 alpha-2) | +| iso3166_1_alpha3 | Three-letter country code (ISO 3166-1 alpha-3) | +| iso3166_1_alpha_numeric | Numeric country code (ISO 3166-1 numeric) | +| iso3166_2 | Country subdivision code (ISO 3166-2) | +| iso4217 | Currency code (ISO 4217) | +| json | JSON | +| jwt | JSON Web Token (JWT) | +| latitude | Latitude | +| longitude | Longitude | +| postcode_iso3166_alpha2 | Postcode | +| postcode_iso3166_alpha2_field | Postcode | +| rgb | RGB String | +| rgba | RGBA String | +| ssn | Social Security Number SSN | +| timezone | Timezone | +| uuid | Universally Unique Identifier UUID | +| uuid3 | Universally Unique Identifier UUID v3 | +| uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 | +| uuid4 | Universally Unique Identifier UUID v4 | +| uuid4_rfc4122 | Universally Unique Identifier UUID v4 RFC4122 | +| uuid5 | Universally Unique Identifier UUID v5 | +| uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 | +| uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 | + +### Comparisons: +| Tag | Description | +| - | - | +| eq | Equals | +| gt | Greater than| +| gte | Greater than or equal | +| lt | Less Than | +| lte | Less Than or Equal | +| ne | Not Equal | + +### Other: +| Tag | Description | +| - | - | +| dir | Directory | +| file | File path | +| isdefault | Is Default | +| len | Length | +| max | Maximum | +| min | Minimum | +| oneof | One Of | +| required | Required | +| required_if | Required If | +| required_unless | Required Unless | +| required_with | Required With | +| required_with_all | Required With All | +| required_without | Required Without | +| required_without_all | Required Without All | +| excluded_with | Excluded With | +| excluded_with_all | Excluded With All | +| excluded_without | Excluded Without | +| excluded_without_all | Excluded Without All | +| unique | Unique | + +#### Aliases: +| Tag | Description | +| - | - | +| iscolor | hexcolor\|rgb\|rgba\|hsl\|hsla | +| country_code | iso3166_1_alpha2\|iso3166_1_alpha3\|iso3166_1_alpha_numeric | + +Benchmarks +------ +###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64 +```go +goos: darwin +goarch: amd64 +pkg: github.com/go-playground/validator +BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op +BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op +BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op +BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op +BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op +BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op +BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op +BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op +BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op +BenchmarkFieldMapDiveSuccessParallel-8 
3000000 467 ns/op 432 B/op 18 allocs/op +BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op +BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op +BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op +BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op +BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op +BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op +BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op +BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op +BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagSuccessParallel-8 3000000 474 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op +BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op +BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op +BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op +BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op +BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op +BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op +BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op +BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op +BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op +BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op +BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op +BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op +BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op +BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op +BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op +BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op +BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op +BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op +BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op +BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 304 B/op 8 allocs/op +BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 B/op 8 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op +BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op 
+BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op +BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op +BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op +BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op +BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op +BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op +BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op +BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op +``` + +Complementary Software +---------------------- + +Here is a list of software that complements using this library either pre or post validation. + +* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support. +* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects + +How to Contribute +------ + +Make a pull request... + +License +------- +Distributed under MIT License, please see license file within the code for more details. + +Maintainers +----------- +This project has grown large enough that more than one person is required to properly support the community. +If you are interested in becoming a maintainer please reach out to me https://github.com/deankarn diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go new file mode 100644 index 00000000000..f5fd2391d20 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -0,0 +1,2409 @@ +package validator + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "net" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" + + "golang.org/x/crypto/sha3" + "golang.org/x/text/language" + + urn "github.com/leodido/go-urn" +) + +// Func accepts a FieldLevel interface for all validation needs. The return +// value should be true when validation succeeds. +type Func func(fl FieldLevel) bool + +// FuncCtx accepts a context.Context and FieldLevel interface for all +// validation needs. The return value should be true when validation succeeds. +type FuncCtx func(ctx context.Context, fl FieldLevel) bool + +// wrapFunc wraps noramal Func makes it compatible with FuncCtx +func wrapFunc(fn Func) FuncCtx { + if fn == nil { + return nil // be sure not to wrap a bad function. + } + return func(ctx context.Context, fl FieldLevel) bool { + return fn(fl) + } +} + +var ( + restrictedTags = map[string]struct{}{ + diveTag: {}, + keysTag: {}, + endKeysTag: {}, + structOnlyTag: {}, + omitempty: {}, + skipValidationTag: {}, + utf8HexComma: {}, + utf8Pipe: {}, + noStructLevelTag: {}, + requiredTag: {}, + isdefault: {}, + } + + // bakedInAliases is a default mapping of a single validation tag that + // defines a common or complex set of validation(s) to simplify + // adding validation to structs. + bakedInAliases = map[string]string{ + "iscolor": "hexcolor|rgb|rgba|hsl|hsla", + "country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric", + } + + // bakedInValidators is the default map of ValidationFunc + // you can add, remove or even replace items to suite your needs, + // or even disregard and use your own map if so desired. 
+ bakedInValidators = map[string]Func{ + "required": hasValue, + "required_if": requiredIf, + "required_unless": requiredUnless, + "required_with": requiredWith, + "required_with_all": requiredWithAll, + "required_without": requiredWithout, + "required_without_all": requiredWithoutAll, + "excluded_with": excludedWith, + "excluded_with_all": excludedWithAll, + "excluded_without": excludedWithout, + "excluded_without_all": excludedWithoutAll, + "isdefault": isDefault, + "len": hasLengthOf, + "min": hasMinOf, + "max": hasMaxOf, + "eq": isEq, + "ne": isNe, + "lt": isLt, + "lte": isLte, + "gt": isGt, + "gte": isGte, + "eqfield": isEqField, + "eqcsfield": isEqCrossStructField, + "necsfield": isNeCrossStructField, + "gtcsfield": isGtCrossStructField, + "gtecsfield": isGteCrossStructField, + "ltcsfield": isLtCrossStructField, + "ltecsfield": isLteCrossStructField, + "nefield": isNeField, + "gtefield": isGteField, + "gtfield": isGtField, + "ltefield": isLteField, + "ltfield": isLtField, + "fieldcontains": fieldContains, + "fieldexcludes": fieldExcludes, + "alpha": isAlpha, + "alphanum": isAlphanum, + "alphaunicode": isAlphaUnicode, + "alphanumunicode": isAlphanumUnicode, + "boolean": isBoolean, + "numeric": isNumeric, + "number": isNumber, + "hexadecimal": isHexadecimal, + "hexcolor": isHEXColor, + "rgb": isRGB, + "rgba": isRGBA, + "hsl": isHSL, + "hsla": isHSLA, + "e164": isE164, + "email": isEmail, + "url": isURL, + "uri": isURI, + "urn_rfc2141": isUrnRFC2141, // RFC 2141 + "file": isFile, + "base64": isBase64, + "base64url": isBase64URL, + "contains": contains, + "containsany": containsAny, + "containsrune": containsRune, + "excludes": excludes, + "excludesall": excludesAll, + "excludesrune": excludesRune, + "startswith": startsWith, + "endswith": endsWith, + "startsnotwith": startsNotWith, + "endsnotwith": endsNotWith, + "isbn": isISBN, + "isbn10": isISBN10, + "isbn13": isISBN13, + "eth_addr": isEthereumAddress, + "btc_addr": isBitcoinAddress, + "btc_addr_bech32": isBitcoinBech32Address, + "uuid": isUUID, + "uuid3": isUUID3, + "uuid4": isUUID4, + "uuid5": isUUID5, + "uuid_rfc4122": isUUIDRFC4122, + "uuid3_rfc4122": isUUID3RFC4122, + "uuid4_rfc4122": isUUID4RFC4122, + "uuid5_rfc4122": isUUID5RFC4122, + "ascii": isASCII, + "printascii": isPrintableASCII, + "multibyte": hasMultiByteCharacter, + "datauri": isDataURI, + "latitude": isLatitude, + "longitude": isLongitude, + "ssn": isSSN, + "ipv4": isIPv4, + "ipv6": isIPv6, + "ip": isIP, + "cidrv4": isCIDRv4, + "cidrv6": isCIDRv6, + "cidr": isCIDR, + "tcp4_addr": isTCP4AddrResolvable, + "tcp6_addr": isTCP6AddrResolvable, + "tcp_addr": isTCPAddrResolvable, + "udp4_addr": isUDP4AddrResolvable, + "udp6_addr": isUDP6AddrResolvable, + "udp_addr": isUDPAddrResolvable, + "ip4_addr": isIP4AddrResolvable, + "ip6_addr": isIP6AddrResolvable, + "ip_addr": isIPAddrResolvable, + "unix_addr": isUnixAddrResolvable, + "mac": isMAC, + "hostname": isHostnameRFC952, // RFC 952 + "hostname_rfc1123": isHostnameRFC1123, // RFC 1123 + "fqdn": isFQDN, + "unique": isUnique, + "oneof": isOneOf, + "html": isHTML, + "html_encoded": isHTMLEncoded, + "url_encoded": isURLEncoded, + "dir": isDir, + "json": isJSON, + "jwt": isJWT, + "hostname_port": isHostnamePort, + "lowercase": isLowercase, + "uppercase": isUppercase, + "datetime": isDatetime, + "timezone": isTimeZone, + "iso3166_1_alpha2": isIso3166Alpha2, + "iso3166_1_alpha3": isIso3166Alpha3, + "iso3166_1_alpha_numeric": isIso3166AlphaNumeric, + "iso3166_2": isIso31662, + "iso4217": isIso4217, + "iso4217_numeric": 
isIso4217Numeric, + "bcp47_language_tag": isBCP47LanguageTag, + "postcode_iso3166_alpha2": isPostcodeByIso3166Alpha2, + "postcode_iso3166_alpha2_field": isPostcodeByIso3166Alpha2Field, + "bic": isIsoBicFormat, + } +) + +var oneofValsCache = map[string][]string{} +var oneofValsCacheRWLock = sync.RWMutex{} + +func parseOneOfParam2(s string) []string { + oneofValsCacheRWLock.RLock() + vals, ok := oneofValsCache[s] + oneofValsCacheRWLock.RUnlock() + if !ok { + oneofValsCacheRWLock.Lock() + vals = splitParamsRegex.FindAllString(s, -1) + for i := 0; i < len(vals); i++ { + vals[i] = strings.Replace(vals[i], "'", "", -1) + } + oneofValsCache[s] = vals + oneofValsCacheRWLock.Unlock() + } + return vals +} + +func isURLEncoded(fl FieldLevel) bool { + return uRLEncodedRegex.MatchString(fl.Field().String()) +} + +func isHTMLEncoded(fl FieldLevel) bool { + return hTMLEncodedRegex.MatchString(fl.Field().String()) +} + +func isHTML(fl FieldLevel) bool { + return hTMLRegex.MatchString(fl.Field().String()) +} + +func isOneOf(fl FieldLevel) bool { + vals := parseOneOfParam2(fl.Param()) + + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + for i := 0; i < len(vals); i++ { + if vals[i] == v { + return true + } + } + return false +} + +// isUnique is the validation function for validating if each array|slice|map value is unique +func isUnique(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + v := reflect.ValueOf(struct{}{}) + + switch field.Kind() { + case reflect.Slice, reflect.Array: + elem := field.Type().Elem() + if elem.Kind() == reflect.Ptr { + elem = elem.Elem() + } + + if param == "" { + m := reflect.MakeMap(reflect.MapOf(elem, v.Type())) + + for i := 0; i < field.Len(); i++ { + m.SetMapIndex(reflect.Indirect(field.Index(i)), v) + } + return field.Len() == m.Len() + } + + sf, ok := elem.FieldByName(param) + if !ok { + panic(fmt.Sprintf("Bad field name %s", param)) + } + + sfTyp := sf.Type + if sfTyp.Kind() == reflect.Ptr { + sfTyp = sfTyp.Elem() + } + + m := reflect.MakeMap(reflect.MapOf(sfTyp, v.Type())) + for i := 0; i < field.Len(); i++ { + m.SetMapIndex(reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param)), v) + } + return field.Len() == m.Len() + case reflect.Map: + m := reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type())) + + for _, k := range field.MapKeys() { + m.SetMapIndex(field.MapIndex(k), v) + } + return field.Len() == m.Len() + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } +} + +// isMAC is the validation function for validating if the field's value is a valid MAC address. +func isMAC(fl FieldLevel) bool { + + _, err := net.ParseMAC(fl.Field().String()) + + return err == nil +} + +// isCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address. +func isCIDRv4(fl FieldLevel) bool { + + ip, _, err := net.ParseCIDR(fl.Field().String()) + + return err == nil && ip.To4() != nil +} + +// isCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address. 
+func isCIDRv6(fl FieldLevel) bool { + + ip, _, err := net.ParseCIDR(fl.Field().String()) + + return err == nil && ip.To4() == nil +} + +// isCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address. +func isCIDR(fl FieldLevel) bool { + + _, _, err := net.ParseCIDR(fl.Field().String()) + + return err == nil +} + +// isIPv4 is the validation function for validating if a value is a valid v4 IP address. +func isIPv4(fl FieldLevel) bool { + + ip := net.ParseIP(fl.Field().String()) + + return ip != nil && ip.To4() != nil +} + +// isIPv6 is the validation function for validating if the field's value is a valid v6 IP address. +func isIPv6(fl FieldLevel) bool { + + ip := net.ParseIP(fl.Field().String()) + + return ip != nil && ip.To4() == nil +} + +// isIP is the validation function for validating if the field's value is a valid v4 or v6 IP address. +func isIP(fl FieldLevel) bool { + + ip := net.ParseIP(fl.Field().String()) + + return ip != nil +} + +// isSSN is the validation function for validating if the field's value is a valid SSN. +func isSSN(fl FieldLevel) bool { + + field := fl.Field() + + if field.Len() != 11 { + return false + } + + return sSNRegex.MatchString(field.String()) +} + +// isLongitude is the validation function for validating if the field's value is a valid longitude coordinate. +func isLongitude(fl FieldLevel) bool { + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + case reflect.Float32: + v = strconv.FormatFloat(field.Float(), 'f', -1, 32) + case reflect.Float64: + v = strconv.FormatFloat(field.Float(), 'f', -1, 64) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + + return longitudeRegex.MatchString(v) +} + +// isLatitude is the validation function for validating if the field's value is a valid latitude coordinate. +func isLatitude(fl FieldLevel) bool { + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + case reflect.Float32: + v = strconv.FormatFloat(field.Float(), 'f', -1, 32) + case reflect.Float64: + v = strconv.FormatFloat(field.Float(), 'f', -1, 64) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + + return latitudeRegex.MatchString(v) +} + +// isDataURI is the validation function for validating if the field's value is a valid data URI. +func isDataURI(fl FieldLevel) bool { + + uri := strings.SplitN(fl.Field().String(), ",", 2) + + if len(uri) != 2 { + return false + } + + if !dataURIRegex.MatchString(uri[0]) { + return false + } + + return base64Regex.MatchString(uri[1]) +} + +// hasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. 
+func hasMultiByteCharacter(fl FieldLevel) bool { + + field := fl.Field() + + if field.Len() == 0 { + return true + } + + return multibyteRegex.MatchString(field.String()) +} + +// isPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. +func isPrintableASCII(fl FieldLevel) bool { + return printableASCIIRegex.MatchString(fl.Field().String()) +} + +// isASCII is the validation function for validating if the field's value is a valid ASCII character. +func isASCII(fl FieldLevel) bool { + return aSCIIRegex.MatchString(fl.Field().String()) +} + +// isUUID5 is the validation function for validating if the field's value is a valid v5 UUID. +func isUUID5(fl FieldLevel) bool { + return uUID5Regex.MatchString(fl.Field().String()) +} + +// isUUID4 is the validation function for validating if the field's value is a valid v4 UUID. +func isUUID4(fl FieldLevel) bool { + return uUID4Regex.MatchString(fl.Field().String()) +} + +// isUUID3 is the validation function for validating if the field's value is a valid v3 UUID. +func isUUID3(fl FieldLevel) bool { + return uUID3Regex.MatchString(fl.Field().String()) +} + +// isUUID is the validation function for validating if the field's value is a valid UUID of any version. +func isUUID(fl FieldLevel) bool { + return uUIDRegex.MatchString(fl.Field().String()) +} + +// isUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID. +func isUUID5RFC4122(fl FieldLevel) bool { + return uUID5RFC4122Regex.MatchString(fl.Field().String()) +} + +// isUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID. +func isUUID4RFC4122(fl FieldLevel) bool { + return uUID4RFC4122Regex.MatchString(fl.Field().String()) +} + +// isUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID. +func isUUID3RFC4122(fl FieldLevel) bool { + return uUID3RFC4122Regex.MatchString(fl.Field().String()) +} + +// isUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version. +func isUUIDRFC4122(fl FieldLevel) bool { + return uUIDRFC4122Regex.MatchString(fl.Field().String()) +} + +// isISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. +func isISBN(fl FieldLevel) bool { + return isISBN10(fl) || isISBN13(fl) +} + +// isISBN13 is the validation function for validating if the field's value is a valid v13 ISBN. +func isISBN13(fl FieldLevel) bool { + + s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4) + + if !iSBN13Regex.MatchString(s) { + return false + } + + var checksum int32 + var i int32 + + factor := []int32{1, 3} + + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(s[i]-'0') + } + + return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0 +} + +// isISBN10 is the validation function for validating if the field's value is a valid v10 ISBN. +func isISBN10(fl FieldLevel) bool { + + s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3) + + if !iSBN10Regex.MatchString(s) { + return false + } + + var checksum int32 + var i int32 + + for i = 0; i < 9; i++ { + checksum += (i + 1) * int32(s[i]-'0') + } + + if s[9] == 'X' { + checksum += 10 * 10 + } else { + checksum += 10 * int32(s[9]-'0') + } + + return checksum%11 == 0 +} + +// isEthereumAddress is the validation function for validating if the field's value is a valid Ethereum address. 
+func isEthereumAddress(fl FieldLevel) bool { + address := fl.Field().String() + + if !ethAddressRegex.MatchString(address) { + return false + } + + if ethAddressRegexUpper.MatchString(address) || ethAddressRegexLower.MatchString(address) { + return true + } + + // Checksum validation. Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md + address = address[2:] // Skip "0x" prefix. + h := sha3.NewLegacyKeccak256() + // hash.Hash's io.Writer implementation says it never returns an error. https://golang.org/pkg/hash/#Hash + _, _ = h.Write([]byte(strings.ToLower(address))) + hash := hex.EncodeToString(h.Sum(nil)) + + for i := 0; i < len(address); i++ { + if address[i] <= '9' { // Skip 0-9 digits: they don't have upper/lower-case. + continue + } + if hash[i] > '7' && address[i] >= 'a' || hash[i] <= '7' && address[i] <= 'F' { + return false + } + } + + return true +} + +// isBitcoinAddress is the validation function for validating if the field's value is a valid btc address +func isBitcoinAddress(fl FieldLevel) bool { + address := fl.Field().String() + + if !btcAddressRegex.MatchString(address) { + return false + } + + alphabet := []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz") + + decode := [25]byte{} + + for _, n := range []byte(address) { + d := bytes.IndexByte(alphabet, n) + + for i := 24; i >= 0; i-- { + d += 58 * int(decode[i]) + decode[i] = byte(d % 256) + d /= 256 + } + } + + h := sha256.New() + _, _ = h.Write(decode[:21]) + d := h.Sum([]byte{}) + h = sha256.New() + _, _ = h.Write(d) + + validchecksum := [4]byte{} + computedchecksum := [4]byte{} + + copy(computedchecksum[:], h.Sum(d[:0])) + copy(validchecksum[:], decode[21:]) + + return validchecksum == computedchecksum +} + +// isBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address +func isBitcoinBech32Address(fl FieldLevel) bool { + address := fl.Field().String() + + if !btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) { + return false + } + + am := len(address) % 8 + + if am == 0 || am == 3 || am == 5 { + return false + } + + address = strings.ToLower(address) + + alphabet := "qpzry9x8gf2tvdw0s3jn54khce6mua7l" + + hr := []int{3, 3, 0, 2, 3} // the human readable part will always be bc + addr := address[3:] + dp := make([]int, 0, len(addr)) + + for _, c := range addr { + dp = append(dp, strings.IndexRune(alphabet, c)) + } + + ver := dp[0] + + if ver < 0 || ver > 16 { + return false + } + + if ver == 0 { + if len(address) != 42 && len(address) != 62 { + return false + } + } + + values := append(hr, dp...) + + GEN := []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3} + + p := 1 + + for _, v := range values { + b := p >> 25 + p = (p&0x1ffffff)<<5 ^ v + + for i := 0; i < 5; i++ { + if (b>>uint(i))&1 == 1 { + p ^= GEN[i] + } + } + } + + if p != 1 { + return false + } + + b := uint(0) + acc := 0 + mv := (1 << 5) - 1 + var sw []int + + for _, v := range dp[1 : len(dp)-6] { + acc = (acc << 5) | v + b += 5 + for b >= 8 { + b -= 8 + sw = append(sw, (acc>>b)&mv) + } + } + + if len(sw) < 2 || len(sw) > 40 { + return false + } + + return true +} + +// excludesRune is the validation function for validating that the field's value does not contain the rune specified within the param. 
+func excludesRune(fl FieldLevel) bool { + return !containsRune(fl) +} + +// excludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param. +func excludesAll(fl FieldLevel) bool { + return !containsAny(fl) +} + +// excludes is the validation function for validating that the field's value does not contain the text specified within the param. +func excludes(fl FieldLevel) bool { + return !contains(fl) +} + +// containsRune is the validation function for validating that the field's value contains the rune specified within the param. +func containsRune(fl FieldLevel) bool { + + r, _ := utf8.DecodeRuneInString(fl.Param()) + + return strings.ContainsRune(fl.Field().String(), r) +} + +// containsAny is the validation function for validating that the field's value contains any of the characters specified within the param. +func containsAny(fl FieldLevel) bool { + return strings.ContainsAny(fl.Field().String(), fl.Param()) +} + +// contains is the validation function for validating that the field's value contains the text specified within the param. +func contains(fl FieldLevel) bool { + return strings.Contains(fl.Field().String(), fl.Param()) +} + +// startsWith is the validation function for validating that the field's value starts with the text specified within the param. +func startsWith(fl FieldLevel) bool { + return strings.HasPrefix(fl.Field().String(), fl.Param()) +} + +// endsWith is the validation function for validating that the field's value ends with the text specified within the param. +func endsWith(fl FieldLevel) bool { + return strings.HasSuffix(fl.Field().String(), fl.Param()) +} + +// startsNotWith is the validation function for validating that the field's value does not start with the text specified within the param. +func startsNotWith(fl FieldLevel) bool { + return !startsWith(fl) +} + +// endsNotWith is the validation function for validating that the field's value does not end with the text specified within the param. +func endsNotWith(fl FieldLevel) bool { + return !endsWith(fl) +} + +// fieldContains is the validation function for validating if the current field's value contains the field specified by the param's value. +func fieldContains(fl FieldLevel) bool { + field := fl.Field() + + currentField, _, ok := fl.GetStructFieldOK() + + if !ok { + return false + } + + return strings.Contains(field.String(), currentField.String()) +} + +// fieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value. +func fieldExcludes(fl FieldLevel) bool { + field := fl.Field() + + currentField, _, ok := fl.GetStructFieldOK() + if !ok { + return true + } + + return !strings.Contains(field.String(), currentField.String()) +} + +// isNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value. 
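+// Illustrative sketch (not part of the upstream validator source): cross-field
+// rules like the one below are normally reached through struct tags. Assuming the
+// conventional "nefield" and "eqfield" tag names are the ones registered for
+// these functions, usage might look like:
+//
+//	type PasswordChange struct {
+//		Old     string
+//		New     string `validate:"nefield=Old"`
+//		Confirm string `validate:"eqfield=New"`
+//	}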
+func isNeField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + + if !ok || currentKind != kind { + return true + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() != currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() != currentField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() != currentField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) != int64(currentField.Len()) + + case reflect.Bool: + return field.Bool() != currentField.Bool() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return true + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return !fieldTime.Equal(t) + } + + } + + // default reflect.String: + return field.String() != currentField.String() +} + +// isNe is the validation function for validating that the field's value does not equal the provided param value. +func isNe(fl FieldLevel) bool { + return !isEq(fl) +} + +// isLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value. +func isLteCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() <= topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() <= topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() <= topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) <= int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.Before(topTime) || fieldTime.Equal(topTime) + } + } + + // default reflect.String: + return field.String() <= topField.String() +} + +// isLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func isLtCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() < topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() < topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() < topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) < int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.Before(topTime) + } + } + + // default reflect.String: + return field.String() < topField.String() +} + +// isGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value. +func isGteCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() >= topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() >= topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() >= topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) >= int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.After(topTime) || fieldTime.Equal(topTime) + } + } + + // default reflect.String: + return field.String() >= topField.String() +} + +// isGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value. +func isGtCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() > topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() > topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() > topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) > int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. 
struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.After(topTime) + } + } + + // default reflect.String: + return field.String() > topField.String() +} + +// isNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value. +func isNeCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return true + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return topField.Int() != field.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return topField.Uint() != field.Uint() + + case reflect.Float32, reflect.Float64: + return topField.Float() != field.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(topField.Len()) != int64(field.Len()) + + case reflect.Bool: + return topField.Bool() != field.Bool() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return true + } + + if fieldType == timeType { + + t := field.Interface().(time.Time) + fieldTime := topField.Interface().(time.Time) + + return !fieldTime.Equal(t) + } + } + + // default reflect.String: + return topField.String() != field.String() +} + +// isEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value. +func isEqCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return topField.Int() == field.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return topField.Uint() == field.Uint() + + case reflect.Float32, reflect.Float64: + return topField.Float() == field.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(topField.Len()) == int64(field.Len()) + + case reflect.Bool: + return topField.Bool() == field.Bool() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + t := field.Interface().(time.Time) + fieldTime := topField.Interface().(time.Time) + + return fieldTime.Equal(t) + } + } + + // default reflect.String: + return topField.String() == field.String() +} + +// isEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value. 
+func isEqField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() == currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() == currentField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() == currentField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) == int64(currentField.Len()) + + case reflect.Bool: + return field.Bool() == currentField.Bool() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Equal(t) + } + + } + + // default reflect.String: + return field.String() == currentField.String() +} + +// isEq is the validation function for validating if the current field's value is equal to the param's value. +func isEq(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + return field.String() == param + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) == p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() == p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() == p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() == p + + case reflect.Bool: + p := asBool(param) + + return field.Bool() == p + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isPostcodeByIso3166Alpha2 validates by value which is country code in iso 3166 alpha 2 +// example: `postcode_iso3166_alpha2=US` +func isPostcodeByIso3166Alpha2(fl FieldLevel) bool { + field := fl.Field() + param := fl.Param() + + reg, found := postCodeRegexDict[param] + if !found { + return false + } + + return reg.MatchString(field.String()) +} + +// isPostcodeByIso3166Alpha2 validates by field which represents for a value of country code in iso 3166 alpha 2 +// example: `postcode_iso3166_alpha2_field=CountryCode` +func isPostcodeByIso3166Alpha2Field(fl FieldLevel) bool { + field := fl.Field() + params := parseOneOfParam2(fl.Param()) + + if len(params) != 1 { + return false + } + + currentField, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), params[0]) + if !found { + return false + } + + if kind != reflect.String { + panic(fmt.Sprintf("Bad field type %T", currentField.Interface())) + } + + reg, found := postCodeRegexDict[currentField.String()] + if !found { + return false + } + + return reg.MatchString(field.String()) +} + +// isBase64 is the validation function for validating if the current field's value is a valid base 64. +func isBase64(fl FieldLevel) bool { + return base64Regex.MatchString(fl.Field().String()) +} + +// isBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string. 
+func isBase64URL(fl FieldLevel) bool { + return base64URLRegex.MatchString(fl.Field().String()) +} + +// isURI is the validation function for validating if the current field's value is a valid URI. +func isURI(fl FieldLevel) bool { + + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. see issue-#237 + if i := strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if len(s) == 0 { + return false + } + + _, err := url.ParseRequestURI(s) + + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isURL is the validation function for validating if the current field's value is a valid URL. +func isURL(fl FieldLevel) bool { + + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + var i int + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. see issue-#237 + if i = strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if len(s) == 0 { + return false + } + + url, err := url.ParseRequestURI(s) + + if err != nil || url.Scheme == "" { + return false + } + + return true + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isUrnRFC2141 is the validation function for validating if the current field's value is a valid URN as per RFC 2141. +func isUrnRFC2141(fl FieldLevel) bool { + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + str := field.String() + + _, match := urn.Parse([]byte(str)) + + return match + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isFile is the validation function for validating if the current field's value is a valid file path. +func isFile(fl FieldLevel) bool { + field := fl.Field() + + switch field.Kind() { + case reflect.String: + fileInfo, err := os.Stat(field.String()) + if err != nil { + return false + } + + return !fileInfo.IsDir() + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number. +func isE164(fl FieldLevel) bool { + return e164Regex.MatchString(fl.Field().String()) +} + +// isEmail is the validation function for validating if the current field's value is a valid email address. +func isEmail(fl FieldLevel) bool { + return emailRegex.MatchString(fl.Field().String()) +} + +// isHSLA is the validation function for validating if the current field's value is a valid HSLA color. +func isHSLA(fl FieldLevel) bool { + return hslaRegex.MatchString(fl.Field().String()) +} + +// isHSL is the validation function for validating if the current field's value is a valid HSL color. +func isHSL(fl FieldLevel) bool { + return hslRegex.MatchString(fl.Field().String()) +} + +// isRGBA is the validation function for validating if the current field's value is a valid RGBA color. +func isRGBA(fl FieldLevel) bool { + return rgbaRegex.MatchString(fl.Field().String()) +} + +// isRGB is the validation function for validating if the current field's value is a valid RGB color. 
+func isRGB(fl FieldLevel) bool { + return rgbRegex.MatchString(fl.Field().String()) +} + +// isHEXColor is the validation function for validating if the current field's value is a valid HEX color. +func isHEXColor(fl FieldLevel) bool { + return hexColorRegex.MatchString(fl.Field().String()) +} + +// isHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal. +func isHexadecimal(fl FieldLevel) bool { + return hexadecimalRegex.MatchString(fl.Field().String()) +} + +// isNumber is the validation function for validating if the current field's value is a valid number. +func isNumber(fl FieldLevel) bool { + switch fl.Field().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return true + default: + return numberRegex.MatchString(fl.Field().String()) + } +} + +// isNumeric is the validation function for validating if the current field's value is a valid numeric value. +func isNumeric(fl FieldLevel) bool { + switch fl.Field().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return true + default: + return numericRegex.MatchString(fl.Field().String()) + } +} + +// isAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value. +func isAlphanum(fl FieldLevel) bool { + return alphaNumericRegex.MatchString(fl.Field().String()) +} + +// isAlpha is the validation function for validating if the current field's value is a valid alpha value. +func isAlpha(fl FieldLevel) bool { + return alphaRegex.MatchString(fl.Field().String()) +} + +// isAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value. +func isAlphanumUnicode(fl FieldLevel) bool { + return alphaUnicodeNumericRegex.MatchString(fl.Field().String()) +} + +// isAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value. +func isAlphaUnicode(fl FieldLevel) bool { + return alphaUnicodeRegex.MatchString(fl.Field().String()) +} + +// isBoolean is the validation function for validating if the current field's value can be safely converted to a boolean. +func isBoolean(fl FieldLevel) bool { + _, err := strconv.ParseBool(fl.Field().String()) + return err == nil +} + +// isDefault is the opposite of required aka hasValue +func isDefault(fl FieldLevel) bool { + return !hasValue(fl) +} + +// hasValue is the validation function for validating if the current field's value is not the default static value. 
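+// Illustrative sketch (not from the upstream source): hasValue is assumed to be
+// what the well-known "required" tag maps to, with isDefault above as its
+// negation (assumed tag name "isdefault"). A minimal use might be:
+//
+//	type Host struct {
+//		Name string `validate:"required"`  // must not be the zero value
+//		Note string `validate:"isdefault"` // must still be the zero value
+//	}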
+func hasValue(fl FieldLevel) bool { + field := fl.Field() + switch field.Kind() { + case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: + return !field.IsNil() + default: + if fl.(*validate).fldIsPointer && field.Interface() != nil { + return true + } + return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface() + } +} + +// requireCheckField is a func for check field kind +func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue bool) bool { + field := fl.Field() + kind := field.Kind() + var nullable, found bool + if len(param) > 0 { + field, kind, nullable, found = fl.GetStructFieldOKAdvanced2(fl.Parent(), param) + if !found { + return defaultNotFoundValue + } + } + switch kind { + case reflect.Invalid: + return defaultNotFoundValue + case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: + return field.IsNil() + default: + if nullable && field.Interface() != nil { + return false + } + return field.IsValid() && field.Interface() == reflect.Zero(field.Type()).Interface() + } +} + +// requireCheckFieldValue is a func for check field value +func requireCheckFieldValue(fl FieldLevel, param string, value string, defaultNotFoundValue bool) bool { + field, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), param) + if !found { + return defaultNotFoundValue + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() == asInt(value) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() == asUint(value) + + case reflect.Float32, reflect.Float64: + return field.Float() == asFloat(value) + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) == asInt(value) + + case reflect.Bool: + return field.Bool() == asBool(value) + } + + // default reflect.String: + return field.String() == value +} + +// requiredIf is the validation function +// The field under validation must be present and not empty only if all the other specified fields are equal to the value following with the specified field. +func requiredIf(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + if len(params)%2 != 0 { + panic(fmt.Sprintf("Bad param number for required_if %s", fl.FieldName())) + } + for i := 0; i < len(params); i += 2 { + if !requireCheckFieldValue(fl, params[i], params[i+1], false) { + return true + } + } + return hasValue(fl) +} + +// requiredUnless is the validation function +// The field under validation must be present and not empty only unless all the other specified fields are equal to the value following with the specified field. +func requiredUnless(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + if len(params)%2 != 0 { + panic(fmt.Sprintf("Bad param number for required_unless %s", fl.FieldName())) + } + + for i := 0; i < len(params); i += 2 { + if requireCheckFieldValue(fl, params[i], params[i+1], false) { + return true + } + } + return hasValue(fl) +} + +// excludedWith is the validation function +// The field under validation must not be present or is empty if any of the other specified fields are present. 
+func excludedWith(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return !hasValue(fl) + } + } + return true +} + +// requiredWith is the validation function +// The field under validation must be present and not empty only if any of the other specified fields are present. +func requiredWith(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return hasValue(fl) + } + } + return true +} + +// excludedWithAll is the validation function +// The field under validation must not be present or is empty if all of the other specified fields are present. +func excludedWithAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if requireCheckFieldKind(fl, param, true) { + return true + } + } + return !hasValue(fl) +} + +// requiredWithAll is the validation function +// The field under validation must be present and not empty only if all of the other specified fields are present. +func requiredWithAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if requireCheckFieldKind(fl, param, true) { + return true + } + } + return hasValue(fl) +} + +// excludedWithout is the validation function +// The field under validation must not be present or is empty when any of the other specified fields are not present. +func excludedWithout(fl FieldLevel) bool { + if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) { + return !hasValue(fl) + } + return true +} + +// requiredWithout is the validation function +// The field under validation must be present and not empty only when any of the other specified fields are not present. +func requiredWithout(fl FieldLevel) bool { + if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) { + return hasValue(fl) + } + return true +} + +// excludedWithoutAll is the validation function +// The field under validation must not be present or is empty when all of the other specified fields are not present. +func excludedWithoutAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return true + } + } + return !hasValue(fl) +} + +// requiredWithoutAll is the validation function +// The field under validation must be present and not empty only when all of the other specified fields are not present. +func requiredWithoutAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return true + } + } + return hasValue(fl) +} + +// isGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value. 
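+// Illustrative sketch (not part of the upstream source): assuming the usual
+// "gtefield" tag name is registered for this function, a range check where End
+// must not precede Start could be declared as:
+//
+//	type Window struct {
+//		Start time.Time
+//		End   time.Time `validate:"gtefield=Start"`
+//	}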
+func isGteField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() >= currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() >= currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() >= currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.After(t) || fieldTime.Equal(t) + } + } + + // default reflect.String + return len(field.String()) >= len(currentField.String()) +} + +// isGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value. +func isGtField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() > currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() > currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() > currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.After(t) + } + } + + // default reflect.String + return len(field.String()) > len(currentField.String()) +} + +// isGte is the validation function for validating if the current field's value is greater than or equal to the param's value. +func isGte(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) >= p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) >= p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() >= p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() >= p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() >= p + + case reflect.Struct: + + if field.Type() == timeType { + + now := time.Now().UTC() + t := field.Interface().(time.Time) + + return t.After(now) || t.Equal(now) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isGt is the validation function for validating if the current field's value is greater than the param's value. 
+func isGt(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) > p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) > p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() > p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() > p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() > p + case reflect.Struct: + + if field.Type() == timeType { + + return field.Interface().(time.Time).After(time.Now().UTC()) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// hasLengthOf is the validation function for validating if the current field's value is equal to the param's value. +func hasLengthOf(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) == p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) == p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() == p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() == p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() == p + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// hasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value. +func hasMinOf(fl FieldLevel) bool { + return isGte(fl) +} + +// isLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value. +func isLteField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() <= currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() <= currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() <= currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Before(t) || fieldTime.Equal(t) + } + } + + // default reflect.String + return len(field.String()) <= len(currentField.String()) +} + +// isLtField is the validation function for validating if the current field's value is less than the field specified by the param's value. 
+func isLtField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() < currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() < currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() < currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Before(t) + } + } + + // default reflect.String + return len(field.String()) < len(currentField.String()) +} + +// isLte is the validation function for validating if the current field's value is less than or equal to the param's value. +func isLte(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) <= p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) <= p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() <= p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() <= p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() <= p + + case reflect.Struct: + + if field.Type() == timeType { + + now := time.Now().UTC() + t := field.Interface().(time.Time) + + return t.Before(now) || t.Equal(now) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isLt is the validation function for validating if the current field's value is less than the param's value. +func isLt(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) < p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) < p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() < p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() < p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() < p + + case reflect.Struct: + + if field.Type() == timeType { + + return field.Interface().(time.Time).Before(time.Now().UTC()) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// hasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value. +func hasMaxOf(fl FieldLevel) bool { + return isLte(fl) +} + +// isTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address. 
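+// Illustrative sketch (not from the upstream source): this and the resolvable
+// address checks that follow are assumed to sit behind tags such as
+// "tcp4_addr", "udp_addr" and "ip_addr", for example:
+//
+//	type Endpoint struct {
+//		Bind string `validate:"tcp4_addr"` // e.g. "127.0.0.1:8080"
+//	}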
+func isTCP4AddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp4", fl.Field().String()) + return err == nil +} + +// isTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address. +func isTCP6AddrResolvable(fl FieldLevel) bool { + + if !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp6", fl.Field().String()) + + return err == nil +} + +// isTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address. +func isTCPAddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) && !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp", fl.Field().String()) + + return err == nil +} + +// isUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address. +func isUDP4AddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp4", fl.Field().String()) + + return err == nil +} + +// isUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address. +func isUDP6AddrResolvable(fl FieldLevel) bool { + + if !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp6", fl.Field().String()) + + return err == nil +} + +// isUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address. +func isUDPAddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) && !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp", fl.Field().String()) + + return err == nil +} + +// isIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address. +func isIP4AddrResolvable(fl FieldLevel) bool { + + if !isIPv4(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip4", fl.Field().String()) + + return err == nil +} + +// isIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address. +func isIP6AddrResolvable(fl FieldLevel) bool { + + if !isIPv6(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip6", fl.Field().String()) + + return err == nil +} + +// isIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address. +func isIPAddrResolvable(fl FieldLevel) bool { + + if !isIP(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip", fl.Field().String()) + + return err == nil +} + +// isUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address. 
+func isUnixAddrResolvable(fl FieldLevel) bool { + + _, err := net.ResolveUnixAddr("unix", fl.Field().String()) + + return err == nil +} + +func isIP4Addr(fl FieldLevel) bool { + + val := fl.Field().String() + + if idx := strings.LastIndex(val, ":"); idx != -1 { + val = val[0:idx] + } + + ip := net.ParseIP(val) + + return ip != nil && ip.To4() != nil +} + +func isIP6Addr(fl FieldLevel) bool { + + val := fl.Field().String() + + if idx := strings.LastIndex(val, ":"); idx != -1 { + if idx != 0 && val[idx-1:idx] == "]" { + val = val[1 : idx-1] + } + } + + ip := net.ParseIP(val) + + return ip != nil && ip.To4() == nil +} + +func isHostnameRFC952(fl FieldLevel) bool { + return hostnameRegexRFC952.MatchString(fl.Field().String()) +} + +func isHostnameRFC1123(fl FieldLevel) bool { + return hostnameRegexRFC1123.MatchString(fl.Field().String()) +} + +func isFQDN(fl FieldLevel) bool { + val := fl.Field().String() + + if val == "" { + return false + } + + return fqdnRegexRFC1123.MatchString(val) +} + +// isDir is the validation function for validating if the current field's value is a valid directory. +func isDir(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + fileInfo, err := os.Stat(field.String()) + if err != nil { + return false + } + + return fileInfo.IsDir() + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isJSON is the validation function for validating if the current field's value is a valid json string. +func isJSON(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + val := field.String() + return json.Valid([]byte(val)) + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isJWT is the validation function for validating if the current field's value is a valid JWT string. +func isJWT(fl FieldLevel) bool { + return jWTRegex.MatchString(fl.Field().String()) +} + +// isHostnamePort validates a <dns>:<port> combination for fields typically used for socket address. +func isHostnamePort(fl FieldLevel) bool { + val := fl.Field().String() + host, port, err := net.SplitHostPort(val) + if err != nil { + return false + } + // Port must be an int <= 65535. + if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 1 { + return false + } + + // If host is specified, it should match a DNS name + if host != "" { + return hostnameRegexRFC1123.MatchString(host) + } + return true +} + +// isLowercase is the validation function for validating if the current field's value is a lowercase string. +func isLowercase(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + if field.String() == "" { + return false + } + return field.String() == strings.ToLower(field.String()) + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isUppercase is the validation function for validating if the current field's value is an uppercase string. +func isUppercase(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + if field.String() == "" { + return false + } + return field.String() == strings.ToUpper(field.String()) + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isDatetime is the validation function for validating if the current field's value is a valid datetime string.
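+// Illustrative sketch (not part of the upstream source): the datetime check
+// takes a Go reference layout as its parameter, so, assuming the usual
+// "datetime" tag name, an ISO-style date field could be declared as:
+//
+//	type Report struct {
+//		Day string `validate:"datetime=2006-01-02"`
+//	}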
+func isDatetime(fl FieldLevel) bool { + field := fl.Field() + param := fl.Param() + + if field.Kind() == reflect.String { + _, err := time.Parse(param, field.String()) + + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isTimeZone is the validation function for validating if the current field's value is a valid time zone string. +func isTimeZone(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + // empty value is converted to UTC by time.LoadLocation but disallow it as it is not a valid time zone name + if field.String() == "" { + return false + } + + // Local value is converted to the current system time zone by time.LoadLocation but disallow it as it is not a valid time zone name + if strings.ToLower(field.String()) == "local" { + return false + } + + _, err := time.LoadLocation(field.String()) + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 country code. +func isIso3166Alpha2(fl FieldLevel) bool { + val := fl.Field().String() + return iso3166_1_alpha2[val] +} + +// isIso3166Alpha3 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code. +func isIso3166Alpha3(fl FieldLevel) bool { + val := fl.Field().String() + return iso3166_1_alpha3[val] +} + +// isIso3166AlphaNumeric is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code. +func isIso3166AlphaNumeric(fl FieldLevel) bool { + field := fl.Field() + + var code int + switch field.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + code = int(field.Int() % 1000) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + code = int(field.Uint() % 1000) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + return iso3166_1_alpha_numeric[code] +} + +// isIso31662 is the validation function for validating if the current field's value is a valid iso3166-2 code. +func isIso31662(fl FieldLevel) bool { + val := fl.Field().String() + return iso3166_2[val] +} + +// isIso4217 is the validation function for validating if the current field's value is a valid iso4217 currency code. +func isIso4217(fl FieldLevel) bool { + val := fl.Field().String() + return iso4217[val] +} + +// isIso4217Numeric is the validation function for validating if the current field's value is a valid iso4217 numeric currency code.
+func isIso4217Numeric(fl FieldLevel) bool { + field := fl.Field() + + var code int + switch field.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + code = int(field.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + code = int(field.Uint()) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + return iso4217_numeric[code] +} + +// isBCP47LanguageTag is the validation function for validating if the current field's value is a valid BCP 47 language tag, as parsed by language.Parse +func isBCP47LanguageTag(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + _, err := language.Parse(field.String()) + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isIsoBicFormat is the validation function for validating if the current field's value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362 +func isIsoBicFormat(fl FieldLevel) bool { + bicString := fl.Field().String() + + return bicRegex.MatchString(bicString) +} diff --git a/vendor/github.com/go-playground/validator/v10/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go new file mode 100644 index 00000000000..0d18d6ec49c --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/cache.go @@ -0,0 +1,322 @@ +package validator + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type tagType uint8 + +const ( + typeDefault tagType = iota + typeOmitEmpty + typeIsDefault + typeNoStructLevel + typeStructOnly + typeDive + typeOr + typeKeys + typeEndKeys +) + +const ( + invalidValidation = "Invalid validation tag on field '%s'" + undefinedValidation = "Undefined validation function '%s' on field '%s'" + keysTagNotDefined = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag" +) + +type structCache struct { + lock sync.Mutex + m atomic.Value // map[reflect.Type]*cStruct +} + +func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) { + c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key] + return +} + +func (sc *structCache) Set(key reflect.Type, value *cStruct) { + m := sc.m.Load().(map[reflect.Type]*cStruct) + nm := make(map[reflect.Type]*cStruct, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + sc.m.Store(nm) +} + +type tagCache struct { + lock sync.Mutex + m atomic.Value // map[string]*cTag +} + +func (tc *tagCache) Get(key string) (c *cTag, found bool) { + c, found = tc.m.Load().(map[string]*cTag)[key] + return +} + +func (tc *tagCache) Set(key string, value *cTag) { + m := tc.m.Load().(map[string]*cTag) + nm := make(map[string]*cTag, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + tc.m.Store(nm) +} + +type cStruct struct { + name string + fields []*cField + fn StructLevelFuncCtx +} + +type cField struct { + idx int + name string + altName string + namesEqual bool + cTags *cTag +} + +type cTag struct { + tag string + aliasTag string + actualAliasTag string + param string + keys *cTag // only populated when using tag's 'keys' and 'endkeys' for map key validation + next *cTag + fn FuncCtx + typeof tagType + hasTag bool + hasAlias bool + hasParam bool // true if parameter used eg. 
eq= where the equal sign has been set + isBlockEnd bool // indicates the current tag represents the last validation in the block + runValidationWhenNil bool +} + +func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct { + v.structCache.lock.Lock() + defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise! + + typ := current.Type() + + // could have been multiple trying to access, but once first is done this ensures struct + // isn't parsed again. + cs, ok := v.structCache.Get(typ) + if ok { + return cs + } + + cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]} + + numFields := current.NumField() + + var ctag *cTag + var fld reflect.StructField + var tag string + var customName string + + for i := 0; i < numFields; i++ { + + fld = typ.Field(i) + + if !fld.Anonymous && len(fld.PkgPath) > 0 { + continue + } + + tag = fld.Tag.Get(v.tagName) + + if tag == skipValidationTag { + continue + } + + customName = fld.Name + + if v.hasTagNameFunc { + name := v.tagNameFunc(fld) + if len(name) > 0 { + customName = name + } + } + + // NOTE: cannot use shared tag cache, because tags may be equal, but things like alias may be different + // and so only struct level caching can be used instead of combined with Field tag caching + + if len(tag) > 0 { + ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false) + } else { + // even if field doesn't have validations need cTag for traversing to potential inner/nested + // elements of the field. + ctag = new(cTag) + } + + cs.fields = append(cs.fields, &cField{ + idx: i, + name: fld.Name, + altName: customName, + cTags: ctag, + namesEqual: fld.Name == customName, + }) + } + v.structCache.Set(typ, cs) + return cs +} + +func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) { + var t string + noAlias := len(alias) == 0 + tags := strings.Split(tag, tagSeparator) + + for i := 0; i < len(tags); i++ { + t = tags[i] + if noAlias { + alias = t + } + + // check map for alias and process new tags, otherwise process as usual + if tagsVal, found := v.aliases[t]; found { + if i == 0 { + firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + } else { + next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + current.next, current = next, curr + + } + continue + } + + var prevTag tagType + + if i == 0 { + current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true, typeof: typeDefault} + firstCtag = current + } else { + prevTag = current.typeof + current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true} + current = current.next + } + + switch t { + case diveTag: + current.typeof = typeDive + continue + + case keysTag: + current.typeof = typeKeys + + if i == 0 || prevTag != typeDive { + panic(fmt.Sprintf("'%s' tag must be immediately preceded by the '%s' tag", keysTag, diveTag)) + } + + current.typeof = typeKeys + + // need to pass along only keys tag + // need to increment i to skip over the keys tags + b := make([]byte, 0, 64) + + i++ + + for ; i < len(tags); i++ { + + b = append(b, tags[i]...) 
+ b = append(b, ',') + + if tags[i] == endKeysTag { + break + } + } + + current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false) + continue + + case endKeysTag: + current.typeof = typeEndKeys + + // if there are more in tags then there was no keysTag defined + // and an error should be thrown + if i != len(tags)-1 { + panic(keysTagNotDefined) + } + return + + case omitempty: + current.typeof = typeOmitEmpty + continue + + case structOnlyTag: + current.typeof = typeStructOnly + continue + + case noStructLevelTag: + current.typeof = typeNoStructLevel + continue + + default: + if t == isdefault { + current.typeof = typeIsDefault + } + // if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C" + orVals := strings.Split(t, orSeparator) + + for j := 0; j < len(orVals); j++ { + vals := strings.SplitN(orVals[j], tagKeySeparator, 2) + if noAlias { + alias = vals[0] + current.aliasTag = alias + } else { + current.actualAliasTag = t + } + + if j > 0 { + current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true} + current = current.next + } + current.hasParam = len(vals) > 1 + + current.tag = vals[0] + if len(current.tag) == 0 { + panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName))) + } + + if wrapper, ok := v.validations[current.tag]; ok { + current.fn = wrapper.fn + current.runValidationWhenNil = wrapper.runValidatinOnNil + } else { + panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName))) + } + + if len(orVals) > 1 { + current.typeof = typeOr + } + + if len(vals) > 1 { + current.param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1) + } + } + current.isBlockEnd = true + } + } + return +} + +func (v *Validate) fetchCacheTag(tag string) *cTag { + // find cached tag + ctag, found := v.tagCache.Get(tag) + if !found { + v.tagCache.lock.Lock() + defer v.tagCache.lock.Unlock() + + // could have been multiple trying to access, but once first is done this ensures tag + // isn't parsed again. 
+ ctag, found = v.tagCache.Get(tag) + if !found { + ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false) + v.tagCache.Set(tag, ctag) + } + } + return ctag +} diff --git a/vendor/github.com/go-playground/validator/v10/country_codes.go b/vendor/github.com/go-playground/validator/v10/country_codes.go new file mode 100644 index 00000000000..0d9eda0338e --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/country_codes.go @@ -0,0 +1,1132 @@ +package validator + +var iso3166_1_alpha2 = map[string]bool{ + // see: https://www.iso.org/iso-3166-country-codes.html + "AF": true, "AX": true, "AL": true, "DZ": true, "AS": true, + "AD": true, "AO": true, "AI": true, "AQ": true, "AG": true, + "AR": true, "AM": true, "AW": true, "AU": true, "AT": true, + "AZ": true, "BS": true, "BH": true, "BD": true, "BB": true, + "BY": true, "BE": true, "BZ": true, "BJ": true, "BM": true, + "BT": true, "BO": true, "BQ": true, "BA": true, "BW": true, + "BV": true, "BR": true, "IO": true, "BN": true, "BG": true, + "BF": true, "BI": true, "KH": true, "CM": true, "CA": true, + "CV": true, "KY": true, "CF": true, "TD": true, "CL": true, + "CN": true, "CX": true, "CC": true, "CO": true, "KM": true, + "CG": true, "CD": true, "CK": true, "CR": true, "CI": true, + "HR": true, "CU": true, "CW": true, "CY": true, "CZ": true, + "DK": true, "DJ": true, "DM": true, "DO": true, "EC": true, + "EG": true, "SV": true, "GQ": true, "ER": true, "EE": true, + "ET": true, "FK": true, "FO": true, "FJ": true, "FI": true, + "FR": true, "GF": true, "PF": true, "TF": true, "GA": true, + "GM": true, "GE": true, "DE": true, "GH": true, "GI": true, + "GR": true, "GL": true, "GD": true, "GP": true, "GU": true, + "GT": true, "GG": true, "GN": true, "GW": true, "GY": true, + "HT": true, "HM": true, "VA": true, "HN": true, "HK": true, + "HU": true, "IS": true, "IN": true, "ID": true, "IR": true, + "IQ": true, "IE": true, "IM": true, "IL": true, "IT": true, + "JM": true, "JP": true, "JE": true, "JO": true, "KZ": true, + "KE": true, "KI": true, "KP": true, "KR": true, "KW": true, + "KG": true, "LA": true, "LV": true, "LB": true, "LS": true, + "LR": true, "LY": true, "LI": true, "LT": true, "LU": true, + "MO": true, "MK": true, "MG": true, "MW": true, "MY": true, + "MV": true, "ML": true, "MT": true, "MH": true, "MQ": true, + "MR": true, "MU": true, "YT": true, "MX": true, "FM": true, + "MD": true, "MC": true, "MN": true, "ME": true, "MS": true, + "MA": true, "MZ": true, "MM": true, "NA": true, "NR": true, + "NP": true, "NL": true, "NC": true, "NZ": true, "NI": true, + "NE": true, "NG": true, "NU": true, "NF": true, "MP": true, + "NO": true, "OM": true, "PK": true, "PW": true, "PS": true, + "PA": true, "PG": true, "PY": true, "PE": true, "PH": true, + "PN": true, "PL": true, "PT": true, "PR": true, "QA": true, + "RE": true, "RO": true, "RU": true, "RW": true, "BL": true, + "SH": true, "KN": true, "LC": true, "MF": true, "PM": true, + "VC": true, "WS": true, "SM": true, "ST": true, "SA": true, + "SN": true, "RS": true, "SC": true, "SL": true, "SG": true, + "SX": true, "SK": true, "SI": true, "SB": true, "SO": true, + "ZA": true, "GS": true, "SS": true, "ES": true, "LK": true, + "SD": true, "SR": true, "SJ": true, "SZ": true, "SE": true, + "CH": true, "SY": true, "TW": true, "TJ": true, "TZ": true, + "TH": true, "TL": true, "TG": true, "TK": true, "TO": true, + "TT": true, "TN": true, "TR": true, "TM": true, "TC": true, + "TV": true, "UG": true, "UA": true, "AE": true, "GB": true, + "US": true, "UM": true, "UY": true, "UZ": true, "VU": true, + 
"VE": true, "VN": true, "VG": true, "VI": true, "WF": true, + "EH": true, "YE": true, "ZM": true, "ZW": true, +} + +var iso3166_1_alpha3 = map[string]bool{ + // see: https://www.iso.org/iso-3166-country-codes.html + "AFG": true, "ALB": true, "DZA": true, "ASM": true, "AND": true, + "AGO": true, "AIA": true, "ATA": true, "ATG": true, "ARG": true, + "ARM": true, "ABW": true, "AUS": true, "AUT": true, "AZE": true, + "BHS": true, "BHR": true, "BGD": true, "BRB": true, "BLR": true, + "BEL": true, "BLZ": true, "BEN": true, "BMU": true, "BTN": true, + "BOL": true, "BES": true, "BIH": true, "BWA": true, "BVT": true, + "BRA": true, "IOT": true, "BRN": true, "BGR": true, "BFA": true, + "BDI": true, "CPV": true, "KHM": true, "CMR": true, "CAN": true, + "CYM": true, "CAF": true, "TCD": true, "CHL": true, "CHN": true, + "CXR": true, "CCK": true, "COL": true, "COM": true, "COD": true, + "COG": true, "COK": true, "CRI": true, "HRV": true, "CUB": true, + "CUW": true, "CYP": true, "CZE": true, "CIV": true, "DNK": true, + "DJI": true, "DMA": true, "DOM": true, "ECU": true, "EGY": true, + "SLV": true, "GNQ": true, "ERI": true, "EST": true, "SWZ": true, + "ETH": true, "FLK": true, "FRO": true, "FJI": true, "FIN": true, + "FRA": true, "GUF": true, "PYF": true, "ATF": true, "GAB": true, + "GMB": true, "GEO": true, "DEU": true, "GHA": true, "GIB": true, + "GRC": true, "GRL": true, "GRD": true, "GLP": true, "GUM": true, + "GTM": true, "GGY": true, "GIN": true, "GNB": true, "GUY": true, + "HTI": true, "HMD": true, "VAT": true, "HND": true, "HKG": true, + "HUN": true, "ISL": true, "IND": true, "IDN": true, "IRN": true, + "IRQ": true, "IRL": true, "IMN": true, "ISR": true, "ITA": true, + "JAM": true, "JPN": true, "JEY": true, "JOR": true, "KAZ": true, + "KEN": true, "KIR": true, "PRK": true, "KOR": true, "KWT": true, + "KGZ": true, "LAO": true, "LVA": true, "LBN": true, "LSO": true, + "LBR": true, "LBY": true, "LIE": true, "LTU": true, "LUX": true, + "MAC": true, "MDG": true, "MWI": true, "MYS": true, "MDV": true, + "MLI": true, "MLT": true, "MHL": true, "MTQ": true, "MRT": true, + "MUS": true, "MYT": true, "MEX": true, "FSM": true, "MDA": true, + "MCO": true, "MNG": true, "MNE": true, "MSR": true, "MAR": true, + "MOZ": true, "MMR": true, "NAM": true, "NRU": true, "NPL": true, + "NLD": true, "NCL": true, "NZL": true, "NIC": true, "NER": true, + "NGA": true, "NIU": true, "NFK": true, "MKD": true, "MNP": true, + "NOR": true, "OMN": true, "PAK": true, "PLW": true, "PSE": true, + "PAN": true, "PNG": true, "PRY": true, "PER": true, "PHL": true, + "PCN": true, "POL": true, "PRT": true, "PRI": true, "QAT": true, + "ROU": true, "RUS": true, "RWA": true, "REU": true, "BLM": true, + "SHN": true, "KNA": true, "LCA": true, "MAF": true, "SPM": true, + "VCT": true, "WSM": true, "SMR": true, "STP": true, "SAU": true, + "SEN": true, "SRB": true, "SYC": true, "SLE": true, "SGP": true, + "SXM": true, "SVK": true, "SVN": true, "SLB": true, "SOM": true, + "ZAF": true, "SGS": true, "SSD": true, "ESP": true, "LKA": true, + "SDN": true, "SUR": true, "SJM": true, "SWE": true, "CHE": true, + "SYR": true, "TWN": true, "TJK": true, "TZA": true, "THA": true, + "TLS": true, "TGO": true, "TKL": true, "TON": true, "TTO": true, + "TUN": true, "TUR": true, "TKM": true, "TCA": true, "TUV": true, + "UGA": true, "UKR": true, "ARE": true, "GBR": true, "UMI": true, + "USA": true, "URY": true, "UZB": true, "VUT": true, "VEN": true, + "VNM": true, "VGB": true, "VIR": true, "WLF": true, "ESH": true, + "YEM": true, "ZMB": true, "ZWE": true, "ALA": true, +} 
+var iso3166_1_alpha_numeric = map[int]bool{ + // see: https://www.iso.org/iso-3166-country-codes.html + 4: true, 8: true, 12: true, 16: true, 20: true, + 24: true, 660: true, 10: true, 28: true, 32: true, + 51: true, 533: true, 36: true, 40: true, 31: true, + 44: true, 48: true, 50: true, 52: true, 112: true, + 56: true, 84: true, 204: true, 60: true, 64: true, + 68: true, 535: true, 70: true, 72: true, 74: true, + 76: true, 86: true, 96: true, 100: true, 854: true, + 108: true, 132: true, 116: true, 120: true, 124: true, + 136: true, 140: true, 148: true, 152: true, 156: true, + 162: true, 166: true, 170: true, 174: true, 180: true, + 178: true, 184: true, 188: true, 191: true, 192: true, + 531: true, 196: true, 203: true, 384: true, 208: true, + 262: true, 212: true, 214: true, 218: true, 818: true, + 222: true, 226: true, 232: true, 233: true, 748: true, + 231: true, 238: true, 234: true, 242: true, 246: true, + 250: true, 254: true, 258: true, 260: true, 266: true, + 270: true, 268: true, 276: true, 288: true, 292: true, + 300: true, 304: true, 308: true, 312: true, 316: true, + 320: true, 831: true, 324: true, 624: true, 328: true, + 332: true, 334: true, 336: true, 340: true, 344: true, + 348: true, 352: true, 356: true, 360: true, 364: true, + 368: true, 372: true, 833: true, 376: true, 380: true, + 388: true, 392: true, 832: true, 400: true, 398: true, + 404: true, 296: true, 408: true, 410: true, 414: true, + 417: true, 418: true, 428: true, 422: true, 426: true, + 430: true, 434: true, 438: true, 440: true, 442: true, + 446: true, 450: true, 454: true, 458: true, 462: true, + 466: true, 470: true, 584: true, 474: true, 478: true, + 480: true, 175: true, 484: true, 583: true, 498: true, + 492: true, 496: true, 499: true, 500: true, 504: true, + 508: true, 104: true, 516: true, 520: true, 524: true, + 528: true, 540: true, 554: true, 558: true, 562: true, + 566: true, 570: true, 574: true, 807: true, 580: true, + 578: true, 512: true, 586: true, 585: true, 275: true, + 591: true, 598: true, 600: true, 604: true, 608: true, + 612: true, 616: true, 620: true, 630: true, 634: true, + 642: true, 643: true, 646: true, 638: true, 652: true, + 654: true, 659: true, 662: true, 663: true, 666: true, + 670: true, 882: true, 674: true, 678: true, 682: true, + 686: true, 688: true, 690: true, 694: true, 702: true, + 534: true, 703: true, 705: true, 90: true, 706: true, + 710: true, 239: true, 728: true, 724: true, 144: true, + 729: true, 740: true, 744: true, 752: true, 756: true, + 760: true, 158: true, 762: true, 834: true, 764: true, + 626: true, 768: true, 772: true, 776: true, 780: true, + 788: true, 792: true, 795: true, 796: true, 798: true, + 800: true, 804: true, 784: true, 826: true, 581: true, + 840: true, 858: true, 860: true, 548: true, 862: true, + 704: true, 92: true, 850: true, 876: true, 732: true, + 887: true, 894: true, 716: true, 248: true, +} + +var iso3166_2 = map[string]bool{ + "AD-02" : true, "AD-03" : true, "AD-04" : true, "AD-05" : true, "AD-06" : true, + "AD-07" : true, "AD-08" : true, "AE-AJ" : true, "AE-AZ" : true, "AE-DU" : true, + "AE-FU" : true, "AE-RK" : true, "AE-SH" : true, "AE-UQ" : true, "AF-BAL" : true, + "AF-BAM" : true, "AF-BDG" : true, "AF-BDS" : true, "AF-BGL" : true, "AF-DAY" : true, + "AF-FRA" : true, "AF-FYB" : true, "AF-GHA" : true, "AF-GHO" : true, "AF-HEL" : true, + "AF-HER" : true, "AF-JOW" : true, "AF-KAB" : true, "AF-KAN" : true, "AF-KAP" : true, + "AF-KDZ" : true, "AF-KHO" : true, "AF-KNR" : true, "AF-LAG" : true, "AF-LOG" : true, + "AF-NAN" 
: true, "AF-NIM" : true, "AF-NUR" : true, "AF-PAN" : true, "AF-PAR" : true, + "AF-PIA" : true, "AF-PKA" : true, "AF-SAM" : true, "AF-SAR" : true, "AF-TAK" : true, + "AF-URU" : true, "AF-WAR" : true, "AF-ZAB" : true, "AG-03" : true, "AG-04" : true, + "AG-05" : true, "AG-06" : true, "AG-07" : true, "AG-08" : true, "AG-10" : true, + "AG-11" : true, "AL-01" : true, "AL-02" : true, "AL-03" : true, "AL-04" : true, + "AL-05" : true, "AL-06" : true, "AL-07" : true, "AL-08" : true, "AL-09" : true, + "AL-10" : true, "AL-11" : true, "AL-12" : true, "AL-BR" : true, "AL-BU" : true, + "AL-DI" : true, "AL-DL" : true, "AL-DR" : true, "AL-DV" : true, "AL-EL" : true, + "AL-ER" : true, "AL-FR" : true, "AL-GJ" : true, "AL-GR" : true, "AL-HA" : true, + "AL-KA" : true, "AL-KB" : true, "AL-KC" : true, "AL-KO" : true, "AL-KR" : true, + "AL-KU" : true, "AL-LB" : true, "AL-LE" : true, "AL-LU" : true, "AL-MK" : true, + "AL-MM" : true, "AL-MR" : true, "AL-MT" : true, "AL-PG" : true, "AL-PQ" : true, + "AL-PR" : true, "AL-PU" : true, "AL-SH" : true, "AL-SK" : true, "AL-SR" : true, + "AL-TE" : true, "AL-TP" : true, "AL-TR" : true, "AL-VL" : true, "AM-AG" : true, + "AM-AR" : true, "AM-AV" : true, "AM-ER" : true, "AM-GR" : true, "AM-KT" : true, + "AM-LO" : true, "AM-SH" : true, "AM-SU" : true, "AM-TV" : true, "AM-VD" : true, + "AO-BGO" : true, "AO-BGU" : true, "AO-BIE" : true, "AO-CAB" : true, "AO-CCU" : true, + "AO-CNN" : true, "AO-CNO" : true, "AO-CUS" : true, "AO-HUA" : true, "AO-HUI" : true, + "AO-LNO" : true, "AO-LSU" : true, "AO-LUA" : true, "AO-MAL" : true, "AO-MOX" : true, + "AO-NAM" : true, "AO-UIG" : true, "AO-ZAI" : true, "AR-A" : true, "AR-B" : true, + "AR-C" : true, "AR-D" : true, "AR-E" : true, "AR-G" : true, "AR-H" : true, + "AR-J" : true, "AR-K" : true, "AR-L" : true, "AR-M" : true, "AR-N" : true, + "AR-P" : true, "AR-Q" : true, "AR-R" : true, "AR-S" : true, "AR-T" : true, + "AR-U" : true, "AR-V" : true, "AR-W" : true, "AR-X" : true, "AR-Y" : true, + "AR-Z" : true, "AT-1" : true, "AT-2" : true, "AT-3" : true, "AT-4" : true, + "AT-5" : true, "AT-6" : true, "AT-7" : true, "AT-8" : true, "AT-9" : true, + "AU-ACT" : true, "AU-NSW" : true, "AU-NT" : true, "AU-QLD" : true, "AU-SA" : true, + "AU-TAS" : true, "AU-VIC" : true, "AU-WA" : true, "AZ-ABS" : true, "AZ-AGA" : true, + "AZ-AGC" : true, "AZ-AGM" : true, "AZ-AGS" : true, "AZ-AGU" : true, "AZ-AST" : true, + "AZ-BA" : true, "AZ-BAB" : true, "AZ-BAL" : true, "AZ-BAR" : true, "AZ-BEY" : true, + "AZ-BIL" : true, "AZ-CAB" : true, "AZ-CAL" : true, "AZ-CUL" : true, "AZ-DAS" : true, + "AZ-FUZ" : true, "AZ-GA" : true, "AZ-GAD" : true, "AZ-GOR" : true, "AZ-GOY" : true, + "AZ-GYG" : true, "AZ-HAC" : true, "AZ-IMI" : true, "AZ-ISM" : true, "AZ-KAL" : true, + "AZ-KAN" : true, "AZ-KUR" : true, "AZ-LA" : true, "AZ-LAC" : true, "AZ-LAN" : true, + "AZ-LER" : true, "AZ-MAS" : true, "AZ-MI" : true, "AZ-NA" : true, "AZ-NEF" : true, + "AZ-NV" : true, "AZ-NX" : true, "AZ-OGU" : true, "AZ-ORD" : true, "AZ-QAB" : true, + "AZ-QAX" : true, "AZ-QAZ" : true, "AZ-QBA" : true, "AZ-QBI" : true, "AZ-QOB" : true, + "AZ-QUS" : true, "AZ-SA" : true, "AZ-SAB" : true, "AZ-SAD" : true, "AZ-SAH" : true, + "AZ-SAK" : true, "AZ-SAL" : true, "AZ-SAR" : true, "AZ-SAT" : true, "AZ-SBN" : true, + "AZ-SIY" : true, "AZ-SKR" : true, "AZ-SM" : true, "AZ-SMI" : true, "AZ-SMX" : true, + "AZ-SR" : true, "AZ-SUS" : true, "AZ-TAR" : true, "AZ-TOV" : true, "AZ-UCA" : true, + "AZ-XA" : true, "AZ-XAC" : true, "AZ-XCI" : true, "AZ-XIZ" : true, "AZ-XVD" : true, + "AZ-YAR" : true, "AZ-YE" : true, "AZ-YEV" : true, 
"AZ-ZAN" : true, "AZ-ZAQ" : true, + "AZ-ZAR" : true, "BA-01" : true, "BA-02" : true, "BA-03" : true, "BA-04" : true, + "BA-05" : true, "BA-06" : true, "BA-07" : true, "BA-08" : true, "BA-09" : true, + "BA-10" : true, "BA-BIH" : true, "BA-BRC" : true, "BA-SRP" : true, "BB-01" : true, + "BB-02" : true, "BB-03" : true, "BB-04" : true, "BB-05" : true, "BB-06" : true, + "BB-07" : true, "BB-08" : true, "BB-09" : true, "BB-10" : true, "BB-11" : true, + "BD-01" : true, "BD-02" : true, "BD-03" : true, "BD-04" : true, "BD-05" : true, + "BD-06" : true, "BD-07" : true, "BD-08" : true, "BD-09" : true, "BD-10" : true, + "BD-11" : true, "BD-12" : true, "BD-13" : true, "BD-14" : true, "BD-15" : true, + "BD-16" : true, "BD-17" : true, "BD-18" : true, "BD-19" : true, "BD-20" : true, + "BD-21" : true, "BD-22" : true, "BD-23" : true, "BD-24" : true, "BD-25" : true, + "BD-26" : true, "BD-27" : true, "BD-28" : true, "BD-29" : true, "BD-30" : true, + "BD-31" : true, "BD-32" : true, "BD-33" : true, "BD-34" : true, "BD-35" : true, + "BD-36" : true, "BD-37" : true, "BD-38" : true, "BD-39" : true, "BD-40" : true, + "BD-41" : true, "BD-42" : true, "BD-43" : true, "BD-44" : true, "BD-45" : true, + "BD-46" : true, "BD-47" : true, "BD-48" : true, "BD-49" : true, "BD-50" : true, + "BD-51" : true, "BD-52" : true, "BD-53" : true, "BD-54" : true, "BD-55" : true, + "BD-56" : true, "BD-57" : true, "BD-58" : true, "BD-59" : true, "BD-60" : true, + "BD-61" : true, "BD-62" : true, "BD-63" : true, "BD-64" : true, "BD-A" : true, + "BD-B" : true, "BD-C" : true, "BD-D" : true, "BD-E" : true, "BD-F" : true, + "BD-G" : true, "BE-BRU" : true, "BE-VAN" : true, "BE-VBR" : true, "BE-VLG" : true, + "BE-VLI" : true, "BE-VOV" : true, "BE-VWV" : true, "BE-WAL" : true, "BE-WBR" : true, + "BE-WHT" : true, "BE-WLG" : true, "BE-WLX" : true, "BE-WNA" : true, "BF-01" : true, + "BF-02" : true, "BF-03" : true, "BF-04" : true, "BF-05" : true, "BF-06" : true, + "BF-07" : true, "BF-08" : true, "BF-09" : true, "BF-10" : true, "BF-11" : true, + "BF-12" : true, "BF-13" : true, "BF-BAL" : true, "BF-BAM" : true, "BF-BAN" : true, + "BF-BAZ" : true, "BF-BGR" : true, "BF-BLG" : true, "BF-BLK" : true, "BF-COM" : true, + "BF-GAN" : true, "BF-GNA" : true, "BF-GOU" : true, "BF-HOU" : true, "BF-IOB" : true, + "BF-KAD" : true, "BF-KEN" : true, "BF-KMD" : true, "BF-KMP" : true, "BF-KOP" : true, + "BF-KOS" : true, "BF-KOT" : true, "BF-KOW" : true, "BF-LER" : true, "BF-LOR" : true, + "BF-MOU" : true, "BF-NAM" : true, "BF-NAO" : true, "BF-NAY" : true, "BF-NOU" : true, + "BF-OUB" : true, "BF-OUD" : true, "BF-PAS" : true, "BF-PON" : true, "BF-SEN" : true, + "BF-SIS" : true, "BF-SMT" : true, "BF-SNG" : true, "BF-SOM" : true, "BF-SOR" : true, + "BF-TAP" : true, "BF-TUI" : true, "BF-YAG" : true, "BF-YAT" : true, "BF-ZIR" : true, + "BF-ZON" : true, "BF-ZOU" : true, "BG-01" : true, "BG-02" : true, "BG-03" : true, + "BG-04" : true, "BG-05" : true, "BG-06" : true, "BG-07" : true, "BG-08" : true, + "BG-09" : true, "BG-10" : true, "BG-11" : true, "BG-12" : true, "BG-13" : true, + "BG-14" : true, "BG-15" : true, "BG-16" : true, "BG-17" : true, "BG-18" : true, + "BG-19" : true, "BG-20" : true, "BG-21" : true, "BG-22" : true, "BG-23" : true, + "BG-24" : true, "BG-25" : true, "BG-26" : true, "BG-27" : true, "BG-28" : true, + "BH-13" : true, "BH-14" : true, "BH-15" : true, "BH-16" : true, "BH-17" : true, + "BI-BB" : true, "BI-BL" : true, "BI-BM" : true, "BI-BR" : true, "BI-CA" : true, + "BI-CI" : true, "BI-GI" : true, "BI-KI" : true, "BI-KR" : true, "BI-KY" : true, + "BI-MA" : true, 
"BI-MU" : true, "BI-MW" : true, "BI-NG" : true, "BI-RT" : true, + "BI-RY" : true, "BJ-AK" : true, "BJ-AL" : true, "BJ-AQ" : true, "BJ-BO" : true, + "BJ-CO" : true, "BJ-DO" : true, "BJ-KO" : true, "BJ-LI" : true, "BJ-MO" : true, + "BJ-OU" : true, "BJ-PL" : true, "BJ-ZO" : true, "BN-BE" : true, "BN-BM" : true, + "BN-TE" : true, "BN-TU" : true, "BO-B" : true, "BO-C" : true, "BO-H" : true, + "BO-L" : true, "BO-N" : true, "BO-O" : true, "BO-P" : true, "BO-S" : true, + "BO-T" : true, "BQ-BO" : true, "BQ-SA" : true, "BQ-SE" : true, "BR-AC" : true, + "BR-AL" : true, "BR-AM" : true, "BR-AP" : true, "BR-BA" : true, "BR-CE" : true, + "BR-DF" : true, "BR-ES" : true, "BR-FN" : true, "BR-GO" : true, "BR-MA" : true, + "BR-MG" : true, "BR-MS" : true, "BR-MT" : true, "BR-PA" : true, "BR-PB" : true, + "BR-PE" : true, "BR-PI" : true, "BR-PR" : true, "BR-RJ" : true, "BR-RN" : true, + "BR-RO" : true, "BR-RR" : true, "BR-RS" : true, "BR-SC" : true, "BR-SE" : true, + "BR-SP" : true, "BR-TO" : true, "BS-AK" : true, "BS-BI" : true, "BS-BP" : true, + "BS-BY" : true, "BS-CE" : true, "BS-CI" : true, "BS-CK" : true, "BS-CO" : true, + "BS-CS" : true, "BS-EG" : true, "BS-EX" : true, "BS-FP" : true, "BS-GC" : true, + "BS-HI" : true, "BS-HT" : true, "BS-IN" : true, "BS-LI" : true, "BS-MC" : true, + "BS-MG" : true, "BS-MI" : true, "BS-NE" : true, "BS-NO" : true, "BS-NS" : true, + "BS-RC" : true, "BS-RI" : true, "BS-SA" : true, "BS-SE" : true, "BS-SO" : true, + "BS-SS" : true, "BS-SW" : true, "BS-WG" : true, "BT-11" : true, "BT-12" : true, + "BT-13" : true, "BT-14" : true, "BT-15" : true, "BT-21" : true, "BT-22" : true, + "BT-23" : true, "BT-24" : true, "BT-31" : true, "BT-32" : true, "BT-33" : true, + "BT-34" : true, "BT-41" : true, "BT-42" : true, "BT-43" : true, "BT-44" : true, + "BT-45" : true, "BT-GA" : true, "BT-TY" : true, "BW-CE" : true, "BW-GH" : true, + "BW-KG" : true, "BW-KL" : true, "BW-KW" : true, "BW-NE" : true, "BW-NW" : true, + "BW-SE" : true, "BW-SO" : true, "BY-BR" : true, "BY-HM" : true, "BY-HO" : true, + "BY-HR" : true, "BY-MA" : true, "BY-MI" : true, "BY-VI" : true, "BZ-BZ" : true, + "BZ-CY" : true, "BZ-CZL" : true, "BZ-OW" : true, "BZ-SC" : true, "BZ-TOL" : true, + "CA-AB" : true, "CA-BC" : true, "CA-MB" : true, "CA-NB" : true, "CA-NL" : true, + "CA-NS" : true, "CA-NT" : true, "CA-NU" : true, "CA-ON" : true, "CA-PE" : true, + "CA-QC" : true, "CA-SK" : true, "CA-YT" : true, "CD-BC" : true, "CD-BN" : true, + "CD-EQ" : true, "CD-KA" : true, "CD-KE" : true, "CD-KN" : true, "CD-KW" : true, + "CD-MA" : true, "CD-NK" : true, "CD-OR" : true, "CD-SK" : true, "CF-AC" : true, + "CF-BB" : true, "CF-BGF" : true, "CF-BK" : true, "CF-HK" : true, "CF-HM" : true, + "CF-HS" : true, "CF-KB" : true, "CF-KG" : true, "CF-LB" : true, "CF-MB" : true, + "CF-MP" : true, "CF-NM" : true, "CF-OP" : true, "CF-SE" : true, "CF-UK" : true, + "CF-VK" : true, "CG-11" : true, "CG-12" : true, "CG-13" : true, "CG-14" : true, + "CG-15" : true, "CG-2" : true, "CG-5" : true, "CG-7" : true, "CG-8" : true, + "CG-9" : true, "CG-BZV" : true, "CH-AG" : true, "CH-AI" : true, "CH-AR" : true, + "CH-BE" : true, "CH-BL" : true, "CH-BS" : true, "CH-FR" : true, "CH-GE" : true, + "CH-GL" : true, "CH-GR" : true, "CH-JU" : true, "CH-LU" : true, "CH-NE" : true, + "CH-NW" : true, "CH-OW" : true, "CH-SG" : true, "CH-SH" : true, "CH-SO" : true, + "CH-SZ" : true, "CH-TG" : true, "CH-TI" : true, "CH-UR" : true, "CH-VD" : true, + "CH-VS" : true, "CH-ZG" : true, "CH-ZH" : true, "CI-01" : true, "CI-02" : true, + "CI-03" : true, "CI-04" : true, "CI-05" : true, 
"CI-06" : true, "CI-07" : true, + "CI-08" : true, "CI-09" : true, "CI-10" : true, "CI-11" : true, "CI-12" : true, + "CI-13" : true, "CI-14" : true, "CI-15" : true, "CI-16" : true, "CI-17" : true, + "CI-18" : true, "CI-19" : true, "CL-AI" : true, "CL-AN" : true, "CL-AP" : true, + "CL-AR" : true, "CL-AT" : true, "CL-BI" : true, "CL-CO" : true, "CL-LI" : true, + "CL-LL" : true, "CL-LR" : true, "CL-MA" : true, "CL-ML" : true, "CL-RM" : true, + "CL-TA" : true, "CL-VS" : true, "CM-AD" : true, "CM-CE" : true, "CM-EN" : true, + "CM-ES" : true, "CM-LT" : true, "CM-NO" : true, "CM-NW" : true, "CM-OU" : true, + "CM-SU" : true, "CM-SW" : true, "CN-11" : true, "CN-12" : true, "CN-13" : true, + "CN-14" : true, "CN-15" : true, "CN-21" : true, "CN-22" : true, "CN-23" : true, + "CN-31" : true, "CN-32" : true, "CN-33" : true, "CN-34" : true, "CN-35" : true, + "CN-36" : true, "CN-37" : true, "CN-41" : true, "CN-42" : true, "CN-43" : true, + "CN-44" : true, "CN-45" : true, "CN-46" : true, "CN-50" : true, "CN-51" : true, + "CN-52" : true, "CN-53" : true, "CN-54" : true, "CN-61" : true, "CN-62" : true, + "CN-63" : true, "CN-64" : true, "CN-65" : true, "CN-71" : true, "CN-91" : true, + "CN-92" : true, "CO-AMA" : true, "CO-ANT" : true, "CO-ARA" : true, "CO-ATL" : true, + "CO-BOL" : true, "CO-BOY" : true, "CO-CAL" : true, "CO-CAQ" : true, "CO-CAS" : true, + "CO-CAU" : true, "CO-CES" : true, "CO-CHO" : true, "CO-COR" : true, "CO-CUN" : true, + "CO-DC" : true, "CO-GUA" : true, "CO-GUV" : true, "CO-HUI" : true, "CO-LAG" : true, + "CO-MAG" : true, "CO-MET" : true, "CO-NAR" : true, "CO-NSA" : true, "CO-PUT" : true, + "CO-QUI" : true, "CO-RIS" : true, "CO-SAN" : true, "CO-SAP" : true, "CO-SUC" : true, + "CO-TOL" : true, "CO-VAC" : true, "CO-VAU" : true, "CO-VID" : true, "CR-A" : true, + "CR-C" : true, "CR-G" : true, "CR-H" : true, "CR-L" : true, "CR-P" : true, + "CR-SJ" : true, "CU-01" : true, "CU-02" : true, "CU-03" : true, "CU-04" : true, + "CU-05" : true, "CU-06" : true, "CU-07" : true, "CU-08" : true, "CU-09" : true, + "CU-10" : true, "CU-11" : true, "CU-12" : true, "CU-13" : true, "CU-14" : true, + "CU-99" : true, "CV-B" : true, "CV-BR" : true, "CV-BV" : true, "CV-CA" : true, + "CV-CF" : true, "CV-CR" : true, "CV-MA" : true, "CV-MO" : true, "CV-PA" : true, + "CV-PN" : true, "CV-PR" : true, "CV-RB" : true, "CV-RG" : true, "CV-RS" : true, + "CV-S" : true, "CV-SD" : true, "CV-SF" : true, "CV-SL" : true, "CV-SM" : true, + "CV-SO" : true, "CV-SS" : true, "CV-SV" : true, "CV-TA" : true, "CV-TS" : true, + "CY-01" : true, "CY-02" : true, "CY-03" : true, "CY-04" : true, "CY-05" : true, + "CY-06" : true, "CZ-10" : true, "CZ-101" : true, "CZ-102" : true, "CZ-103" : true, + "CZ-104" : true, "CZ-105" : true, "CZ-106" : true, "CZ-107" : true, "CZ-108" : true, + "CZ-109" : true, "CZ-110" : true, "CZ-111" : true, "CZ-112" : true, "CZ-113" : true, + "CZ-114" : true, "CZ-115" : true, "CZ-116" : true, "CZ-117" : true, "CZ-118" : true, + "CZ-119" : true, "CZ-120" : true, "CZ-121" : true, "CZ-122" : true, "CZ-20" : true, + "CZ-201" : true, "CZ-202" : true, "CZ-203" : true, "CZ-204" : true, "CZ-205" : true, + "CZ-206" : true, "CZ-207" : true, "CZ-208" : true, "CZ-209" : true, "CZ-20A" : true, + "CZ-20B" : true, "CZ-20C" : true, "CZ-31" : true, "CZ-311" : true, "CZ-312" : true, + "CZ-313" : true, "CZ-314" : true, "CZ-315" : true, "CZ-316" : true, "CZ-317" : true, + "CZ-32" : true, "CZ-321" : true, "CZ-322" : true, "CZ-323" : true, "CZ-324" : true, + "CZ-325" : true, "CZ-326" : true, "CZ-327" : true, "CZ-41" : true, "CZ-411" : true, + 
"CZ-412" : true, "CZ-413" : true, "CZ-42" : true, "CZ-421" : true, "CZ-422" : true, + "CZ-423" : true, "CZ-424" : true, "CZ-425" : true, "CZ-426" : true, "CZ-427" : true, + "CZ-51" : true, "CZ-511" : true, "CZ-512" : true, "CZ-513" : true, "CZ-514" : true, + "CZ-52" : true, "CZ-521" : true, "CZ-522" : true, "CZ-523" : true, "CZ-524" : true, + "CZ-525" : true, "CZ-53" : true, "CZ-531" : true, "CZ-532" : true, "CZ-533" : true, + "CZ-534" : true, "CZ-63" : true, "CZ-631" : true, "CZ-632" : true, "CZ-633" : true, + "CZ-634" : true, "CZ-635" : true, "CZ-64" : true, "CZ-641" : true, "CZ-642" : true, + "CZ-643" : true, "CZ-644" : true, "CZ-645" : true, "CZ-646" : true, "CZ-647" : true, + "CZ-71" : true, "CZ-711" : true, "CZ-712" : true, "CZ-713" : true, "CZ-714" : true, + "CZ-715" : true, "CZ-72" : true, "CZ-721" : true, "CZ-722" : true, "CZ-723" : true, + "CZ-724" : true, "CZ-80" : true, "CZ-801" : true, "CZ-802" : true, "CZ-803" : true, + "CZ-804" : true, "CZ-805" : true, "CZ-806" : true, "DE-BB" : true, "DE-BE" : true, + "DE-BW" : true, "DE-BY" : true, "DE-HB" : true, "DE-HE" : true, "DE-HH" : true, + "DE-MV" : true, "DE-NI" : true, "DE-NW" : true, "DE-RP" : true, "DE-SH" : true, + "DE-SL" : true, "DE-SN" : true, "DE-ST" : true, "DE-TH" : true, "DJ-AR" : true, + "DJ-AS" : true, "DJ-DI" : true, "DJ-DJ" : true, "DJ-OB" : true, "DJ-TA" : true, + "DK-81" : true, "DK-82" : true, "DK-83" : true, "DK-84" : true, "DK-85" : true, + "DM-01" : true, "DM-02" : true, "DM-03" : true, "DM-04" : true, "DM-05" : true, + "DM-06" : true, "DM-07" : true, "DM-08" : true, "DM-09" : true, "DM-10" : true, + "DO-01" : true, "DO-02" : true, "DO-03" : true, "DO-04" : true, "DO-05" : true, + "DO-06" : true, "DO-07" : true, "DO-08" : true, "DO-09" : true, "DO-10" : true, + "DO-11" : true, "DO-12" : true, "DO-13" : true, "DO-14" : true, "DO-15" : true, + "DO-16" : true, "DO-17" : true, "DO-18" : true, "DO-19" : true, "DO-20" : true, + "DO-21" : true, "DO-22" : true, "DO-23" : true, "DO-24" : true, "DO-25" : true, + "DO-26" : true, "DO-27" : true, "DO-28" : true, "DO-29" : true, "DO-30" : true, + "DZ-01" : true, "DZ-02" : true, "DZ-03" : true, "DZ-04" : true, "DZ-05" : true, + "DZ-06" : true, "DZ-07" : true, "DZ-08" : true, "DZ-09" : true, "DZ-10" : true, + "DZ-11" : true, "DZ-12" : true, "DZ-13" : true, "DZ-14" : true, "DZ-15" : true, + "DZ-16" : true, "DZ-17" : true, "DZ-18" : true, "DZ-19" : true, "DZ-20" : true, + "DZ-21" : true, "DZ-22" : true, "DZ-23" : true, "DZ-24" : true, "DZ-25" : true, + "DZ-26" : true, "DZ-27" : true, "DZ-28" : true, "DZ-29" : true, "DZ-30" : true, + "DZ-31" : true, "DZ-32" : true, "DZ-33" : true, "DZ-34" : true, "DZ-35" : true, + "DZ-36" : true, "DZ-37" : true, "DZ-38" : true, "DZ-39" : true, "DZ-40" : true, + "DZ-41" : true, "DZ-42" : true, "DZ-43" : true, "DZ-44" : true, "DZ-45" : true, + "DZ-46" : true, "DZ-47" : true, "DZ-48" : true, "EC-A" : true, "EC-B" : true, + "EC-C" : true, "EC-D" : true, "EC-E" : true, "EC-F" : true, "EC-G" : true, + "EC-H" : true, "EC-I" : true, "EC-L" : true, "EC-M" : true, "EC-N" : true, + "EC-O" : true, "EC-P" : true, "EC-R" : true, "EC-S" : true, "EC-SD" : true, + "EC-SE" : true, "EC-T" : true, "EC-U" : true, "EC-W" : true, "EC-X" : true, + "EC-Y" : true, "EC-Z" : true, "EE-37" : true, "EE-39" : true, "EE-44" : true, + "EE-49" : true, "EE-51" : true, "EE-57" : true, "EE-59" : true, "EE-65" : true, + "EE-67" : true, "EE-70" : true, "EE-74" : true, "EE-78" : true, "EE-82" : true, + "EE-84" : true, "EE-86" : true, "EG-ALX" : true, "EG-ASN" : true, "EG-AST" : true, 
+ "EG-BA" : true, "EG-BH" : true, "EG-BNS" : true, "EG-C" : true, "EG-DK" : true, + "EG-DT" : true, "EG-FYM" : true, "EG-GH" : true, "EG-GZ" : true, "EG-HU" : true, + "EG-IS" : true, "EG-JS" : true, "EG-KB" : true, "EG-KFS" : true, "EG-KN" : true, + "EG-MN" : true, "EG-MNF" : true, "EG-MT" : true, "EG-PTS" : true, "EG-SHG" : true, + "EG-SHR" : true, "EG-SIN" : true, "EG-SU" : true, "EG-SUZ" : true, "EG-WAD" : true, + "ER-AN" : true, "ER-DK" : true, "ER-DU" : true, "ER-GB" : true, "ER-MA" : true, + "ER-SK" : true, "ES-A" : true, "ES-AB" : true, "ES-AL" : true, "ES-AN" : true, + "ES-AR" : true, "ES-AS" : true, "ES-AV" : true, "ES-B" : true, "ES-BA" : true, + "ES-BI" : true, "ES-BU" : true, "ES-C" : true, "ES-CA" : true, "ES-CB" : true, + "ES-CC" : true, "ES-CE" : true, "ES-CL" : true, "ES-CM" : true, "ES-CN" : true, + "ES-CO" : true, "ES-CR" : true, "ES-CS" : true, "ES-CT" : true, "ES-CU" : true, + "ES-EX" : true, "ES-GA" : true, "ES-GC" : true, "ES-GI" : true, "ES-GR" : true, + "ES-GU" : true, "ES-H" : true, "ES-HU" : true, "ES-IB" : true, "ES-J" : true, + "ES-L" : true, "ES-LE" : true, "ES-LO" : true, "ES-LU" : true, "ES-M" : true, + "ES-MA" : true, "ES-MC" : true, "ES-MD" : true, "ES-ML" : true, "ES-MU" : true, + "ES-NA" : true, "ES-NC" : true, "ES-O" : true, "ES-OR" : true, "ES-P" : true, + "ES-PM" : true, "ES-PO" : true, "ES-PV" : true, "ES-RI" : true, "ES-S" : true, + "ES-SA" : true, "ES-SE" : true, "ES-SG" : true, "ES-SO" : true, "ES-SS" : true, + "ES-T" : true, "ES-TE" : true, "ES-TF" : true, "ES-TO" : true, "ES-V" : true, + "ES-VA" : true, "ES-VC" : true, "ES-VI" : true, "ES-Z" : true, "ES-ZA" : true, + "ET-AA" : true, "ET-AF" : true, "ET-AM" : true, "ET-BE" : true, "ET-DD" : true, + "ET-GA" : true, "ET-HA" : true, "ET-OR" : true, "ET-SN" : true, "ET-SO" : true, + "ET-TI" : true, "FI-01" : true, "FI-02" : true, "FI-03" : true, "FI-04" : true, + "FI-05" : true, "FI-06" : true, "FI-07" : true, "FI-08" : true, "FI-09" : true, + "FI-10" : true, "FI-11" : true, "FI-12" : true, "FI-13" : true, "FI-14" : true, + "FI-15" : true, "FI-16" : true, "FI-17" : true, "FI-18" : true, "FI-19" : true, + "FJ-C" : true, "FJ-E" : true, "FJ-N" : true, "FJ-R" : true, "FJ-W" : true, + "FM-KSA" : true, "FM-PNI" : true, "FM-TRK" : true, "FM-YAP" : true, "FR-01" : true, + "FR-02" : true, "FR-03" : true, "FR-04" : true, "FR-05" : true, "FR-06" : true, + "FR-07" : true, "FR-08" : true, "FR-09" : true, "FR-10" : true, "FR-11" : true, + "FR-12" : true, "FR-13" : true, "FR-14" : true, "FR-15" : true, "FR-16" : true, + "FR-17" : true, "FR-18" : true, "FR-19" : true, "FR-21" : true, "FR-22" : true, + "FR-23" : true, "FR-24" : true, "FR-25" : true, "FR-26" : true, "FR-27" : true, + "FR-28" : true, "FR-29" : true, "FR-2A" : true, "FR-2B" : true, "FR-30" : true, + "FR-31" : true, "FR-32" : true, "FR-33" : true, "FR-34" : true, "FR-35" : true, + "FR-36" : true, "FR-37" : true, "FR-38" : true, "FR-39" : true, "FR-40" : true, + "FR-41" : true, "FR-42" : true, "FR-43" : true, "FR-44" : true, "FR-45" : true, + "FR-46" : true, "FR-47" : true, "FR-48" : true, "FR-49" : true, "FR-50" : true, + "FR-51" : true, "FR-52" : true, "FR-53" : true, "FR-54" : true, "FR-55" : true, + "FR-56" : true, "FR-57" : true, "FR-58" : true, "FR-59" : true, "FR-60" : true, + "FR-61" : true, "FR-62" : true, "FR-63" : true, "FR-64" : true, "FR-65" : true, + "FR-66" : true, "FR-67" : true, "FR-68" : true, "FR-69" : true, "FR-70" : true, + "FR-71" : true, "FR-72" : true, "FR-73" : true, "FR-74" : true, "FR-75" : true, + "FR-76" : true, "FR-77" : true, 
"FR-78" : true, "FR-79" : true, "FR-80" : true, + "FR-81" : true, "FR-82" : true, "FR-83" : true, "FR-84" : true, "FR-85" : true, + "FR-86" : true, "FR-87" : true, "FR-88" : true, "FR-89" : true, "FR-90" : true, + "FR-91" : true, "FR-92" : true, "FR-93" : true, "FR-94" : true, "FR-95" : true, + "FR-ARA" : true, "FR-BFC" : true, "FR-BL" : true, "FR-BRE" : true, "FR-COR" : true, + "FR-CP" : true, "FR-CVL" : true, "FR-GES" : true, "FR-GF" : true, "FR-GP" : true, + "FR-GUA" : true, "FR-HDF" : true, "FR-IDF" : true, "FR-LRE" : true, "FR-MAY" : true, + "FR-MF" : true, "FR-MQ" : true, "FR-NAQ" : true, "FR-NC" : true, "FR-NOR" : true, + "FR-OCC" : true, "FR-PAC" : true, "FR-PDL" : true, "FR-PF" : true, "FR-PM" : true, + "FR-RE" : true, "FR-TF" : true, "FR-WF" : true, "FR-YT" : true, "GA-1" : true, + "GA-2" : true, "GA-3" : true, "GA-4" : true, "GA-5" : true, "GA-6" : true, + "GA-7" : true, "GA-8" : true, "GA-9" : true, "GB-ABC" : true, "GB-ABD" : true, + "GB-ABE" : true, "GB-AGB" : true, "GB-AGY" : true, "GB-AND" : true, "GB-ANN" : true, + "GB-ANS" : true, "GB-BAS" : true, "GB-BBD" : true, "GB-BDF" : true, "GB-BDG" : true, + "GB-BEN" : true, "GB-BEX" : true, "GB-BFS" : true, "GB-BGE" : true, "GB-BGW" : true, + "GB-BIR" : true, "GB-BKM" : true, "GB-BMH" : true, "GB-BNE" : true, "GB-BNH" : true, + "GB-BNS" : true, "GB-BOL" : true, "GB-BPL" : true, "GB-BRC" : true, "GB-BRD" : true, + "GB-BRY" : true, "GB-BST" : true, "GB-BUR" : true, "GB-CAM" : true, "GB-CAY" : true, + "GB-CBF" : true, "GB-CCG" : true, "GB-CGN" : true, "GB-CHE" : true, "GB-CHW" : true, + "GB-CLD" : true, "GB-CLK" : true, "GB-CMA" : true, "GB-CMD" : true, "GB-CMN" : true, + "GB-CON" : true, "GB-COV" : true, "GB-CRF" : true, "GB-CRY" : true, "GB-CWY" : true, + "GB-DAL" : true, "GB-DBY" : true, "GB-DEN" : true, "GB-DER" : true, "GB-DEV" : true, + "GB-DGY" : true, "GB-DNC" : true, "GB-DND" : true, "GB-DOR" : true, "GB-DRS" : true, + "GB-DUD" : true, "GB-DUR" : true, "GB-EAL" : true, "GB-EAW" : true, "GB-EAY" : true, + "GB-EDH" : true, "GB-EDU" : true, "GB-ELN" : true, "GB-ELS" : true, "GB-ENF" : true, + "GB-ENG" : true, "GB-ERW" : true, "GB-ERY" : true, "GB-ESS" : true, "GB-ESX" : true, + "GB-FAL" : true, "GB-FIF" : true, "GB-FLN" : true, "GB-FMO" : true, "GB-GAT" : true, + "GB-GBN" : true, "GB-GLG" : true, "GB-GLS" : true, "GB-GRE" : true, "GB-GWN" : true, + "GB-HAL" : true, "GB-HAM" : true, "GB-HAV" : true, "GB-HCK" : true, "GB-HEF" : true, + "GB-HIL" : true, "GB-HLD" : true, "GB-HMF" : true, "GB-HNS" : true, "GB-HPL" : true, + "GB-HRT" : true, "GB-HRW" : true, "GB-HRY" : true, "GB-IOS" : true, "GB-IOW" : true, + "GB-ISL" : true, "GB-IVC" : true, "GB-KEC" : true, "GB-KEN" : true, "GB-KHL" : true, + "GB-KIR" : true, "GB-KTT" : true, "GB-KWL" : true, "GB-LAN" : true, "GB-LBC" : true, + "GB-LBH" : true, "GB-LCE" : true, "GB-LDS" : true, "GB-LEC" : true, "GB-LEW" : true, + "GB-LIN" : true, "GB-LIV" : true, "GB-LND" : true, "GB-LUT" : true, "GB-MAN" : true, + "GB-MDB" : true, "GB-MDW" : true, "GB-MEA" : true, "GB-MIK" : true, "GD-01" : true, + "GB-MLN" : true, "GB-MON" : true, "GB-MRT" : true, "GB-MRY" : true, "GB-MTY" : true, + "GB-MUL" : true, "GB-NAY" : true, "GB-NBL" : true, "GB-NEL" : true, "GB-NET" : true, + "GB-NFK" : true, "GB-NGM" : true, "GB-NIR" : true, "GB-NLK" : true, "GB-NLN" : true, + "GB-NMD" : true, "GB-NSM" : true, "GB-NTH" : true, "GB-NTL" : true, "GB-NTT" : true, + "GB-NTY" : true, "GB-NWM" : true, "GB-NWP" : true, "GB-NYK" : true, "GB-OLD" : true, + "GB-ORK" : true, "GB-OXF" : true, "GB-PEM" : true, "GB-PKN" : true, 
"GB-PLY" : true, + "GB-POL" : true, "GB-POR" : true, "GB-POW" : true, "GB-PTE" : true, "GB-RCC" : true, + "GB-RCH" : true, "GB-RCT" : true, "GB-RDB" : true, "GB-RDG" : true, "GB-RFW" : true, + "GB-RIC" : true, "GB-ROT" : true, "GB-RUT" : true, "GB-SAW" : true, "GB-SAY" : true, + "GB-SCB" : true, "GB-SCT" : true, "GB-SFK" : true, "GB-SFT" : true, "GB-SGC" : true, + "GB-SHF" : true, "GB-SHN" : true, "GB-SHR" : true, "GB-SKP" : true, "GB-SLF" : true, + "GB-SLG" : true, "GB-SLK" : true, "GB-SND" : true, "GB-SOL" : true, "GB-SOM" : true, + "GB-SOS" : true, "GB-SRY" : true, "GB-STE" : true, "GB-STG" : true, "GB-STH" : true, + "GB-STN" : true, "GB-STS" : true, "GB-STT" : true, "GB-STY" : true, "GB-SWA" : true, + "GB-SWD" : true, "GB-SWK" : true, "GB-TAM" : true, "GB-TFW" : true, "GB-THR" : true, + "GB-TOB" : true, "GB-TOF" : true, "GB-TRF" : true, "GB-TWH" : true, "GB-UKM" : true, + "GB-VGL" : true, "GB-WAR" : true, "GB-WBK" : true, "GB-WDU" : true, "GB-WFT" : true, + "GB-WGN" : true, "GB-WIL" : true, "GB-WKF" : true, "GB-WLL" : true, "GB-WLN" : true, + "GB-WLS" : true, "GB-WLV" : true, "GB-WND" : true, "GB-WNM" : true, "GB-WOK" : true, + "GB-WOR" : true, "GB-WRL" : true, "GB-WRT" : true, "GB-WRX" : true, "GB-WSM" : true, + "GB-WSX" : true, "GB-YOR" : true, "GB-ZET" : true, "GD-02" : true, "GD-03" : true, + "GD-04" : true, "GD-05" : true, "GD-06" : true, "GD-10" : true, "GE-AB" : true, + "GE-AJ" : true, "GE-GU" : true, "GE-IM" : true, "GE-KA" : true, "GE-KK" : true, + "GE-MM" : true, "GE-RL" : true, "GE-SJ" : true, "GE-SK" : true, "GE-SZ" : true, + "GE-TB" : true, "GH-AA" : true, "GH-AH" : true, "GH-BA" : true, "GH-CP" : true, + "GH-EP" : true, "GH-NP" : true, "GH-TV" : true, "GH-UE" : true, "GH-UW" : true, + "GH-WP" : true, "GL-KU" : true, "GL-QA" : true, "GL-QE" : true, "GL-SM" : true, + "GM-B" : true, "GM-L" : true, "GM-M" : true, "GM-N" : true, "GM-U" : true, + "GM-W" : true, "GN-B" : true, "GN-BE" : true, "GN-BF" : true, "GN-BK" : true, + "GN-C" : true, "GN-CO" : true, "GN-D" : true, "GN-DB" : true, "GN-DI" : true, + "GN-DL" : true, "GN-DU" : true, "GN-F" : true, "GN-FA" : true, "GN-FO" : true, + "GN-FR" : true, "GN-GA" : true, "GN-GU" : true, "GN-K" : true, "GN-KA" : true, + "GN-KB" : true, "GN-KD" : true, "GN-KE" : true, "GN-KN" : true, "GN-KO" : true, + "GN-KS" : true, "GN-L" : true, "GN-LA" : true, "GN-LE" : true, "GN-LO" : true, + "GN-M" : true, "GN-MC" : true, "GN-MD" : true, "GN-ML" : true, "GN-MM" : true, + "GN-N" : true, "GN-NZ" : true, "GN-PI" : true, "GN-SI" : true, "GN-TE" : true, + "GN-TO" : true, "GN-YO" : true, "GQ-AN" : true, "GQ-BN" : true, "GQ-BS" : true, + "GQ-C" : true, "GQ-CS" : true, "GQ-I" : true, "GQ-KN" : true, "GQ-LI" : true, + "GQ-WN" : true, "GR-01" : true, "GR-03" : true, "GR-04" : true, "GR-05" : true, + "GR-06" : true, "GR-07" : true, "GR-11" : true, "GR-12" : true, "GR-13" : true, + "GR-14" : true, "GR-15" : true, "GR-16" : true, "GR-17" : true, "GR-21" : true, + "GR-22" : true, "GR-23" : true, "GR-24" : true, "GR-31" : true, "GR-32" : true, + "GR-33" : true, "GR-34" : true, "GR-41" : true, "GR-42" : true, "GR-43" : true, + "GR-44" : true, "GR-51" : true, "GR-52" : true, "GR-53" : true, "GR-54" : true, + "GR-55" : true, "GR-56" : true, "GR-57" : true, "GR-58" : true, "GR-59" : true, + "GR-61" : true, "GR-62" : true, "GR-63" : true, "GR-64" : true, "GR-69" : true, + "GR-71" : true, "GR-72" : true, "GR-73" : true, "GR-81" : true, "GR-82" : true, + "GR-83" : true, "GR-84" : true, "GR-85" : true, "GR-91" : true, "GR-92" : true, + "GR-93" : true, "GR-94" : true, 
"GR-A" : true, "GR-A1" : true, "GR-B" : true, + "GR-C" : true, "GR-D" : true, "GR-E" : true, "GR-F" : true, "GR-G" : true, + "GR-H" : true, "GR-I" : true, "GR-J" : true, "GR-K" : true, "GR-L" : true, + "GR-M" : true, "GT-AV" : true, "GT-BV" : true, "GT-CM" : true, "GT-CQ" : true, + "GT-ES" : true, "GT-GU" : true, "GT-HU" : true, "GT-IZ" : true, "GT-JA" : true, + "GT-JU" : true, "GT-PE" : true, "GT-PR" : true, "GT-QC" : true, "GT-QZ" : true, + "GT-RE" : true, "GT-SA" : true, "GT-SM" : true, "GT-SO" : true, "GT-SR" : true, + "GT-SU" : true, "GT-TO" : true, "GT-ZA" : true, "GW-BA" : true, "GW-BL" : true, + "GW-BM" : true, "GW-BS" : true, "GW-CA" : true, "GW-GA" : true, "GW-L" : true, + "GW-N" : true, "GW-OI" : true, "GW-QU" : true, "GW-S" : true, "GW-TO" : true, + "GY-BA" : true, "GY-CU" : true, "GY-DE" : true, "GY-EB" : true, "GY-ES" : true, + "GY-MA" : true, "GY-PM" : true, "GY-PT" : true, "GY-UD" : true, "GY-UT" : true, + "HN-AT" : true, "HN-CH" : true, "HN-CL" : true, "HN-CM" : true, "HN-CP" : true, + "HN-CR" : true, "HN-EP" : true, "HN-FM" : true, "HN-GD" : true, "HN-IB" : true, + "HN-IN" : true, "HN-LE" : true, "HN-LP" : true, "HN-OC" : true, "HN-OL" : true, + "HN-SB" : true, "HN-VA" : true, "HN-YO" : true, "HR-01" : true, "HR-02" : true, + "HR-03" : true, "HR-04" : true, "HR-05" : true, "HR-06" : true, "HR-07" : true, + "HR-08" : true, "HR-09" : true, "HR-10" : true, "HR-11" : true, "HR-12" : true, + "HR-13" : true, "HR-14" : true, "HR-15" : true, "HR-16" : true, "HR-17" : true, + "HR-18" : true, "HR-19" : true, "HR-20" : true, "HR-21" : true, "HT-AR" : true, + "HT-CE" : true, "HT-GA" : true, "HT-ND" : true, "HT-NE" : true, "HT-NO" : true, + "HT-OU" : true, "HT-SD" : true, "HT-SE" : true, "HU-BA" : true, "HU-BC" : true, + "HU-BE" : true, "HU-BK" : true, "HU-BU" : true, "HU-BZ" : true, "HU-CS" : true, + "HU-DE" : true, "HU-DU" : true, "HU-EG" : true, "HU-ER" : true, "HU-FE" : true, + "HU-GS" : true, "HU-GY" : true, "HU-HB" : true, "HU-HE" : true, "HU-HV" : true, + "HU-JN" : true, "HU-KE" : true, "HU-KM" : true, "HU-KV" : true, "HU-MI" : true, + "HU-NK" : true, "HU-NO" : true, "HU-NY" : true, "HU-PE" : true, "HU-PS" : true, + "HU-SD" : true, "HU-SF" : true, "HU-SH" : true, "HU-SK" : true, "HU-SN" : true, + "HU-SO" : true, "HU-SS" : true, "HU-ST" : true, "HU-SZ" : true, "HU-TB" : true, + "HU-TO" : true, "HU-VA" : true, "HU-VE" : true, "HU-VM" : true, "HU-ZA" : true, + "HU-ZE" : true, "ID-AC" : true, "ID-BA" : true, "ID-BB" : true, "ID-BE" : true, + "ID-BT" : true, "ID-GO" : true, "ID-IJ" : true, "ID-JA" : true, "ID-JB" : true, + "ID-JI" : true, "ID-JK" : true, "ID-JT" : true, "ID-JW" : true, "ID-KA" : true, + "ID-KB" : true, "ID-KI" : true, "ID-KR" : true, "ID-KS" : true, "ID-KT" : true, + "ID-LA" : true, "ID-MA" : true, "ID-ML" : true, "ID-MU" : true, "ID-NB" : true, + "ID-NT" : true, "ID-NU" : true, "ID-PA" : true, "ID-PB" : true, "ID-RI" : true, + "ID-SA" : true, "ID-SB" : true, "ID-SG" : true, "ID-SL" : true, "ID-SM" : true, + "ID-SN" : true, "ID-SR" : true, "ID-SS" : true, "ID-ST" : true, "ID-SU" : true, + "ID-YO" : true, "IE-C" : true, "IE-CE" : true, "IE-CN" : true, "IE-CO" : true, + "IE-CW" : true, "IE-D" : true, "IE-DL" : true, "IE-G" : true, "IE-KE" : true, + "IE-KK" : true, "IE-KY" : true, "IE-L" : true, "IE-LD" : true, "IE-LH" : true, + "IE-LK" : true, "IE-LM" : true, "IE-LS" : true, "IE-M" : true, "IE-MH" : true, + "IE-MN" : true, "IE-MO" : true, "IE-OY" : true, "IE-RN" : true, "IE-SO" : true, + "IE-TA" : true, "IE-U" : true, "IE-WD" : true, "IE-WH" : true, "IE-WW" : true, + 
"IE-WX" : true, "IL-D" : true, "IL-HA" : true, "IL-JM" : true, "IL-M" : true, + "IL-TA" : true, "IL-Z" : true, "IN-AN" : true, "IN-AP" : true, "IN-AR" : true, + "IN-AS" : true, "IN-BR" : true, "IN-CH" : true, "IN-CT" : true, "IN-DD" : true, + "IN-DL" : true, "IN-DN" : true, "IN-GA" : true, "IN-GJ" : true, "IN-HP" : true, + "IN-HR" : true, "IN-JH" : true, "IN-JK" : true, "IN-KA" : true, "IN-KL" : true, + "IN-LD" : true, "IN-MH" : true, "IN-ML" : true, "IN-MN" : true, "IN-MP" : true, + "IN-MZ" : true, "IN-NL" : true, "IN-OR" : true, "IN-PB" : true, "IN-PY" : true, + "IN-RJ" : true, "IN-SK" : true, "IN-TN" : true, "IN-TR" : true, "IN-UP" : true, + "IN-UT" : true, "IN-WB" : true, "IQ-AN" : true, "IQ-AR" : true, "IQ-BA" : true, + "IQ-BB" : true, "IQ-BG" : true, "IQ-DA" : true, "IQ-DI" : true, "IQ-DQ" : true, + "IQ-KA" : true, "IQ-MA" : true, "IQ-MU" : true, "IQ-NA" : true, "IQ-NI" : true, + "IQ-QA" : true, "IQ-SD" : true, "IQ-SW" : true, "IQ-TS" : true, "IQ-WA" : true, + "IR-01" : true, "IR-02" : true, "IR-03" : true, "IR-04" : true, "IR-05" : true, + "IR-06" : true, "IR-07" : true, "IR-08" : true, "IR-10" : true, "IR-11" : true, + "IR-12" : true, "IR-13" : true, "IR-14" : true, "IR-15" : true, "IR-16" : true, + "IR-17" : true, "IR-18" : true, "IR-19" : true, "IR-20" : true, "IR-21" : true, + "IR-22" : true, "IR-23" : true, "IR-24" : true, "IR-25" : true, "IR-26" : true, + "IR-27" : true, "IR-28" : true, "IR-29" : true, "IR-30" : true, "IR-31" : true, + "IS-0" : true, "IS-1" : true, "IS-2" : true, "IS-3" : true, "IS-4" : true, + "IS-5" : true, "IS-6" : true, "IS-7" : true, "IS-8" : true, "IT-21" : true, + "IT-23" : true, "IT-25" : true, "IT-32" : true, "IT-34" : true, "IT-36" : true, + "IT-42" : true, "IT-45" : true, "IT-52" : true, "IT-55" : true, "IT-57" : true, + "IT-62" : true, "IT-65" : true, "IT-67" : true, "IT-72" : true, "IT-75" : true, + "IT-77" : true, "IT-78" : true, "IT-82" : true, "IT-88" : true, "IT-AG" : true, + "IT-AL" : true, "IT-AN" : true, "IT-AO" : true, "IT-AP" : true, "IT-AQ" : true, + "IT-AR" : true, "IT-AT" : true, "IT-AV" : true, "IT-BA" : true, "IT-BG" : true, + "IT-BI" : true, "IT-BL" : true, "IT-BN" : true, "IT-BO" : true, "IT-BR" : true, + "IT-BS" : true, "IT-BT" : true, "IT-BZ" : true, "IT-CA" : true, "IT-CB" : true, + "IT-CE" : true, "IT-CH" : true, "IT-CI" : true, "IT-CL" : true, "IT-CN" : true, + "IT-CO" : true, "IT-CR" : true, "IT-CS" : true, "IT-CT" : true, "IT-CZ" : true, + "IT-EN" : true, "IT-FC" : true, "IT-FE" : true, "IT-FG" : true, "IT-FI" : true, + "IT-FM" : true, "IT-FR" : true, "IT-GE" : true, "IT-GO" : true, "IT-GR" : true, + "IT-IM" : true, "IT-IS" : true, "IT-KR" : true, "IT-LC" : true, "IT-LE" : true, + "IT-LI" : true, "IT-LO" : true, "IT-LT" : true, "IT-LU" : true, "IT-MB" : true, + "IT-MC" : true, "IT-ME" : true, "IT-MI" : true, "IT-MN" : true, "IT-MO" : true, + "IT-MS" : true, "IT-MT" : true, "IT-NA" : true, "IT-NO" : true, "IT-NU" : true, + "IT-OG" : true, "IT-OR" : true, "IT-OT" : true, "IT-PA" : true, "IT-PC" : true, + "IT-PD" : true, "IT-PE" : true, "IT-PG" : true, "IT-PI" : true, "IT-PN" : true, + "IT-PO" : true, "IT-PR" : true, "IT-PT" : true, "IT-PU" : true, "IT-PV" : true, + "IT-PZ" : true, "IT-RA" : true, "IT-RC" : true, "IT-RE" : true, "IT-RG" : true, + "IT-RI" : true, "IT-RM" : true, "IT-RN" : true, "IT-RO" : true, "IT-SA" : true, + "IT-SI" : true, "IT-SO" : true, "IT-SP" : true, "IT-SR" : true, "IT-SS" : true, + "IT-SV" : true, "IT-TA" : true, "IT-TE" : true, "IT-TN" : true, "IT-TO" : true, + "IT-TP" : true, "IT-TR" : true, "IT-TS" 
: true, "IT-TV" : true, "IT-UD" : true, + "IT-VA" : true, "IT-VB" : true, "IT-VC" : true, "IT-VE" : true, "IT-VI" : true, + "IT-VR" : true, "IT-VS" : true, "IT-VT" : true, "IT-VV" : true, "JM-01" : true, + "JM-02" : true, "JM-03" : true, "JM-04" : true, "JM-05" : true, "JM-06" : true, + "JM-07" : true, "JM-08" : true, "JM-09" : true, "JM-10" : true, "JM-11" : true, + "JM-12" : true, "JM-13" : true, "JM-14" : true, "JO-AJ" : true, "JO-AM" : true, + "JO-AQ" : true, "JO-AT" : true, "JO-AZ" : true, "JO-BA" : true, "JO-IR" : true, + "JO-JA" : true, "JO-KA" : true, "JO-MA" : true, "JO-MD" : true, "JO-MN" : true, + "JP-01" : true, "JP-02" : true, "JP-03" : true, "JP-04" : true, "JP-05" : true, + "JP-06" : true, "JP-07" : true, "JP-08" : true, "JP-09" : true, "JP-10" : true, + "JP-11" : true, "JP-12" : true, "JP-13" : true, "JP-14" : true, "JP-15" : true, + "JP-16" : true, "JP-17" : true, "JP-18" : true, "JP-19" : true, "JP-20" : true, + "JP-21" : true, "JP-22" : true, "JP-23" : true, "JP-24" : true, "JP-25" : true, + "JP-26" : true, "JP-27" : true, "JP-28" : true, "JP-29" : true, "JP-30" : true, + "JP-31" : true, "JP-32" : true, "JP-33" : true, "JP-34" : true, "JP-35" : true, + "JP-36" : true, "JP-37" : true, "JP-38" : true, "JP-39" : true, "JP-40" : true, + "JP-41" : true, "JP-42" : true, "JP-43" : true, "JP-44" : true, "JP-45" : true, + "JP-46" : true, "JP-47" : true, "KE-110" : true, "KE-200" : true, "KE-300" : true, + "KE-400" : true, "KE-500" : true, "KE-700" : true, "KE-800" : true, "KG-B" : true, + "KG-C" : true, "KG-GB" : true, "KG-J" : true, "KG-N" : true, "KG-O" : true, + "KG-T" : true, "KG-Y" : true, "KH-1" : true, "KH-10" : true, "KH-11" : true, + "KH-12" : true, "KH-13" : true, "KH-14" : true, "KH-15" : true, "KH-16" : true, + "KH-17" : true, "KH-18" : true, "KH-19" : true, "KH-2" : true, "KH-20" : true, + "KH-21" : true, "KH-22" : true, "KH-23" : true, "KH-24" : true, "KH-3" : true, + "KH-4" : true, "KH-5" : true, "KH-6" : true, "KH-7" : true, "KH-8" : true, + "KH-9" : true, "KI-G" : true, "KI-L" : true, "KI-P" : true, "KM-A" : true, + "KM-G" : true, "KM-M" : true, "KN-01" : true, "KN-02" : true, "KN-03" : true, + "KN-04" : true, "KN-05" : true, "KN-06" : true, "KN-07" : true, "KN-08" : true, + "KN-09" : true, "KN-10" : true, "KN-11" : true, "KN-12" : true, "KN-13" : true, + "KN-15" : true, "KN-K" : true, "KN-N" : true, "KP-01" : true, "KP-02" : true, + "KP-03" : true, "KP-04" : true, "KP-05" : true, "KP-06" : true, "KP-07" : true, + "KP-08" : true, "KP-09" : true, "KP-10" : true, "KP-13" : true, "KR-11" : true, + "KR-26" : true, "KR-27" : true, "KR-28" : true, "KR-29" : true, "KR-30" : true, + "KR-31" : true, "KR-41" : true, "KR-42" : true, "KR-43" : true, "KR-44" : true, + "KR-45" : true, "KR-46" : true, "KR-47" : true, "KR-48" : true, "KR-49" : true, + "KW-AH" : true, "KW-FA" : true, "KW-HA" : true, "KW-JA" : true, "KW-KU" : true, + "KW-MU" : true, "KZ-AKM" : true, "KZ-AKT" : true, "KZ-ALA" : true, "KZ-ALM" : true, + "KZ-AST" : true, "KZ-ATY" : true, "KZ-KAR" : true, "KZ-KUS" : true, "KZ-KZY" : true, + "KZ-MAN" : true, "KZ-PAV" : true, "KZ-SEV" : true, "KZ-VOS" : true, "KZ-YUZ" : true, + "KZ-ZAP" : true, "KZ-ZHA" : true, "LA-AT" : true, "LA-BK" : true, "LA-BL" : true, + "LA-CH" : true, "LA-HO" : true, "LA-KH" : true, "LA-LM" : true, "LA-LP" : true, + "LA-OU" : true, "LA-PH" : true, "LA-SL" : true, "LA-SV" : true, "LA-VI" : true, + "LA-VT" : true, "LA-XA" : true, "LA-XE" : true, "LA-XI" : true, "LA-XS" : true, + "LB-AK" : true, "LB-AS" : true, "LB-BA" : true, "LB-BH" : true, 
"LB-BI" : true, + "LB-JA" : true, "LB-JL" : true, "LB-NA" : true, "LI-01" : true, "LI-02" : true, + "LI-03" : true, "LI-04" : true, "LI-05" : true, "LI-06" : true, "LI-07" : true, + "LI-08" : true, "LI-09" : true, "LI-10" : true, "LI-11" : true, "LK-1" : true, + "LK-11" : true, "LK-12" : true, "LK-13" : true, "LK-2" : true, "LK-21" : true, + "LK-22" : true, "LK-23" : true, "LK-3" : true, "LK-31" : true, "LK-32" : true, + "LK-33" : true, "LK-4" : true, "LK-41" : true, "LK-42" : true, "LK-43" : true, + "LK-44" : true, "LK-45" : true, "LK-5" : true, "LK-51" : true, "LK-52" : true, + "LK-53" : true, "LK-6" : true, "LK-61" : true, "LK-62" : true, "LK-7" : true, + "LK-71" : true, "LK-72" : true, "LK-8" : true, "LK-81" : true, "LK-82" : true, + "LK-9" : true, "LK-91" : true, "LK-92" : true, "LR-BG" : true, "LR-BM" : true, + "LR-CM" : true, "LR-GB" : true, "LR-GG" : true, "LR-GK" : true, "LR-LO" : true, + "LR-MG" : true, "LR-MO" : true, "LR-MY" : true, "LR-NI" : true, "LR-RI" : true, + "LR-SI" : true, "LS-A" : true, "LS-B" : true, "LS-C" : true, "LS-D" : true, + "LS-E" : true, "LS-F" : true, "LS-G" : true, "LS-H" : true, "LS-J" : true, + "LS-K" : true, "LT-AL" : true, "LT-KL" : true, "LT-KU" : true, "LT-MR" : true, + "LT-PN" : true, "LT-SA" : true, "LT-TA" : true, "LT-TE" : true, "LT-UT" : true, + "LT-VL" : true, "LU-D" : true, "LU-G" : true, "LU-L" : true, "LV-001" : true, + "LV-002" : true, "LV-003" : true, "LV-004" : true, "LV-005" : true, "LV-006" : true, + "LV-007" : true, "LV-008" : true, "LV-009" : true, "LV-010" : true, "LV-011" : true, + "LV-012" : true, "LV-013" : true, "LV-014" : true, "LV-015" : true, "LV-016" : true, + "LV-017" : true, "LV-018" : true, "LV-019" : true, "LV-020" : true, "LV-021" : true, + "LV-022" : true, "LV-023" : true, "LV-024" : true, "LV-025" : true, "LV-026" : true, + "LV-027" : true, "LV-028" : true, "LV-029" : true, "LV-030" : true, "LV-031" : true, + "LV-032" : true, "LV-033" : true, "LV-034" : true, "LV-035" : true, "LV-036" : true, + "LV-037" : true, "LV-038" : true, "LV-039" : true, "LV-040" : true, "LV-041" : true, + "LV-042" : true, "LV-043" : true, "LV-044" : true, "LV-045" : true, "LV-046" : true, + "LV-047" : true, "LV-048" : true, "LV-049" : true, "LV-050" : true, "LV-051" : true, + "LV-052" : true, "LV-053" : true, "LV-054" : true, "LV-055" : true, "LV-056" : true, + "LV-057" : true, "LV-058" : true, "LV-059" : true, "LV-060" : true, "LV-061" : true, + "LV-062" : true, "LV-063" : true, "LV-064" : true, "LV-065" : true, "LV-066" : true, + "LV-067" : true, "LV-068" : true, "LV-069" : true, "LV-070" : true, "LV-071" : true, + "LV-072" : true, "LV-073" : true, "LV-074" : true, "LV-075" : true, "LV-076" : true, + "LV-077" : true, "LV-078" : true, "LV-079" : true, "LV-080" : true, "LV-081" : true, + "LV-082" : true, "LV-083" : true, "LV-084" : true, "LV-085" : true, "LV-086" : true, + "LV-087" : true, "LV-088" : true, "LV-089" : true, "LV-090" : true, "LV-091" : true, + "LV-092" : true, "LV-093" : true, "LV-094" : true, "LV-095" : true, "LV-096" : true, + "LV-097" : true, "LV-098" : true, "LV-099" : true, "LV-100" : true, "LV-101" : true, + "LV-102" : true, "LV-103" : true, "LV-104" : true, "LV-105" : true, "LV-106" : true, + "LV-107" : true, "LV-108" : true, "LV-109" : true, "LV-110" : true, "LV-DGV" : true, + "LV-JEL" : true, "LV-JKB" : true, "LV-JUR" : true, "LV-LPX" : true, "LV-REZ" : true, + "LV-RIX" : true, "LV-VEN" : true, "LV-VMR" : true, "LY-BA" : true, "LY-BU" : true, + "LY-DR" : true, "LY-GT" : true, "LY-JA" : true, "LY-JB" : true, "LY-JG" : 
true, + "LY-JI" : true, "LY-JU" : true, "LY-KF" : true, "LY-MB" : true, "LY-MI" : true, + "LY-MJ" : true, "LY-MQ" : true, "LY-NL" : true, "LY-NQ" : true, "LY-SB" : true, + "LY-SR" : true, "LY-TB" : true, "LY-WA" : true, "LY-WD" : true, "LY-WS" : true, + "LY-ZA" : true, "MA-01" : true, "MA-02" : true, "MA-03" : true, "MA-04" : true, + "MA-05" : true, "MA-06" : true, "MA-07" : true, "MA-08" : true, "MA-09" : true, + "MA-10" : true, "MA-11" : true, "MA-12" : true, "MA-13" : true, "MA-14" : true, + "MA-15" : true, "MA-16" : true, "MA-AGD" : true, "MA-AOU" : true, "MA-ASZ" : true, + "MA-AZI" : true, "MA-BEM" : true, "MA-BER" : true, "MA-BES" : true, "MA-BOD" : true, + "MA-BOM" : true, "MA-CAS" : true, "MA-CHE" : true, "MA-CHI" : true, "MA-CHT" : true, + "MA-ERR" : true, "MA-ESI" : true, "MA-ESM" : true, "MA-FAH" : true, "MA-FES" : true, + "MA-FIG" : true, "MA-GUE" : true, "MA-HAJ" : true, "MA-HAO" : true, "MA-HOC" : true, + "MA-IFR" : true, "MA-INE" : true, "MA-JDI" : true, "MA-JRA" : true, "MA-KEN" : true, + "MA-KES" : true, "MA-KHE" : true, "MA-KHN" : true, "MA-KHO" : true, "MA-LAA" : true, + "MA-LAR" : true, "MA-MED" : true, "MA-MEK" : true, "MA-MMD" : true, "MA-MMN" : true, + "MA-MOH" : true, "MA-MOU" : true, "MA-NAD" : true, "MA-NOU" : true, "MA-OUA" : true, + "MA-OUD" : true, "MA-OUJ" : true, "MA-RAB" : true, "MA-SAF" : true, "MA-SAL" : true, + "MA-SEF" : true, "MA-SET" : true, "MA-SIK" : true, "MA-SKH" : true, "MA-SYB" : true, + "MA-TAI" : true, "MA-TAO" : true, "MA-TAR" : true, "MA-TAT" : true, "MA-TAZ" : true, + "MA-TET" : true, "MA-TIZ" : true, "MA-TNG" : true, "MA-TNT" : true, "MA-ZAG" : true, + "MC-CL" : true, "MC-CO" : true, "MC-FO" : true, "MC-GA" : true, "MC-JE" : true, + "MC-LA" : true, "MC-MA" : true, "MC-MC" : true, "MC-MG" : true, "MC-MO" : true, + "MC-MU" : true, "MC-PH" : true, "MC-SD" : true, "MC-SO" : true, "MC-SP" : true, + "MC-SR" : true, "MC-VR" : true, "MD-AN" : true, "MD-BA" : true, "MD-BD" : true, + "MD-BR" : true, "MD-BS" : true, "MD-CA" : true, "MD-CL" : true, "MD-CM" : true, + "MD-CR" : true, "MD-CS" : true, "MD-CT" : true, "MD-CU" : true, "MD-DO" : true, + "MD-DR" : true, "MD-DU" : true, "MD-ED" : true, "MD-FA" : true, "MD-FL" : true, + "MD-GA" : true, "MD-GL" : true, "MD-HI" : true, "MD-IA" : true, "MD-LE" : true, + "MD-NI" : true, "MD-OC" : true, "MD-OR" : true, "MD-RE" : true, "MD-RI" : true, + "MD-SD" : true, "MD-SI" : true, "MD-SN" : true, "MD-SO" : true, "MD-ST" : true, + "MD-SV" : true, "MD-TA" : true, "MD-TE" : true, "MD-UN" : true, "ME-01" : true, + "ME-02" : true, "ME-03" : true, "ME-04" : true, "ME-05" : true, "ME-06" : true, + "ME-07" : true, "ME-08" : true, "ME-09" : true, "ME-10" : true, "ME-11" : true, + "ME-12" : true, "ME-13" : true, "ME-14" : true, "ME-15" : true, "ME-16" : true, + "ME-17" : true, "ME-18" : true, "ME-19" : true, "ME-20" : true, "ME-21" : true, + "MG-A" : true, "MG-D" : true, "MG-F" : true, "MG-M" : true, "MG-T" : true, + "MG-U" : true, "MH-ALK" : true, "MH-ALL" : true, "MH-ARN" : true, "MH-AUR" : true, + "MH-EBO" : true, "MH-ENI" : true, "MH-JAB" : true, "MH-JAL" : true, "MH-KIL" : true, + "MH-KWA" : true, "MH-L" : true, "MH-LAE" : true, "MH-LIB" : true, "MH-LIK" : true, + "MH-MAJ" : true, "MH-MAL" : true, "MH-MEJ" : true, "MH-MIL" : true, "MH-NMK" : true, + "MH-NMU" : true, "MH-RON" : true, "MH-T" : true, "MH-UJA" : true, "MH-UTI" : true, + "MH-WTJ" : true, "MH-WTN" : true, "MK-01" : true, "MK-02" : true, "MK-03" : true, + "MK-04" : true, "MK-05" : true, "MK-06" : true, "MK-07" : true, "MK-08" : true, + "MK-09" : true, "MK-10" 
: true, "MK-11" : true, "MK-12" : true, "MK-13" : true, + "MK-14" : true, "MK-15" : true, "MK-16" : true, "MK-17" : true, "MK-18" : true, + "MK-19" : true, "MK-20" : true, "MK-21" : true, "MK-22" : true, "MK-23" : true, + "MK-24" : true, "MK-25" : true, "MK-26" : true, "MK-27" : true, "MK-28" : true, + "MK-29" : true, "MK-30" : true, "MK-31" : true, "MK-32" : true, "MK-33" : true, + "MK-34" : true, "MK-35" : true, "MK-36" : true, "MK-37" : true, "MK-38" : true, + "MK-39" : true, "MK-40" : true, "MK-41" : true, "MK-42" : true, "MK-43" : true, + "MK-44" : true, "MK-45" : true, "MK-46" : true, "MK-47" : true, "MK-48" : true, + "MK-49" : true, "MK-50" : true, "MK-51" : true, "MK-52" : true, "MK-53" : true, + "MK-54" : true, "MK-55" : true, "MK-56" : true, "MK-57" : true, "MK-58" : true, + "MK-59" : true, "MK-60" : true, "MK-61" : true, "MK-62" : true, "MK-63" : true, + "MK-64" : true, "MK-65" : true, "MK-66" : true, "MK-67" : true, "MK-68" : true, + "MK-69" : true, "MK-70" : true, "MK-71" : true, "MK-72" : true, "MK-73" : true, + "MK-74" : true, "MK-75" : true, "MK-76" : true, "MK-77" : true, "MK-78" : true, + "MK-79" : true, "MK-80" : true, "MK-81" : true, "MK-82" : true, "MK-83" : true, + "MK-84" : true, "ML-1" : true, "ML-2" : true, "ML-3" : true, "ML-4" : true, + "ML-5" : true, "ML-6" : true, "ML-7" : true, "ML-8" : true, "ML-BK0" : true, + "MM-01" : true, "MM-02" : true, "MM-03" : true, "MM-04" : true, "MM-05" : true, + "MM-06" : true, "MM-07" : true, "MM-11" : true, "MM-12" : true, "MM-13" : true, + "MM-14" : true, "MM-15" : true, "MM-16" : true, "MM-17" : true, "MN-035" : true, + "MN-037" : true, "MN-039" : true, "MN-041" : true, "MN-043" : true, "MN-046" : true, + "MN-047" : true, "MN-049" : true, "MN-051" : true, "MN-053" : true, "MN-055" : true, + "MN-057" : true, "MN-059" : true, "MN-061" : true, "MN-063" : true, "MN-064" : true, + "MN-065" : true, "MN-067" : true, "MN-069" : true, "MN-071" : true, "MN-073" : true, + "MN-1" : true, "MR-01" : true, "MR-02" : true, "MR-03" : true, "MR-04" : true, + "MR-05" : true, "MR-06" : true, "MR-07" : true, "MR-08" : true, "MR-09" : true, + "MR-10" : true, "MR-11" : true, "MR-12" : true, "MR-NKC" : true, "MT-01" : true, + "MT-02" : true, "MT-03" : true, "MT-04" : true, "MT-05" : true, "MT-06" : true, + "MT-07" : true, "MT-08" : true, "MT-09" : true, "MT-10" : true, "MT-11" : true, + "MT-12" : true, "MT-13" : true, "MT-14" : true, "MT-15" : true, "MT-16" : true, + "MT-17" : true, "MT-18" : true, "MT-19" : true, "MT-20" : true, "MT-21" : true, + "MT-22" : true, "MT-23" : true, "MT-24" : true, "MT-25" : true, "MT-26" : true, + "MT-27" : true, "MT-28" : true, "MT-29" : true, "MT-30" : true, "MT-31" : true, + "MT-32" : true, "MT-33" : true, "MT-34" : true, "MT-35" : true, "MT-36" : true, + "MT-37" : true, "MT-38" : true, "MT-39" : true, "MT-40" : true, "MT-41" : true, + "MT-42" : true, "MT-43" : true, "MT-44" : true, "MT-45" : true, "MT-46" : true, + "MT-47" : true, "MT-48" : true, "MT-49" : true, "MT-50" : true, "MT-51" : true, + "MT-52" : true, "MT-53" : true, "MT-54" : true, "MT-55" : true, "MT-56" : true, + "MT-57" : true, "MT-58" : true, "MT-59" : true, "MT-60" : true, "MT-61" : true, + "MT-62" : true, "MT-63" : true, "MT-64" : true, "MT-65" : true, "MT-66" : true, + "MT-67" : true, "MT-68" : true, "MU-AG" : true, "MU-BL" : true, "MU-BR" : true, + "MU-CC" : true, "MU-CU" : true, "MU-FL" : true, "MU-GP" : true, "MU-MO" : true, + "MU-PA" : true, "MU-PL" : true, "MU-PU" : true, "MU-PW" : true, "MU-QB" : true, + "MU-RO" : true, "MU-RP" : true, 
"MU-SA" : true, "MU-VP" : true, "MV-00" : true, + "MV-01" : true, "MV-02" : true, "MV-03" : true, "MV-04" : true, "MV-05" : true, + "MV-07" : true, "MV-08" : true, "MV-12" : true, "MV-13" : true, "MV-14" : true, + "MV-17" : true, "MV-20" : true, "MV-23" : true, "MV-24" : true, "MV-25" : true, + "MV-26" : true, "MV-27" : true, "MV-28" : true, "MV-29" : true, "MV-CE" : true, + "MV-MLE" : true, "MV-NC" : true, "MV-NO" : true, "MV-SC" : true, "MV-SU" : true, + "MV-UN" : true, "MV-US" : true, "MW-BA" : true, "MW-BL" : true, "MW-C" : true, + "MW-CK" : true, "MW-CR" : true, "MW-CT" : true, "MW-DE" : true, "MW-DO" : true, + "MW-KR" : true, "MW-KS" : true, "MW-LI" : true, "MW-LK" : true, "MW-MC" : true, + "MW-MG" : true, "MW-MH" : true, "MW-MU" : true, "MW-MW" : true, "MW-MZ" : true, + "MW-N" : true, "MW-NB" : true, "MW-NE" : true, "MW-NI" : true, "MW-NK" : true, + "MW-NS" : true, "MW-NU" : true, "MW-PH" : true, "MW-RU" : true, "MW-S" : true, + "MW-SA" : true, "MW-TH" : true, "MW-ZO" : true, "MX-AGU" : true, "MX-BCN" : true, + "MX-BCS" : true, "MX-CAM" : true, "MX-CHH" : true, "MX-CHP" : true, "MX-COA" : true, + "MX-COL" : true, "MX-DIF" : true, "MX-DUR" : true, "MX-GRO" : true, "MX-GUA" : true, + "MX-HID" : true, "MX-JAL" : true, "MX-MEX" : true, "MX-MIC" : true, "MX-MOR" : true, + "MX-NAY" : true, "MX-NLE" : true, "MX-OAX" : true, "MX-PUE" : true, "MX-QUE" : true, + "MX-ROO" : true, "MX-SIN" : true, "MX-SLP" : true, "MX-SON" : true, "MX-TAB" : true, + "MX-TAM" : true, "MX-TLA" : true, "MX-VER" : true, "MX-YUC" : true, "MX-ZAC" : true, + "MY-01" : true, "MY-02" : true, "MY-03" : true, "MY-04" : true, "MY-05" : true, + "MY-06" : true, "MY-07" : true, "MY-08" : true, "MY-09" : true, "MY-10" : true, + "MY-11" : true, "MY-12" : true, "MY-13" : true, "MY-14" : true, "MY-15" : true, + "MY-16" : true, "MZ-A" : true, "MZ-B" : true, "MZ-G" : true, "MZ-I" : true, + "MZ-L" : true, "MZ-MPM" : true, "MZ-N" : true, "MZ-P" : true, "MZ-Q" : true, + "MZ-S" : true, "MZ-T" : true, "NA-CA" : true, "NA-ER" : true, "NA-HA" : true, + "NA-KA" : true, "NA-KH" : true, "NA-KU" : true, "NA-OD" : true, "NA-OH" : true, + "NA-OK" : true, "NA-ON" : true, "NA-OS" : true, "NA-OT" : true, "NA-OW" : true, + "NE-1" : true, "NE-2" : true, "NE-3" : true, "NE-4" : true, "NE-5" : true, + "NE-6" : true, "NE-7" : true, "NE-8" : true, "NG-AB" : true, "NG-AD" : true, + "NG-AK" : true, "NG-AN" : true, "NG-BA" : true, "NG-BE" : true, "NG-BO" : true, + "NG-BY" : true, "NG-CR" : true, "NG-DE" : true, "NG-EB" : true, "NG-ED" : true, + "NG-EK" : true, "NG-EN" : true, "NG-FC" : true, "NG-GO" : true, "NG-IM" : true, + "NG-JI" : true, "NG-KD" : true, "NG-KE" : true, "NG-KN" : true, "NG-KO" : true, + "NG-KT" : true, "NG-KW" : true, "NG-LA" : true, "NG-NA" : true, "NG-NI" : true, + "NG-OG" : true, "NG-ON" : true, "NG-OS" : true, "NG-OY" : true, "NG-PL" : true, + "NG-RI" : true, "NG-SO" : true, "NG-TA" : true, "NG-YO" : true, "NG-ZA" : true, + "NI-AN" : true, "NI-AS" : true, "NI-BO" : true, "NI-CA" : true, "NI-CI" : true, + "NI-CO" : true, "NI-ES" : true, "NI-GR" : true, "NI-JI" : true, "NI-LE" : true, + "NI-MD" : true, "NI-MN" : true, "NI-MS" : true, "NI-MT" : true, "NI-NS" : true, + "NI-RI" : true, "NI-SJ" : true, "NL-AW" : true, "NL-BQ1" : true, "NL-BQ2" : true, + "NL-BQ3" : true, "NL-CW" : true, "NL-DR" : true, "NL-FL" : true, "NL-FR" : true, + "NL-GE" : true, "NL-GR" : true, "NL-LI" : true, "NL-NB" : true, "NL-NH" : true, + "NL-OV" : true, "NL-SX" : true, "NL-UT" : true, "NL-ZE" : true, "NL-ZH" : true, + "NO-01" : true, "NO-02" : true, "NO-03" : 
true, "NO-04" : true, "NO-05" : true, + "NO-06" : true, "NO-07" : true, "NO-08" : true, "NO-09" : true, "NO-10" : true, + "NO-11" : true, "NO-12" : true, "NO-14" : true, "NO-15" : true, "NO-16" : true, + "NO-17" : true, "NO-18" : true, "NO-19" : true, "NO-20" : true, "NO-21" : true, + "NO-22" : true, "NP-1" : true, "NP-2" : true, "NP-3" : true, "NP-4" : true, + "NP-5" : true, "NP-BA" : true, "NP-BH" : true, "NP-DH" : true, "NP-GA" : true, + "NP-JA" : true, "NP-KA" : true, "NP-KO" : true, "NP-LU" : true, "NP-MA" : true, + "NP-ME" : true, "NP-NA" : true, "NP-RA" : true, "NP-SA" : true, "NP-SE" : true, + "NR-01" : true, "NR-02" : true, "NR-03" : true, "NR-04" : true, "NR-05" : true, + "NR-06" : true, "NR-07" : true, "NR-08" : true, "NR-09" : true, "NR-10" : true, + "NR-11" : true, "NR-12" : true, "NR-13" : true, "NR-14" : true, "NZ-AUK" : true, + "NZ-BOP" : true, "NZ-CAN" : true, "NZ-CIT" : true, "NZ-GIS" : true, "NZ-HKB" : true, + "NZ-MBH" : true, "NZ-MWT" : true, "NZ-N" : true, "NZ-NSN" : true, "NZ-NTL" : true, + "NZ-OTA" : true, "NZ-S" : true, "NZ-STL" : true, "NZ-TAS" : true, "NZ-TKI" : true, + "NZ-WGN" : true, "NZ-WKO" : true, "NZ-WTC" : true, "OM-BA" : true, "OM-BU" : true, + "OM-DA" : true, "OM-MA" : true, "OM-MU" : true, "OM-SH" : true, "OM-WU" : true, + "OM-ZA" : true, "OM-ZU" : true, "PA-1" : true, "PA-2" : true, "PA-3" : true, + "PA-4" : true, "PA-5" : true, "PA-6" : true, "PA-7" : true, "PA-8" : true, + "PA-9" : true, "PA-EM" : true, "PA-KY" : true, "PA-NB" : true, "PE-AMA" : true, + "PE-ANC" : true, "PE-APU" : true, "PE-ARE" : true, "PE-AYA" : true, "PE-CAJ" : true, + "PE-CAL" : true, "PE-CUS" : true, "PE-HUC" : true, "PE-HUV" : true, "PE-ICA" : true, + "PE-JUN" : true, "PE-LAL" : true, "PE-LAM" : true, "PE-LIM" : true, "PE-LMA" : true, + "PE-LOR" : true, "PE-MDD" : true, "PE-MOQ" : true, "PE-PAS" : true, "PE-PIU" : true, + "PE-PUN" : true, "PE-SAM" : true, "PE-TAC" : true, "PE-TUM" : true, "PE-UCA" : true, + "PG-CPK" : true, "PG-CPM" : true, "PG-EBR" : true, "PG-EHG" : true, "PG-EPW" : true, + "PG-ESW" : true, "PG-GPK" : true, "PG-MBA" : true, "PG-MPL" : true, "PG-MPM" : true, + "PG-MRL" : true, "PG-NCD" : true, "PG-NIK" : true, "PG-NPP" : true, "PG-NSB" : true, + "PG-SAN" : true, "PG-SHM" : true, "PG-WBK" : true, "PG-WHM" : true, "PG-WPD" : true, + "PH-00" : true, "PH-01" : true, "PH-02" : true, "PH-03" : true, "PH-05" : true, + "PH-06" : true, "PH-07" : true, "PH-08" : true, "PH-09" : true, "PH-10" : true, + "PH-11" : true, "PH-12" : true, "PH-13" : true, "PH-14" : true, "PH-15" : true, + "PH-40" : true, "PH-41" : true, "PH-ABR" : true, "PH-AGN" : true, "PH-AGS" : true, + "PH-AKL" : true, "PH-ALB" : true, "PH-ANT" : true, "PH-APA" : true, "PH-AUR" : true, + "PH-BAN" : true, "PH-BAS" : true, "PH-BEN" : true, "PH-BIL" : true, "PH-BOH" : true, + "PH-BTG" : true, "PH-BTN" : true, "PH-BUK" : true, "PH-BUL" : true, "PH-CAG" : true, + "PH-CAM" : true, "PH-CAN" : true, "PH-CAP" : true, "PH-CAS" : true, "PH-CAT" : true, + "PH-CAV" : true, "PH-CEB" : true, "PH-COM" : true, "PH-DAO" : true, "PH-DAS" : true, + "PH-DAV" : true, "PH-DIN" : true, "PH-EAS" : true, "PH-GUI" : true, "PH-IFU" : true, + "PH-ILI" : true, "PH-ILN" : true, "PH-ILS" : true, "PH-ISA" : true, "PH-KAL" : true, + "PH-LAG" : true, "PH-LAN" : true, "PH-LAS" : true, "PH-LEY" : true, "PH-LUN" : true, + "PH-MAD" : true, "PH-MAG" : true, "PH-MAS" : true, "PH-MDC" : true, "PH-MDR" : true, + "PH-MOU" : true, "PH-MSC" : true, "PH-MSR" : true, "PH-NCO" : true, "PH-NEC" : true, + "PH-NER" : true, "PH-NSA" : true, "PH-NUE" : true, 
"PH-NUV" : true, "PH-PAM" : true, + "PH-PAN" : true, "PH-PLW" : true, "PH-QUE" : true, "PH-QUI" : true, "PH-RIZ" : true, + "PH-ROM" : true, "PH-SAR" : true, "PH-SCO" : true, "PH-SIG" : true, "PH-SLE" : true, + "PH-SLU" : true, "PH-SOR" : true, "PH-SUK" : true, "PH-SUN" : true, "PH-SUR" : true, + "PH-TAR" : true, "PH-TAW" : true, "PH-WSA" : true, "PH-ZAN" : true, "PH-ZAS" : true, + "PH-ZMB" : true, "PH-ZSI" : true, "PK-BA" : true, "PK-GB" : true, "PK-IS" : true, + "PK-JK" : true, "PK-KP" : true, "PK-PB" : true, "PK-SD" : true, "PK-TA" : true, + "PL-DS" : true, "PL-KP" : true, "PL-LB" : true, "PL-LD" : true, "PL-LU" : true, + "PL-MA" : true, "PL-MZ" : true, "PL-OP" : true, "PL-PD" : true, "PL-PK" : true, + "PL-PM" : true, "PL-SK" : true, "PL-SL" : true, "PL-WN" : true, "PL-WP" : true, + "PL-ZP" : true, "PS-BTH" : true, "PS-DEB" : true, "PS-GZA" : true, "PS-HBN" : true, + "PS-JEM" : true, "PS-JEN" : true, "PS-JRH" : true, "PS-KYS" : true, "PS-NBS" : true, + "PS-NGZ" : true, "PS-QQA" : true, "PS-RBH" : true, "PS-RFH" : true, "PS-SLT" : true, + "PS-TBS" : true, "PS-TKM" : true, "PT-01" : true, "PT-02" : true, "PT-03" : true, + "PT-04" : true, "PT-05" : true, "PT-06" : true, "PT-07" : true, "PT-08" : true, + "PT-09" : true, "PT-10" : true, "PT-11" : true, "PT-12" : true, "PT-13" : true, + "PT-14" : true, "PT-15" : true, "PT-16" : true, "PT-17" : true, "PT-18" : true, + "PT-20" : true, "PT-30" : true, "PW-002" : true, "PW-004" : true, "PW-010" : true, + "PW-050" : true, "PW-100" : true, "PW-150" : true, "PW-212" : true, "PW-214" : true, + "PW-218" : true, "PW-222" : true, "PW-224" : true, "PW-226" : true, "PW-227" : true, + "PW-228" : true, "PW-350" : true, "PW-370" : true, "PY-1" : true, "PY-10" : true, + "PY-11" : true, "PY-12" : true, "PY-13" : true, "PY-14" : true, "PY-15" : true, + "PY-16" : true, "PY-19" : true, "PY-2" : true, "PY-3" : true, "PY-4" : true, + "PY-5" : true, "PY-6" : true, "PY-7" : true, "PY-8" : true, "PY-9" : true, + "PY-ASU" : true, "QA-DA" : true, "QA-KH" : true, "QA-MS" : true, "QA-RA" : true, + "QA-US" : true, "QA-WA" : true, "QA-ZA" : true, "RO-AB" : true, "RO-AG" : true, + "RO-AR" : true, "RO-B" : true, "RO-BC" : true, "RO-BH" : true, "RO-BN" : true, + "RO-BR" : true, "RO-BT" : true, "RO-BV" : true, "RO-BZ" : true, "RO-CJ" : true, + "RO-CL" : true, "RO-CS" : true, "RO-CT" : true, "RO-CV" : true, "RO-DB" : true, + "RO-DJ" : true, "RO-GJ" : true, "RO-GL" : true, "RO-GR" : true, "RO-HD" : true, + "RO-HR" : true, "RO-IF" : true, "RO-IL" : true, "RO-IS" : true, "RO-MH" : true, + "RO-MM" : true, "RO-MS" : true, "RO-NT" : true, "RO-OT" : true, "RO-PH" : true, + "RO-SB" : true, "RO-SJ" : true, "RO-SM" : true, "RO-SV" : true, "RO-TL" : true, + "RO-TM" : true, "RO-TR" : true, "RO-VL" : true, "RO-VN" : true, "RO-VS" : true, + "RS-00" : true, "RS-01" : true, "RS-02" : true, "RS-03" : true, "RS-04" : true, + "RS-05" : true, "RS-06" : true, "RS-07" : true, "RS-08" : true, "RS-09" : true, + "RS-10" : true, "RS-11" : true, "RS-12" : true, "RS-13" : true, "RS-14" : true, + "RS-15" : true, "RS-16" : true, "RS-17" : true, "RS-18" : true, "RS-19" : true, + "RS-20" : true, "RS-21" : true, "RS-22" : true, "RS-23" : true, "RS-24" : true, + "RS-25" : true, "RS-26" : true, "RS-27" : true, "RS-28" : true, "RS-29" : true, + "RS-KM" : true, "RS-VO" : true, "RU-AD" : true, "RU-AL" : true, "RU-ALT" : true, + "RU-AMU" : true, "RU-ARK" : true, "RU-AST" : true, "RU-BA" : true, "RU-BEL" : true, + "RU-BRY" : true, "RU-BU" : true, "RU-CE" : true, "RU-CHE" : true, "RU-CHU" : true, + "RU-CU" : true, 
"RU-DA" : true, "RU-IN" : true, "RU-IRK" : true, "RU-IVA" : true, + "RU-KAM" : true, "RU-KB" : true, "RU-KC" : true, "RU-KDA" : true, "RU-KEM" : true, + "RU-KGD" : true, "RU-KGN" : true, "RU-KHA" : true, "RU-KHM" : true, "RU-KIR" : true, + "RU-KK" : true, "RU-KL" : true, "RU-KLU" : true, "RU-KO" : true, "RU-KOS" : true, + "RU-KR" : true, "RU-KRS" : true, "RU-KYA" : true, "RU-LEN" : true, "RU-LIP" : true, + "RU-MAG" : true, "RU-ME" : true, "RU-MO" : true, "RU-MOS" : true, "RU-MOW" : true, + "RU-MUR" : true, "RU-NEN" : true, "RU-NGR" : true, "RU-NIZ" : true, "RU-NVS" : true, + "RU-OMS" : true, "RU-ORE" : true, "RU-ORL" : true, "RU-PER" : true, "RU-PNZ" : true, + "RU-PRI" : true, "RU-PSK" : true, "RU-ROS" : true, "RU-RYA" : true, "RU-SA" : true, + "RU-SAK" : true, "RU-SAM" : true, "RU-SAR" : true, "RU-SE" : true, "RU-SMO" : true, + "RU-SPE" : true, "RU-STA" : true, "RU-SVE" : true, "RU-TA" : true, "RU-TAM" : true, + "RU-TOM" : true, "RU-TUL" : true, "RU-TVE" : true, "RU-TY" : true, "RU-TYU" : true, + "RU-UD" : true, "RU-ULY" : true, "RU-VGG" : true, "RU-VLA" : true, "RU-VLG" : true, + "RU-VOR" : true, "RU-YAN" : true, "RU-YAR" : true, "RU-YEV" : true, "RU-ZAB" : true, + "RW-01" : true, "RW-02" : true, "RW-03" : true, "RW-04" : true, "RW-05" : true, + "SA-01" : true, "SA-02" : true, "SA-03" : true, "SA-04" : true, "SA-05" : true, + "SA-06" : true, "SA-07" : true, "SA-08" : true, "SA-09" : true, "SA-10" : true, + "SA-11" : true, "SA-12" : true, "SA-14" : true, "SB-CE" : true, "SB-CH" : true, + "SB-CT" : true, "SB-GU" : true, "SB-IS" : true, "SB-MK" : true, "SB-ML" : true, + "SB-RB" : true, "SB-TE" : true, "SB-WE" : true, "SC-01" : true, "SC-02" : true, + "SC-03" : true, "SC-04" : true, "SC-05" : true, "SC-06" : true, "SC-07" : true, + "SC-08" : true, "SC-09" : true, "SC-10" : true, "SC-11" : true, "SC-12" : true, + "SC-13" : true, "SC-14" : true, "SC-15" : true, "SC-16" : true, "SC-17" : true, + "SC-18" : true, "SC-19" : true, "SC-20" : true, "SC-21" : true, "SC-22" : true, + "SC-23" : true, "SC-24" : true, "SC-25" : true, "SD-DC" : true, "SD-DE" : true, + "SD-DN" : true, "SD-DS" : true, "SD-DW" : true, "SD-GD" : true, "SD-GZ" : true, + "SD-KA" : true, "SD-KH" : true, "SD-KN" : true, "SD-KS" : true, "SD-NB" : true, + "SD-NO" : true, "SD-NR" : true, "SD-NW" : true, "SD-RS" : true, "SD-SI" : true, + "SE-AB" : true, "SE-AC" : true, "SE-BD" : true, "SE-C" : true, "SE-D" : true, + "SE-E" : true, "SE-F" : true, "SE-G" : true, "SE-H" : true, "SE-I" : true, + "SE-K" : true, "SE-M" : true, "SE-N" : true, "SE-O" : true, "SE-S" : true, + "SE-T" : true, "SE-U" : true, "SE-W" : true, "SE-X" : true, "SE-Y" : true, + "SE-Z" : true, "SG-01" : true, "SG-02" : true, "SG-03" : true, "SG-04" : true, + "SG-05" : true, "SH-AC" : true, "SH-HL" : true, "SH-TA" : true, "SI-001" : true, + "SI-002" : true, "SI-003" : true, "SI-004" : true, "SI-005" : true, "SI-006" : true, + "SI-007" : true, "SI-008" : true, "SI-009" : true, "SI-010" : true, "SI-011" : true, + "SI-012" : true, "SI-013" : true, "SI-014" : true, "SI-015" : true, "SI-016" : true, + "SI-017" : true, "SI-018" : true, "SI-019" : true, "SI-020" : true, "SI-021" : true, + "SI-022" : true, "SI-023" : true, "SI-024" : true, "SI-025" : true, "SI-026" : true, + "SI-027" : true, "SI-028" : true, "SI-029" : true, "SI-030" : true, "SI-031" : true, + "SI-032" : true, "SI-033" : true, "SI-034" : true, "SI-035" : true, "SI-036" : true, + "SI-037" : true, "SI-038" : true, "SI-039" : true, "SI-040" : true, "SI-041" : true, + "SI-042" : true, "SI-043" : true, "SI-044" : 
true, "SI-045" : true, "SI-046" : true, + "SI-047" : true, "SI-048" : true, "SI-049" : true, "SI-050" : true, "SI-051" : true, + "SI-052" : true, "SI-053" : true, "SI-054" : true, "SI-055" : true, "SI-056" : true, + "SI-057" : true, "SI-058" : true, "SI-059" : true, "SI-060" : true, "SI-061" : true, + "SI-062" : true, "SI-063" : true, "SI-064" : true, "SI-065" : true, "SI-066" : true, + "SI-067" : true, "SI-068" : true, "SI-069" : true, "SI-070" : true, "SI-071" : true, + "SI-072" : true, "SI-073" : true, "SI-074" : true, "SI-075" : true, "SI-076" : true, + "SI-077" : true, "SI-078" : true, "SI-079" : true, "SI-080" : true, "SI-081" : true, + "SI-082" : true, "SI-083" : true, "SI-084" : true, "SI-085" : true, "SI-086" : true, + "SI-087" : true, "SI-088" : true, "SI-089" : true, "SI-090" : true, "SI-091" : true, + "SI-092" : true, "SI-093" : true, "SI-094" : true, "SI-095" : true, "SI-096" : true, + "SI-097" : true, "SI-098" : true, "SI-099" : true, "SI-100" : true, "SI-101" : true, + "SI-102" : true, "SI-103" : true, "SI-104" : true, "SI-105" : true, "SI-106" : true, + "SI-107" : true, "SI-108" : true, "SI-109" : true, "SI-110" : true, "SI-111" : true, + "SI-112" : true, "SI-113" : true, "SI-114" : true, "SI-115" : true, "SI-116" : true, + "SI-117" : true, "SI-118" : true, "SI-119" : true, "SI-120" : true, "SI-121" : true, + "SI-122" : true, "SI-123" : true, "SI-124" : true, "SI-125" : true, "SI-126" : true, + "SI-127" : true, "SI-128" : true, "SI-129" : true, "SI-130" : true, "SI-131" : true, + "SI-132" : true, "SI-133" : true, "SI-134" : true, "SI-135" : true, "SI-136" : true, + "SI-137" : true, "SI-138" : true, "SI-139" : true, "SI-140" : true, "SI-141" : true, + "SI-142" : true, "SI-143" : true, "SI-144" : true, "SI-146" : true, "SI-147" : true, + "SI-148" : true, "SI-149" : true, "SI-150" : true, "SI-151" : true, "SI-152" : true, + "SI-153" : true, "SI-154" : true, "SI-155" : true, "SI-156" : true, "SI-157" : true, + "SI-158" : true, "SI-159" : true, "SI-160" : true, "SI-161" : true, "SI-162" : true, + "SI-163" : true, "SI-164" : true, "SI-165" : true, "SI-166" : true, "SI-167" : true, + "SI-168" : true, "SI-169" : true, "SI-170" : true, "SI-171" : true, "SI-172" : true, + "SI-173" : true, "SI-174" : true, "SI-175" : true, "SI-176" : true, "SI-177" : true, + "SI-178" : true, "SI-179" : true, "SI-180" : true, "SI-181" : true, "SI-182" : true, + "SI-183" : true, "SI-184" : true, "SI-185" : true, "SI-186" : true, "SI-187" : true, + "SI-188" : true, "SI-189" : true, "SI-190" : true, "SI-191" : true, "SI-192" : true, + "SI-193" : true, "SI-194" : true, "SI-195" : true, "SI-196" : true, "SI-197" : true, + "SI-198" : true, "SI-199" : true, "SI-200" : true, "SI-201" : true, "SI-202" : true, + "SI-203" : true, "SI-204" : true, "SI-205" : true, "SI-206" : true, "SI-207" : true, + "SI-208" : true, "SI-209" : true, "SI-210" : true, "SI-211" : true, "SK-BC" : true, + "SK-BL" : true, "SK-KI" : true, "SK-NI" : true, "SK-PV" : true, "SK-TA" : true, + "SK-TC" : true, "SK-ZI" : true, "SL-E" : true, "SL-N" : true, "SL-S" : true, + "SL-W" : true, "SM-01" : true, "SM-02" : true, "SM-03" : true, "SM-04" : true, + "SM-05" : true, "SM-06" : true, "SM-07" : true, "SM-08" : true, "SM-09" : true, + "SN-DB" : true, "SN-DK" : true, "SN-FK" : true, "SN-KA" : true, "SN-KD" : true, + "SN-KE" : true, "SN-KL" : true, "SN-LG" : true, "SN-MT" : true, "SN-SE" : true, + "SN-SL" : true, "SN-TC" : true, "SN-TH" : true, "SN-ZG" : true, "SO-AW" : true, + "SO-BK" : true, "SO-BN" : true, "SO-BR" : true, "SO-BY" : true, "SO-GA" 
: true, + "SO-GE" : true, "SO-HI" : true, "SO-JD" : true, "SO-JH" : true, "SO-MU" : true, + "SO-NU" : true, "SO-SA" : true, "SO-SD" : true, "SO-SH" : true, "SO-SO" : true, + "SO-TO" : true, "SO-WO" : true, "SR-BR" : true, "SR-CM" : true, "SR-CR" : true, + "SR-MA" : true, "SR-NI" : true, "SR-PM" : true, "SR-PR" : true, "SR-SA" : true, + "SR-SI" : true, "SR-WA" : true, "SS-BN" : true, "SS-BW" : true, "SS-EC" : true, + "SS-EE8" : true, "SS-EW" : true, "SS-JG" : true, "SS-LK" : true, "SS-NU" : true, + "SS-UY" : true, "SS-WR" : true, "ST-P" : true, "ST-S" : true, "SV-AH" : true, + "SV-CA" : true, "SV-CH" : true, "SV-CU" : true, "SV-LI" : true, "SV-MO" : true, + "SV-PA" : true, "SV-SA" : true, "SV-SM" : true, "SV-SO" : true, "SV-SS" : true, + "SV-SV" : true, "SV-UN" : true, "SV-US" : true, "SY-DI" : true, "SY-DR" : true, + "SY-DY" : true, "SY-HA" : true, "SY-HI" : true, "SY-HL" : true, "SY-HM" : true, + "SY-ID" : true, "SY-LA" : true, "SY-QU" : true, "SY-RA" : true, "SY-RD" : true, + "SY-SU" : true, "SY-TA" : true, "SZ-HH" : true, "SZ-LU" : true, "SZ-MA" : true, + "SZ-SH" : true, "TD-BA" : true, "TD-BG" : true, "TD-BO" : true, "TD-CB" : true, + "TD-EN" : true, "TD-GR" : true, "TD-HL" : true, "TD-KA" : true, "TD-LC" : true, + "TD-LO" : true, "TD-LR" : true, "TD-MA" : true, "TD-MC" : true, "TD-ME" : true, + "TD-MO" : true, "TD-ND" : true, "TD-OD" : true, "TD-SA" : true, "TD-SI" : true, + "TD-TA" : true, "TD-TI" : true, "TD-WF" : true, "TG-C" : true, "TG-K" : true, + "TG-M" : true, "TG-P" : true, "TG-S" : true, "TH-10" : true, "TH-11" : true, + "TH-12" : true, "TH-13" : true, "TH-14" : true, "TH-15" : true, "TH-16" : true, + "TH-17" : true, "TH-18" : true, "TH-19" : true, "TH-20" : true, "TH-21" : true, + "TH-22" : true, "TH-23" : true, "TH-24" : true, "TH-25" : true, "TH-26" : true, + "TH-27" : true, "TH-30" : true, "TH-31" : true, "TH-32" : true, "TH-33" : true, + "TH-34" : true, "TH-35" : true, "TH-36" : true, "TH-37" : true, "TH-39" : true, + "TH-40" : true, "TH-41" : true, "TH-42" : true, "TH-43" : true, "TH-44" : true, + "TH-45" : true, "TH-46" : true, "TH-47" : true, "TH-48" : true, "TH-49" : true, + "TH-50" : true, "TH-51" : true, "TH-52" : true, "TH-53" : true, "TH-54" : true, + "TH-55" : true, "TH-56" : true, "TH-57" : true, "TH-58" : true, "TH-60" : true, + "TH-61" : true, "TH-62" : true, "TH-63" : true, "TH-64" : true, "TH-65" : true, + "TH-66" : true, "TH-67" : true, "TH-70" : true, "TH-71" : true, "TH-72" : true, + "TH-73" : true, "TH-74" : true, "TH-75" : true, "TH-76" : true, "TH-77" : true, + "TH-80" : true, "TH-81" : true, "TH-82" : true, "TH-83" : true, "TH-84" : true, + "TH-85" : true, "TH-86" : true, "TH-90" : true, "TH-91" : true, "TH-92" : true, + "TH-93" : true, "TH-94" : true, "TH-95" : true, "TH-96" : true, "TH-S" : true, + "TJ-GB" : true, "TJ-KT" : true, "TJ-SU" : true, "TL-AL" : true, "TL-AN" : true, + "TL-BA" : true, "TL-BO" : true, "TL-CO" : true, "TL-DI" : true, "TL-ER" : true, + "TL-LA" : true, "TL-LI" : true, "TL-MF" : true, "TL-MT" : true, "TL-OE" : true, + "TL-VI" : true, "TM-A" : true, "TM-B" : true, "TM-D" : true, "TM-L" : true, + "TM-M" : true, "TM-S" : true, "TN-11" : true, "TN-12" : true, "TN-13" : true, + "TN-14" : true, "TN-21" : true, "TN-22" : true, "TN-23" : true, "TN-31" : true, + "TN-32" : true, "TN-33" : true, "TN-34" : true, "TN-41" : true, "TN-42" : true, + "TN-43" : true, "TN-51" : true, "TN-52" : true, "TN-53" : true, "TN-61" : true, + "TN-71" : true, "TN-72" : true, "TN-73" : true, "TN-81" : true, "TN-82" : true, + "TN-83" : true, "TO-01" : true, 
"TO-02" : true, "TO-03" : true, "TO-04" : true, + "TO-05" : true, "TR-01" : true, "TR-02" : true, "TR-03" : true, "TR-04" : true, + "TR-05" : true, "TR-06" : true, "TR-07" : true, "TR-08" : true, "TR-09" : true, + "TR-10" : true, "TR-11" : true, "TR-12" : true, "TR-13" : true, "TR-14" : true, + "TR-15" : true, "TR-16" : true, "TR-17" : true, "TR-18" : true, "TR-19" : true, + "TR-20" : true, "TR-21" : true, "TR-22" : true, "TR-23" : true, "TR-24" : true, + "TR-25" : true, "TR-26" : true, "TR-27" : true, "TR-28" : true, "TR-29" : true, + "TR-30" : true, "TR-31" : true, "TR-32" : true, "TR-33" : true, "TR-34" : true, + "TR-35" : true, "TR-36" : true, "TR-37" : true, "TR-38" : true, "TR-39" : true, + "TR-40" : true, "TR-41" : true, "TR-42" : true, "TR-43" : true, "TR-44" : true, + "TR-45" : true, "TR-46" : true, "TR-47" : true, "TR-48" : true, "TR-49" : true, + "TR-50" : true, "TR-51" : true, "TR-52" : true, "TR-53" : true, "TR-54" : true, + "TR-55" : true, "TR-56" : true, "TR-57" : true, "TR-58" : true, "TR-59" : true, + "TR-60" : true, "TR-61" : true, "TR-62" : true, "TR-63" : true, "TR-64" : true, + "TR-65" : true, "TR-66" : true, "TR-67" : true, "TR-68" : true, "TR-69" : true, + "TR-70" : true, "TR-71" : true, "TR-72" : true, "TR-73" : true, "TR-74" : true, + "TR-75" : true, "TR-76" : true, "TR-77" : true, "TR-78" : true, "TR-79" : true, + "TR-80" : true, "TR-81" : true, "TT-ARI" : true, "TT-CHA" : true, "TT-CTT" : true, + "TT-DMN" : true, "TT-ETO" : true, "TT-PED" : true, "TT-POS" : true, "TT-PRT" : true, + "TT-PTF" : true, "TT-RCM" : true, "TT-SFO" : true, "TT-SGE" : true, "TT-SIP" : true, + "TT-SJL" : true, "TT-TUP" : true, "TT-WTO" : true, "TV-FUN" : true, "TV-NIT" : true, + "TV-NKF" : true, "TV-NKL" : true, "TV-NMA" : true, "TV-NMG" : true, "TV-NUI" : true, + "TV-VAI" : true, "TW-CHA" : true, "TW-CYI" : true, "TW-CYQ" : true, "TW-HSQ" : true, + "TW-HSZ" : true, "TW-HUA" : true, "TW-ILA" : true, "TW-KEE" : true, "TW-KHH" : true, + "TW-KHQ" : true, "TW-MIA" : true, "TW-NAN" : true, "TW-PEN" : true, "TW-PIF" : true, + "TW-TAO" : true, "TW-TNN" : true, "TW-TNQ" : true, "TW-TPE" : true, "TW-TPQ" : true, + "TW-TTT" : true, "TW-TXG" : true, "TW-TXQ" : true, "TW-YUN" : true, "TZ-01" : true, + "TZ-02" : true, "TZ-03" : true, "TZ-04" : true, "TZ-05" : true, "TZ-06" : true, + "TZ-07" : true, "TZ-08" : true, "TZ-09" : true, "TZ-10" : true, "TZ-11" : true, + "TZ-12" : true, "TZ-13" : true, "TZ-14" : true, "TZ-15" : true, "TZ-16" : true, + "TZ-17" : true, "TZ-18" : true, "TZ-19" : true, "TZ-20" : true, "TZ-21" : true, + "TZ-22" : true, "TZ-23" : true, "TZ-24" : true, "TZ-25" : true, "TZ-26" : true, + "UA-05" : true, "UA-07" : true, "UA-09" : true, "UA-12" : true, "UA-14" : true, + "UA-18" : true, "UA-21" : true, "UA-23" : true, "UA-26" : true, "UA-30" : true, + "UA-32" : true, "UA-35" : true, "UA-40" : true, "UA-43" : true, "UA-46" : true, + "UA-48" : true, "UA-51" : true, "UA-53" : true, "UA-56" : true, "UA-59" : true, + "UA-61" : true, "UA-63" : true, "UA-65" : true, "UA-68" : true, "UA-71" : true, + "UA-74" : true, "UA-77" : true, "UG-101" : true, "UG-102" : true, "UG-103" : true, + "UG-104" : true, "UG-105" : true, "UG-106" : true, "UG-107" : true, "UG-108" : true, + "UG-109" : true, "UG-110" : true, "UG-111" : true, "UG-112" : true, "UG-113" : true, + "UG-114" : true, "UG-115" : true, "UG-116" : true, "UG-201" : true, "UG-202" : true, + "UG-203" : true, "UG-204" : true, "UG-205" : true, "UG-206" : true, "UG-207" : true, + "UG-208" : true, "UG-209" : true, "UG-210" : true, "UG-211" : true, 
"UG-212" : true, + "UG-213" : true, "UG-214" : true, "UG-215" : true, "UG-216" : true, "UG-217" : true, + "UG-218" : true, "UG-219" : true, "UG-220" : true, "UG-221" : true, "UG-222" : true, + "UG-223" : true, "UG-224" : true, "UG-301" : true, "UG-302" : true, "UG-303" : true, + "UG-304" : true, "UG-305" : true, "UG-306" : true, "UG-307" : true, "UG-308" : true, + "UG-309" : true, "UG-310" : true, "UG-311" : true, "UG-312" : true, "UG-313" : true, + "UG-314" : true, "UG-315" : true, "UG-316" : true, "UG-317" : true, "UG-318" : true, + "UG-319" : true, "UG-320" : true, "UG-321" : true, "UG-401" : true, "UG-402" : true, + "UG-403" : true, "UG-404" : true, "UG-405" : true, "UG-406" : true, "UG-407" : true, + "UG-408" : true, "UG-409" : true, "UG-410" : true, "UG-411" : true, "UG-412" : true, + "UG-413" : true, "UG-414" : true, "UG-415" : true, "UG-416" : true, "UG-417" : true, + "UG-418" : true, "UG-419" : true, "UG-C" : true, "UG-E" : true, "UG-N" : true, + "UG-W" : true, "UM-67" : true, "UM-71" : true, "UM-76" : true, "UM-79" : true, + "UM-81" : true, "UM-84" : true, "UM-86" : true, "UM-89" : true, "UM-95" : true, + "US-AK" : true, "US-AL" : true, "US-AR" : true, "US-AS" : true, "US-AZ" : true, + "US-CA" : true, "US-CO" : true, "US-CT" : true, "US-DC" : true, "US-DE" : true, + "US-FL" : true, "US-GA" : true, "US-GU" : true, "US-HI" : true, "US-IA" : true, + "US-ID" : true, "US-IL" : true, "US-IN" : true, "US-KS" : true, "US-KY" : true, + "US-LA" : true, "US-MA" : true, "US-MD" : true, "US-ME" : true, "US-MI" : true, + "US-MN" : true, "US-MO" : true, "US-MP" : true, "US-MS" : true, "US-MT" : true, + "US-NC" : true, "US-ND" : true, "US-NE" : true, "US-NH" : true, "US-NJ" : true, + "US-NM" : true, "US-NV" : true, "US-NY" : true, "US-OH" : true, "US-OK" : true, + "US-OR" : true, "US-PA" : true, "US-PR" : true, "US-RI" : true, "US-SC" : true, + "US-SD" : true, "US-TN" : true, "US-TX" : true, "US-UM" : true, "US-UT" : true, + "US-VA" : true, "US-VI" : true, "US-VT" : true, "US-WA" : true, "US-WI" : true, + "US-WV" : true, "US-WY" : true, "UY-AR" : true, "UY-CA" : true, "UY-CL" : true, + "UY-CO" : true, "UY-DU" : true, "UY-FD" : true, "UY-FS" : true, "UY-LA" : true, + "UY-MA" : true, "UY-MO" : true, "UY-PA" : true, "UY-RN" : true, "UY-RO" : true, + "UY-RV" : true, "UY-SA" : true, "UY-SJ" : true, "UY-SO" : true, "UY-TA" : true, + "UY-TT" : true, "UZ-AN" : true, "UZ-BU" : true, "UZ-FA" : true, "UZ-JI" : true, + "UZ-NG" : true, "UZ-NW" : true, "UZ-QA" : true, "UZ-QR" : true, "UZ-SA" : true, + "UZ-SI" : true, "UZ-SU" : true, "UZ-TK" : true, "UZ-TO" : true, "UZ-XO" : true, + "VC-01" : true, "VC-02" : true, "VC-03" : true, "VC-04" : true, "VC-05" : true, + "VC-06" : true, "VE-A" : true, "VE-B" : true, "VE-C" : true, "VE-D" : true, + "VE-E" : true, "VE-F" : true, "VE-G" : true, "VE-H" : true, "VE-I" : true, + "VE-J" : true, "VE-K" : true, "VE-L" : true, "VE-M" : true, "VE-N" : true, + "VE-O" : true, "VE-P" : true, "VE-R" : true, "VE-S" : true, "VE-T" : true, + "VE-U" : true, "VE-V" : true, "VE-W" : true, "VE-X" : true, "VE-Y" : true, + "VE-Z" : true, "VN-01" : true, "VN-02" : true, "VN-03" : true, "VN-04" : true, + "VN-05" : true, "VN-06" : true, "VN-07" : true, "VN-09" : true, "VN-13" : true, + "VN-14" : true, "VN-15" : true, "VN-18" : true, "VN-20" : true, "VN-21" : true, + "VN-22" : true, "VN-23" : true, "VN-24" : true, "VN-25" : true, "VN-26" : true, + "VN-27" : true, "VN-28" : true, "VN-29" : true, "VN-30" : true, "VN-31" : true, + "VN-32" : true, "VN-33" : true, "VN-34" : true, "VN-35" : true, 
"VN-36" : true, + "VN-37" : true, "VN-39" : true, "VN-40" : true, "VN-41" : true, "VN-43" : true, + "VN-44" : true, "VN-45" : true, "VN-46" : true, "VN-47" : true, "VN-49" : true, + "VN-50" : true, "VN-51" : true, "VN-52" : true, "VN-53" : true, "VN-54" : true, + "VN-55" : true, "VN-56" : true, "VN-57" : true, "VN-58" : true, "VN-59" : true, + "VN-61" : true, "VN-63" : true, "VN-66" : true, "VN-67" : true, "VN-68" : true, + "VN-69" : true, "VN-70" : true, "VN-71" : true, "VN-72" : true, "VN-73" : true, + "VN-CT" : true, "VN-DN" : true, "VN-HN" : true, "VN-HP" : true, "VN-SG" : true, + "VU-MAP" : true, "VU-PAM" : true, "VU-SAM" : true, "VU-SEE" : true, "VU-TAE" : true, + "VU-TOB" : true, "WS-AA" : true, "WS-AL" : true, "WS-AT" : true, "WS-FA" : true, + "WS-GE" : true, "WS-GI" : true, "WS-PA" : true, "WS-SA" : true, "WS-TU" : true, + "WS-VF" : true, "WS-VS" : true, "YE-AB" : true, "YE-AD" : true, "YE-AM" : true, + "YE-BA" : true, "YE-DA" : true, "YE-DH" : true, "YE-HD" : true, "YE-HJ" : true, + "YE-IB" : true, "YE-JA" : true, "YE-LA" : true, "YE-MA" : true, "YE-MR" : true, + "YE-MU" : true, "YE-MW" : true, "YE-RA" : true, "YE-SD" : true, "YE-SH" : true, + "YE-SN" : true, "YE-TA" : true, "ZA-EC" : true, "ZA-FS" : true, "ZA-GP" : true, + "ZA-LP" : true, "ZA-MP" : true, "ZA-NC" : true, "ZA-NW" : true, "ZA-WC" : true, + "ZA-ZN" : true, "ZM-01" : true, "ZM-02" : true, "ZM-03" : true, "ZM-04" : true, + "ZM-05" : true, "ZM-06" : true, "ZM-07" : true, "ZM-08" : true, "ZM-09" : true, + "ZW-BU" : true, "ZW-HA" : true, "ZW-MA" : true, "ZW-MC" : true, "ZW-ME" : true, + "ZW-MI" : true, "ZW-MN" : true, "ZW-MS" : true, "ZW-MV" : true, "ZW-MW" : true, +} diff --git a/vendor/github.com/go-playground/validator/v10/currency_codes.go b/vendor/github.com/go-playground/validator/v10/currency_codes.go new file mode 100644 index 00000000000..a5cd9b18a0a --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/currency_codes.go @@ -0,0 +1,79 @@ +package validator + +var iso4217 = map[string]bool{ + "AFN": true, "EUR": true, "ALL": true, "DZD": true, "USD": true, + "AOA": true, "XCD": true, "ARS": true, "AMD": true, "AWG": true, + "AUD": true, "AZN": true, "BSD": true, "BHD": true, "BDT": true, + "BBD": true, "BYN": true, "BZD": true, "XOF": true, "BMD": true, + "INR": true, "BTN": true, "BOB": true, "BOV": true, "BAM": true, + "BWP": true, "NOK": true, "BRL": true, "BND": true, "BGN": true, + "BIF": true, "CVE": true, "KHR": true, "XAF": true, "CAD": true, + "KYD": true, "CLP": true, "CLF": true, "CNY": true, "COP": true, + "COU": true, "KMF": true, "CDF": true, "NZD": true, "CRC": true, + "HRK": true, "CUP": true, "CUC": true, "ANG": true, "CZK": true, + "DKK": true, "DJF": true, "DOP": true, "EGP": true, "SVC": true, + "ERN": true, "SZL": true, "ETB": true, "FKP": true, "FJD": true, + "XPF": true, "GMD": true, "GEL": true, "GHS": true, "GIP": true, + "GTQ": true, "GBP": true, "GNF": true, "GYD": true, "HTG": true, + "HNL": true, "HKD": true, "HUF": true, "ISK": true, "IDR": true, + "XDR": true, "IRR": true, "IQD": true, "ILS": true, "JMD": true, + "JPY": true, "JOD": true, "KZT": true, "KES": true, "KPW": true, + "KRW": true, "KWD": true, "KGS": true, "LAK": true, "LBP": true, + "LSL": true, "ZAR": true, "LRD": true, "LYD": true, "CHF": true, + "MOP": true, "MKD": true, "MGA": true, "MWK": true, "MYR": true, + "MVR": true, "MRU": true, "MUR": true, "XUA": true, "MXN": true, + "MXV": true, "MDL": true, "MNT": true, "MAD": true, "MZN": true, + "MMK": true, "NAD": true, "NPR": true, "NIO": true, "NGN": true, 
+ "OMR": true, "PKR": true, "PAB": true, "PGK": true, "PYG": true, + "PEN": true, "PHP": true, "PLN": true, "QAR": true, "RON": true, + "RUB": true, "RWF": true, "SHP": true, "WST": true, "STN": true, + "SAR": true, "RSD": true, "SCR": true, "SLL": true, "SGD": true, + "XSU": true, "SBD": true, "SOS": true, "SSP": true, "LKR": true, + "SDG": true, "SRD": true, "SEK": true, "CHE": true, "CHW": true, + "SYP": true, "TWD": true, "TJS": true, "TZS": true, "THB": true, + "TOP": true, "TTD": true, "TND": true, "TRY": true, "TMT": true, + "UGX": true, "UAH": true, "AED": true, "USN": true, "UYU": true, + "UYI": true, "UYW": true, "UZS": true, "VUV": true, "VES": true, + "VND": true, "YER": true, "ZMW": true, "ZWL": true, "XBA": true, + "XBB": true, "XBC": true, "XBD": true, "XTS": true, "XXX": true, + "XAU": true, "XPD": true, "XPT": true, "XAG": true, +} + +var iso4217_numeric = map[int]bool{ + 8: true, 12: true, 32: true, 36: true, 44: true, + 48: true, 50: true, 51: true, 52: true, 60: true, + 64: true, 68: true, 72: true, 84: true, 90: true, + 96: true, 104: true, 108: true, 116: true, 124: true, + 132: true, 136: true, 144: true, 152: true, 156: true, + 170: true, 174: true, 188: true, 191: true, 192: true, + 203: true, 208: true, 214: true, 222: true, 230: true, + 232: true, 238: true, 242: true, 262: true, 270: true, + 292: true, 320: true, 324: true, 328: true, 332: true, + 340: true, 344: true, 348: true, 352: true, 356: true, + 360: true, 364: true, 368: true, 376: true, 388: true, + 392: true, 398: true, 400: true, 404: true, 408: true, + 410: true, 414: true, 417: true, 418: true, 422: true, + 426: true, 430: true, 434: true, 446: true, 454: true, + 458: true, 462: true, 480: true, 484: true, 496: true, + 498: true, 504: true, 512: true, 516: true, 524: true, + 532: true, 533: true, 548: true, 554: true, 558: true, + 566: true, 578: true, 586: true, 590: true, 598: true, + 600: true, 604: true, 608: true, 634: true, 643: true, + 646: true, 654: true, 682: true, 690: true, 694: true, + 702: true, 704: true, 706: true, 710: true, 728: true, + 748: true, 752: true, 756: true, 760: true, 764: true, + 776: true, 780: true, 784: true, 788: true, 800: true, + 807: true, 818: true, 826: true, 834: true, 840: true, + 858: true, 860: true, 882: true, 886: true, 901: true, + 927: true, 928: true, 929: true, 930: true, 931: true, + 932: true, 933: true, 934: true, 936: true, 938: true, + 940: true, 941: true, 943: true, 944: true, 946: true, + 947: true, 948: true, 949: true, 950: true, 951: true, + 952: true, 953: true, 955: true, 956: true, 957: true, + 958: true, 959: true, 960: true, 961: true, 962: true, + 963: true, 964: true, 965: true, 967: true, 968: true, + 969: true, 970: true, 971: true, 972: true, 973: true, + 975: true, 976: true, 977: true, 978: true, 979: true, + 980: true, 981: true, 984: true, 985: true, 986: true, + 990: true, 994: true, 997: true, 999: true, +} diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go new file mode 100644 index 00000000000..8c2584792e6 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/doc.go @@ -0,0 +1,1342 @@ +/* +Package validator implements value validations for structs and individual fields +based on tags. + +It can also handle Cross-Field and Cross-Struct validation for nested structs +and has the ability to dive into arrays and maps of any type. 
+ +See more examples at https://github.com/go-playground/validator/tree/master/_examples + +Singleton + +Validator is designed to be thread-safe and used as a singleton instance. +It caches information about your struct and validations, +in essence only parsing your validation tags once per struct type. +Using multiple instances negates the benefit of caching. +Functions that are not thread-safe are explicitly marked as such in the documentation. + +Validation Functions Return Type error + +Doing things this way is actually how the standard library does it; see the +os.Open function here: + + https://golang.org/pkg/os/#Open. + +The authors chose to return type "error" to avoid the issue discussed in the following, +where err is always != nil: + + http://stackoverflow.com/a/29138676/3158232 + https://github.com/go-playground/validator/issues/134 + +Validator returns only InvalidValidationError for bad validation input, nil or +ValidationErrors as type error; so, in your code all you need to do is check +if the error returned is not nil, and if it's not, check whether the error is +InvalidValidationError ( if necessary, most of the time it isn't ) and type cast +it to type ValidationErrors like so err.(validator.ValidationErrors). + +Custom Validation Functions + +Custom Validation functions can be added. Example: + + // Structure + func customFunc(fl validator.FieldLevel) bool { + + if fl.Field().String() == "invalid" { + return false + } + + return true + } + + validate.RegisterValidation("custom tag name", customFunc) + // NOTES: using the same tag name as an existing function + // will overwrite the existing one + +Cross-Field Validation + +Cross-Field Validation can be done via the following tags: + - eqfield + - nefield + - gtfield + - gtefield + - ltfield + - ltefield + - eqcsfield + - necsfield + - gtcsfield + - gtecsfield + - ltcsfield + - ltecsfield + +If, however, some custom cross-field validation is required, it can be done +using a custom validation. + +Why not just have cross-fields validation tags (i.e. only eqcsfield and not +eqfield)? + +The reason is efficiency. If you want to check a field within the same struct, +"eqfield" only has to find the field on the same struct (1 level). But, if we +used "eqcsfield" it could be multiple levels down. Example: + + type Inner struct { + StartDate time.Time + } + + type Outer struct { + InnerStructField *Inner + CreatedAt time.Time `validate:"ltecsfield=InnerStructField.StartDate"` + } + + now := time.Now() + + inner := &Inner{ + StartDate: now, + } + + outer := &Outer{ + InnerStructField: inner, + CreatedAt: now, + } + + errs := validate.Struct(outer) + + // NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed + // into the function + // when calling validate.VarWithValue(val, field, tag) val will be + // whatever you pass, struct, field... + // when calling validate.Field(field, tag) val will be nil + +Multiple Validators + +Multiple validators on a field will process in the order defined. Example: + + type Test struct { + Field `validate:"max=10,min=1"` + } + + // max will be checked then min + +Bad Validator definitions are not handled by the library. Example: + + type Test struct { + Field `validate:"min=10,max=0"` + } + + // this definition of min max will never succeed + +Using Validator Tags + +Baked In Cross-Field validation only compares fields on the same struct. +If Cross-Field + Cross-Struct validation is needed you should implement your +own custom validator. +
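Tying together the custom-validation and error-handling patterns described above, a sketch under stated assumptions (the Address type and the "notblank" tag name are illustrative, not part of the package):

    package main

    import (
        "fmt"
        "strings"

        "github.com/go-playground/validator/v10"
    )

    // Address is illustrative; "notblank" is a custom tag name chosen for this sketch.
    type Address struct {
        City string `validate:"notblank"`
    }

    func main() {
        validate := validator.New()

        // Register the custom validation function under the chosen tag name.
        _ = validate.RegisterValidation("notblank", func(fl validator.FieldLevel) bool {
            return strings.TrimSpace(fl.Field().String()) != ""
        })

        if err := validate.Struct(Address{City: "   "}); err != nil {
            // InvalidValidationError means bad input to the validator itself (e.g. a nil value).
            if _, ok := err.(*validator.InvalidValidationError); ok {
                fmt.Println("invalid value passed to Struct:", err)
                return
            }
            // Otherwise the error is a ValidationErrors slice of FieldError values.
            for _, fe := range err.(validator.ValidationErrors) {
                fmt.Printf("%s failed on the %q tag\n", fe.Namespace(), fe.Tag())
            }
        }
    }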
+Comma (",") is the default separator of validation tags. If you wish to +have a comma included within the parameter (i.e. excludesall=,) you will need to +use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma, +so the above will become excludesall=0x2C. + + type Test struct { + Field `validate:"excludesall=,"` // BAD! Do not include a comma. + Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation. + } + +Pipe ("|") is the 'or' validation tags separator. If you wish to +have a pipe included within the parameter (i.e. excludesall=|) you will need to +use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe, +so the above will become excludesall=0x7C. + + type Test struct { + Field `validate:"excludesall=|"` // BAD! Do not include a pipe! + Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation. + } + + +Baked In Validators and Tags + +Here is a list of the current built-in validators: + + +Skip Field + +Tells the validation to skip this struct field; this is particularly +handy in ignoring embedded structs from being validated. (Usage: -) + Usage: - + + +Or Operator + +This is the 'or' operator allowing multiple validators to be used and +accepted. (Usage: rgb|rgba) <-- this would allow either rgb or rgba +colors to be accepted. This can also be combined with 'and' for example +( Usage: omitempty,rgb|rgba) + + Usage: | + +StructOnly + +When a field that is a nested struct is encountered, and contains this flag +any validation on the nested struct will be run, but none of the nested +struct fields will be validated. This is useful if inside of your program +you know the struct will be valid, but need to verify it has been assigned. +NOTE: only "required" and "omitempty" can be used on a struct itself. + + Usage: structonly + +NoStructLevel + +Same as structonly tag except that any struct level validations will not run. + + Usage: nostructlevel + +Omit Empty + +Allows conditional validation, for example if a field is not set with +a value (Determined by the "required" validator) then other validation +such as min or max won't run, but if a value is set validation will run. + + Usage: omitempty + +Dive + +This tells the validator to dive into a slice, array or map and validate that +level of the slice, array or map with the validation tags that follow. +Multidimensional nesting is also supported, each level you wish to dive will +require another dive tag. dive has some sub-tags, 'keys' & 'endkeys'; please see +the Keys & EndKeys section just below. + + Usage: dive + +Example #1 + + [][]string with validation tag "gt=0,dive,len=1,dive,required" + // gt=0 will be applied to [] + // len=1 will be applied to []string + // required will be applied to string + +Example #2 + + [][]string with validation tag "gt=0,dive,dive,required" + // gt=0 will be applied to [] + // []string will be spared validation + // required will be applied to string + +Keys & EndKeys + +These are to be used together directly after the dive tag and tell the validator +that anything between 'keys' and 'endkeys' applies to the keys of a map and not the +values; think of it like the 'dive' tag, but for map keys instead of values. +Multidimensional nesting is also supported, each level you wish to validate will +require another 'keys' and 'endkeys' tag. These tags are only valid for maps. + + Usage: dive,keys,othertagvalidation(s),endkeys,valuevalidationtags + +Example #1 + + map[string]string with validation tag "gt=0,dive,keys,eq=1|eq=2,endkeys,required" + // gt=0 will be applied to the map itself + // eq=1|eq=2 will be applied to the map keys + // required will be applied to map values + +Example #2 + + map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required" + // gt=0 will be applied to the map itself + // eq=1|eq=2 will be applied to each array element in the map keys + // required will be applied to map values
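As a runnable counterpart to the dive and keys/endkeys examples above, a sketch with an assumed Config type (the field names, tags, and sample values are illustrative):

    package main

    import (
        "fmt"

        "github.com/go-playground/validator/v10"
    )

    // Config is an assumed type used only to exercise dive and keys/endkeys.
    type Config struct {
        // gt=0: the slice itself must be non-empty; dive,required: each element must be non-empty.
        Hosts []string `validate:"gt=0,dive,required"`
        // keys,len=2,endkeys: every map key must be exactly 2 characters; numeric: every value must be numeric.
        Regions map[string]string `validate:"dive,keys,len=2,endkeys,numeric"`
    }

    func main() {
        validate := validator.New()

        cfg := Config{
            Hosts:   []string{"a.example.com", ""},               // "" fails required
            Regions: map[string]string{"eu": "10", "usa": "ten"}, // "usa" fails len=2, "ten" fails numeric
        }
        fmt.Println(validate.Struct(cfg))
    }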
+ +Required + +This validates that the value is not the data type's default zero value. +For numbers ensures value is not zero. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required + +Required If + +The field under validation must be present and not empty only if all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "". For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. + + Usage: required_if + +Examples: + + // require the field if the Field1 is equal to the parameter given: + Usage: required_if=Field1 foobar + + // require the field if the Field1 and Field2 are equal to the value respectively: + Usage: required_if=Field1 foo Field2 bar + +Required Unless + +The field under validation must be present and not empty unless all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "". For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. + + Usage: required_unless + +Examples: + + // require the field unless the Field1 is equal to the parameter given: + Usage: required_unless=Field1 foobar + + // require the field unless the Field1 and Field2 are equal to the value respectively: + Usage: required_unless=Field1 foo Field2 bar + +Required With + +The field under validation must be present and not empty only if any +of the other specified fields are present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_with + +Examples: + + // require the field if the Field1 is present: + Usage: required_with=Field1 + + // require the field if the Field1 or Field2 is present: + Usage: required_with=Field1 Field2 + +Required With All + +The field under validation must be present and not empty only if all +of the other specified fields are present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_with_all + +Example: + + // require the field if the Field1 and Field2 are present: + Usage: required_with_all=Field1 Field2 + +Required Without + +The field under validation must be present and not empty only when any +of the other specified fields are not present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_without + +Examples: + + // require the field if the Field1 is not present: + Usage: required_without=Field1 + + // require the field if the Field1 or Field2 is not present: + Usage: required_without=Field1 Field2
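A small sketch of the conditional "required_*" tags covered above; the Order type, its field names, and the sample values are assumptions made for illustration:

    package main

    import (
        "fmt"

        "github.com/go-playground/validator/v10"
    )

    // Order is an assumed type demonstrating conditional requirements.
    type Order struct {
        Payment string `validate:"required,oneof=card cash"`
        // CardToken is required only when Payment equals "card".
        CardToken string `validate:"required_if=Payment card"`
        // Email is required when Phone is not present, and vice versa.
        Email string `validate:"required_without=Phone"`
        Phone string `validate:"required_without=Email"`
    }

    func main() {
        validate := validator.New()

        // Fails: Payment is "card" but CardToken is empty, and neither Email nor Phone is set.
        fmt.Println(validate.Struct(Order{Payment: "card"}))

        // Passes: cash payment needs no token, and Phone satisfies the contact requirement.
        fmt.Println(validate.Struct(Order{Payment: "cash", Phone: "+15551234567"}))
    }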
+ +Required Without All + +The field under validation must be present and not empty only when all +of the other specified fields are not present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_without_all + +Example: + + // require the field if the Field1 and Field2 are not present: + Usage: required_without_all=Field1 Field2 + +Is Default + +This validates that the value is the default value and is almost the +opposite of required. + + Usage: isdefault + +Length + +For numbers, length will ensure that the value is +equal to the parameter given. For strings, it checks that +the string length is exactly that number of characters. For slices, +arrays, and maps, validates the number of items. + +Example #1 + + Usage: len=10 + +Example #2 (time.Duration) + +For time.Duration, len will ensure that the value is equal to the duration given +in the parameter. + + Usage: len=1h30m + +Maximum + +For numbers, max will ensure that the value is +less than or equal to the parameter given. For strings, it checks +that the string length is at most that number of characters. For +slices, arrays, and maps, validates the number of items. + +Example #1 + + Usage: max=10 + +Example #2 (time.Duration) + +For time.Duration, max will ensure that the value is less than or equal to the +duration given in the parameter. + + Usage: max=1h30m + +Minimum + +For numbers, min will ensure that the value is +greater than or equal to the parameter given. For strings, it checks that +the string length is at least that number of characters. For slices, +arrays, and maps, validates the number of items. + +Example #1 + + Usage: min=10 + +Example #2 (time.Duration) + +For time.Duration, min will ensure that the value is greater than or equal to +the duration given in the parameter. + + Usage: min=1h30m + +Equals + +For strings & numbers, eq will ensure that the value is +equal to the parameter given. For slices, arrays, and maps, +validates the number of items. + +Example #1 + + Usage: eq=10 + +Example #2 (time.Duration) + +For time.Duration, eq will ensure that the value is equal to the duration given +in the parameter. + + Usage: eq=1h30m + +Not Equal + +For strings & numbers, ne will ensure that the value is not +equal to the parameter given. For slices, arrays, and maps, +validates the number of items. + +Example #1 + + Usage: ne=10 + +Example #2 (time.Duration) + +For time.Duration, ne will ensure that the value is not equal to the duration +given in the parameter. + + Usage: ne=1h30m + +One Of + +For strings, ints, and uints, oneof will ensure that the value +is one of the values in the parameter. The parameter should be +a list of values separated by whitespace. Values may be +strings or numbers. To match strings with spaces in them, include +the target string between single quotes. + + Usage: oneof=red green + oneof='red green' 'blue yellow' + oneof=5 7 9 + +Greater Than + +For numbers, this will ensure that the value is greater than the +parameter given. For strings, it checks that the string length +is greater than that number of characters. For slices, arrays +and maps it validates the number of items.
+ +Example #1 + + Usage: gt=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is greater than time.Now.UTC(). + + Usage: gt + +Example #3 (time.Duration) + +For time.Duration, gt will ensure that the value is greater than the duration +given in the parameter. + + Usage: gt=1h30m + +Greater Than or Equal + +Same as 'min' above. Kept both to make terminology with 'len' easier. + +Example #1 + + Usage: gte=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is greater than or equal to time.Now.UTC(). + + Usage: gte + +Example #3 (time.Duration) + +For time.Duration, gte will ensure that the value is greater than or equal to +the duration given in the parameter. + + Usage: gte=1h30m + +Less Than + +For numbers, this will ensure that the value is less than the parameter given. +For strings, it checks that the string length is less than that number of +characters. For slices, arrays, and maps it validates the number of items. + +Example #1 + + Usage: lt=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is less than time.Now.UTC(). + + Usage: lt + +Example #3 (time.Duration) + +For time.Duration, lt will ensure that the value is less than the duration given +in the parameter. + + Usage: lt=1h30m + +Less Than or Equal + +Same as 'max' above. Kept both to make terminology with 'len' easier. + +Example #1 + + Usage: lte=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is less than or equal to time.Now.UTC(). + + Usage: lte + +Example #3 (time.Duration) + +For time.Duration, lte will ensure that the value is less than or equal to the +duration given in the parameter. + + Usage: lte=1h30m + +Field Equals Another Field + +This will validate the field value against another fields value either within +a struct or passed in field. + +Example #1: + + // Validation on Password field using: + Usage: eqfield=ConfirmPassword + +Example #2: + + // Validating by field: + validate.VarWithValue(password, confirmpassword, "eqfield") + +Field Equals Another Field (relative) + +This does the same as eqfield except that it validates the field provided relative +to the top level struct. + + Usage: eqcsfield=InnerStructField.Field) + +Field Does Not Equal Another Field + +This will validate the field value against another fields value either within +a struct or passed in field. + +Examples: + + // Confirm two colors are not the same: + // + // Validation on Color field: + Usage: nefield=Color2 + + // Validating by field: + validate.VarWithValue(color1, color2, "nefield") + +Field Does Not Equal Another Field (relative) + +This does the same as nefield except that it validates the field provided +relative to the top level struct. + + Usage: necsfield=InnerStructField.Field + +Field Greater Than Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(gtfield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "gtfield") + +Field Greater Than Another Relative Field + +This does the same as gtfield except that it validates the field provided +relative to the top level struct. 
+ + Usage: gtcsfield=InnerStructField.Field + +Field Greater Than or Equal To Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(gtefield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "gtefield") + +Field Greater Than or Equal To Another Relative Field + +This does the same as gtefield except that it validates the field provided relative +to the top level struct. + + Usage: gtecsfield=InnerStructField.Field + +Less Than Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(ltfield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "ltfield") + +Less Than Another Relative Field + +This does the same as ltfield except that it validates the field provided relative +to the top level struct. + + Usage: ltcsfield=InnerStructField.Field + +Less Than or Equal To Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(ltefield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "ltefield") + +Less Than or Equal To Another Relative Field + +This does the same as ltefield except that it validates the field provided relative +to the top level struct. + + Usage: ltecsfield=InnerStructField.Field + +Field Contains Another Field + +This does the same as contains except for struct fields. It should only be used +with string types. See the behavior of reflect.Value.String() for behavior on +other types. + + Usage: containsfield=InnerStructField.Field + +Field Excludes Another Field + +This does the same as excludes except for struct fields. It should only be used +with string types. See the behavior of reflect.Value.String() for behavior on +other types. + + Usage: excludesfield=InnerStructField.Field + +Unique + +For arrays & slices, unique will ensure that there are no duplicates. +For maps, unique will ensure that there are no duplicate values. +For slices of struct, unique will ensure that there are no duplicate values +in a field of the struct specified via a parameter. 
+ + // For arrays, slices, and maps: + Usage: unique + + // For slices of struct: + Usage: unique=field + +Alpha Only + +This validates that a string value contains ASCII alpha characters only + + Usage: alpha + +Alphanumeric + +This validates that a string value contains ASCII alphanumeric characters only + + Usage: alphanum + +Alpha Unicode + +This validates that a string value contains unicode alpha characters only + + Usage: alphaunicode + +Alphanumeric Unicode + +This validates that a string value contains unicode alphanumeric characters only + + Usage: alphanumunicode + +Boolean + +This validates that a string value can successfully be parsed into a boolean with strconv.ParseBool + + Usage: boolean + +Number + +This validates that a string value contains number values only. +For integers or float it returns true. + + Usage: number + +Numeric + +This validates that a string value contains a basic numeric value. +basic excludes exponents etc... +for integers or float it returns true. + + Usage: numeric + +Hexadecimal String + +This validates that a string value contains a valid hexadecimal. + + Usage: hexadecimal + +Hexcolor String + +This validates that a string value contains a valid hex color including +hashtag (#) + + Usage: hexcolor + +Lowercase String + +This validates that a string value contains only lowercase characters. An empty string is not a valid lowercase string. + + Usage: lowercase + +Uppercase String + +This validates that a string value contains only uppercase characters. An empty string is not a valid uppercase string. + + Usage: uppercase + +RGB String + +This validates that a string value contains a valid rgb color + + Usage: rgb + +RGBA String + +This validates that a string value contains a valid rgba color + + Usage: rgba + +HSL String + +This validates that a string value contains a valid hsl color + + Usage: hsl + +HSLA String + +This validates that a string value contains a valid hsla color + + Usage: hsla + +E.164 Phone Number String + +This validates that a string value contains a valid E.164 Phone number +https://en.wikipedia.org/wiki/E.164 (ex. +1123456789) + + Usage: e164 + +E-mail String + +This validates that a string value contains a valid email +This may not conform to all possibilities of any rfc standard, but neither +does any email provider accept all possibilities. + + Usage: email + +JSON String + +This validates that a string value is valid JSON + + Usage: json + +JWT String + +This validates that a string value is a valid JWT + + Usage: jwt + +File path + +This validates that a string value contains a valid file path and that +the file exists on the machine. +This is done using os.Stat, which is a platform independent function. + + Usage: file + +URL String + +This validates that a string value contains a valid url +This will accept any url the golang request uri accepts but must contain +a schema for example http:// or rtmp:// + + Usage: url + +URI String + +This validates that a string value contains a valid uri +This will accept any uri the golang request uri accepts + + Usage: uri + +Urn RFC 2141 String + +This validataes that a string value contains a valid URN +according to the RFC 2141 spec. + + Usage: urn_rfc2141 + +Base64 String + +This validates that a string value contains a valid base64 value. +Although an empty string is valid base64 this will report an empty string +as an error, if you wish to accept an empty string as valid you can use +this with the omitempty tag. 
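+
+For example (an illustrative sketch), accepting either an empty string or a valid
+base64 value:
+
+	type Payload struct {
+		Data string `validate:"omitempty,base64"`
+	}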
+ + Usage: base64 + +Base64URL String + +This validates that a string value contains a valid base64 URL safe value +according the the RFC4648 spec. +Although an empty string is a valid base64 URL safe value, this will report +an empty string as an error, if you wish to accept an empty string as valid +you can use this with the omitempty tag. + + Usage: base64url + +Bitcoin Address + +This validates that a string value contains a valid bitcoin address. +The format of the string is checked to ensure it matches one of the three formats +P2PKH, P2SH and performs checksum validation. + + Usage: btc_addr + +Bitcoin Bech32 Address (segwit) + +This validates that a string value contains a valid bitcoin Bech32 address as defined +by bip-0173 (https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki) +Special thanks to Pieter Wuille for providng reference implementations. + + Usage: btc_addr_bech32 + +Ethereum Address + +This validates that a string value contains a valid ethereum address. +The format of the string is checked to ensure it matches the standard Ethereum address format. + + Usage: eth_addr + +Contains + +This validates that a string value contains the substring value. + + Usage: contains=@ + +Contains Any + +This validates that a string value contains any Unicode code points +in the substring value. + + Usage: containsany=!@#? + +Contains Rune + +This validates that a string value contains the supplied rune value. + + Usage: containsrune=@ + +Excludes + +This validates that a string value does not contain the substring value. + + Usage: excludes=@ + +Excludes All + +This validates that a string value does not contain any Unicode code +points in the substring value. + + Usage: excludesall=!@#? + +Excludes Rune + +This validates that a string value does not contain the supplied rune value. + + Usage: excludesrune=@ + +Starts With + +This validates that a string value starts with the supplied string value + + Usage: startswith=hello + +Ends With + +This validates that a string value ends with the supplied string value + + Usage: endswith=goodbye + +Does Not Start With + +This validates that a string value does not start with the supplied string value + + Usage: startsnotwith=hello + +Does Not End With + +This validates that a string value does not end with the supplied string value + + Usage: endsnotwith=goodbye + +International Standard Book Number + +This validates that a string value contains a valid isbn10 or isbn13 value. + + Usage: isbn + +International Standard Book Number 10 + +This validates that a string value contains a valid isbn10 value. + + Usage: isbn10 + +International Standard Book Number 13 + +This validates that a string value contains a valid isbn13 value. + + Usage: isbn13 + +Universally Unique Identifier UUID + +This validates that a string value contains a valid UUID. Uppercase UUID values will not pass - use `uuid_rfc4122` instead. + + Usage: uuid + +Universally Unique Identifier UUID v3 + +This validates that a string value contains a valid version 3 UUID. Uppercase UUID values will not pass - use `uuid3_rfc4122` instead. + + Usage: uuid3 + +Universally Unique Identifier UUID v4 + +This validates that a string value contains a valid version 4 UUID. Uppercase UUID values will not pass - use `uuid4_rfc4122` instead. + + Usage: uuid4 + +Universally Unique Identifier UUID v5 + +This validates that a string value contains a valid version 5 UUID. Uppercase UUID values will not pass - use `uuid5_rfc4122` instead. 
+ + Usage: uuid5 + +ASCII + +This validates that a string value contains only ASCII characters. +NOTE: if the string is blank, this validates as true. + + Usage: ascii + +Printable ASCII + +This validates that a string value contains only printable ASCII characters. +NOTE: if the string is blank, this validates as true. + + Usage: printascii + +Multi-Byte Characters + +This validates that a string value contains one or more multibyte characters. +NOTE: if the string is blank, this validates as true. + + Usage: multibyte + +Data URL + +This validates that a string value contains a valid DataURI. +NOTE: this will also validate that the data portion is valid base64 + + Usage: datauri + +Latitude + +This validates that a string value contains a valid latitude. + + Usage: latitude + +Longitude + +This validates that a string value contains a valid longitude. + + Usage: longitude + +Social Security Number SSN + +This validates that a string value contains a valid U.S. Social Security Number. + + Usage: ssn + +Internet Protocol Address IP + +This validates that a string value contains a valid IP Address. + + Usage: ip + +Internet Protocol Address IPv4 + +This validates that a string value contains a valid v4 IP Address. + + Usage: ipv4 + +Internet Protocol Address IPv6 + +This validates that a string value contains a valid v6 IP Address. + + Usage: ipv6 + +Classless Inter-Domain Routing CIDR + +This validates that a string value contains a valid CIDR Address. + + Usage: cidr + +Classless Inter-Domain Routing CIDRv4 + +This validates that a string value contains a valid v4 CIDR Address. + + Usage: cidrv4 + +Classless Inter-Domain Routing CIDRv6 + +This validates that a string value contains a valid v6 CIDR Address. + + Usage: cidrv6 + +Transmission Control Protocol Address TCP + +This validates that a string value contains a valid resolvable TCP Address. + + Usage: tcp_addr + +Transmission Control Protocol Address TCPv4 + +This validates that a string value contains a valid resolvable v4 TCP Address. + + Usage: tcp4_addr + +Transmission Control Protocol Address TCPv6 + +This validates that a string value contains a valid resolvable v6 TCP Address. + + Usage: tcp6_addr + +User Datagram Protocol Address UDP + +This validates that a string value contains a valid resolvable UDP Address. + + Usage: udp_addr + +User Datagram Protocol Address UDPv4 + +This validates that a string value contains a valid resolvable v4 UDP Address. + + Usage: udp4_addr + +User Datagram Protocol Address UDPv6 + +This validates that a string value contains a valid resolvable v6 UDP Address. + + Usage: udp6_addr + +Internet Protocol Address IP + +This validates that a string value contains a valid resolvable IP Address. + + Usage: ip_addr + +Internet Protocol Address IPv4 + +This validates that a string value contains a valid resolvable v4 IP Address. + + Usage: ip4_addr + +Internet Protocol Address IPv6 + +This validates that a string value contains a valid resolvable v6 IP Address. + + Usage: ip6_addr + +Unix domain socket end point Address + +This validates that a string value contains a valid Unix Address. + + Usage: unix_addr + +Media Access Control Address MAC + +This validates that a string value contains a valid MAC Address. 
+ + Usage: mac + +Note: See Go's ParseMAC for accepted formats and types: + + http://golang.org/src/net/mac.go?s=866:918#L29 + +Hostname RFC 952 + +This validates that a string value is a valid Hostname according to RFC 952 https://tools.ietf.org/html/rfc952 + + Usage: hostname + +Hostname RFC 1123 + +This validates that a string value is a valid Hostname according to RFC 1123 https://tools.ietf.org/html/rfc1123 + + Usage: hostname_rfc1123 or if you want to continue to use 'hostname' in your tags, create an alias. + +Full Qualified Domain Name (FQDN) + +This validates that a string value contains a valid FQDN. + + Usage: fqdn + +HTML Tags + +This validates that a string value appears to be an HTML element tag +including those described at https://developer.mozilla.org/en-US/docs/Web/HTML/Element + + Usage: html + +HTML Encoded + +This validates that a string value is a proper character reference in decimal +or hexadecimal format + + Usage: html_encoded + +URL Encoded + +This validates that a string value is percent-encoded (URL encoded) according +to https://tools.ietf.org/html/rfc3986#section-2.1 + + Usage: url_encoded + +Directory + +This validates that a string value contains a valid directory and that +it exists on the machine. +This is done using os.Stat, which is a platform independent function. + + Usage: dir + +HostPort + +This validates that a string value contains a valid DNS hostname and port that +can be used to valiate fields typically passed to sockets and connections. + + Usage: hostname_port + +Datetime + +This validates that a string value is a valid datetime based on the supplied datetime format. +Supplied format must match the official Go time format layout as documented in https://golang.org/pkg/time/ + + Usage: datetime=2006-01-02 + +Iso3166-1 alpha-2 + +This validates that a string value is a valid country code based on iso3166-1 alpha-2 standard. +see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha2 + +Iso3166-1 alpha-3 + +This validates that a string value is a valid country code based on iso3166-1 alpha-3 standard. +see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha3 + +Iso3166-1 alpha-numeric + +This validates that a string value is a valid country code based on iso3166-1 alpha-numeric standard. +see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha3 + +BCP 47 Language Tag + +This validates that a string value is a valid BCP 47 language tag, as parsed by language.Parse. +More information on https://pkg.go.dev/golang.org/x/text/language + + Usage: bcp47_language_tag + +BIC (SWIFT code) + +This validates that a string value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362. +More information on https://www.iso.org/standard/60390.html + + Usage: bic + +TimeZone + +This validates that a string value is a valid time zone based on the time zone database present on the system. +Although empty value and Local value are allowed by time.LoadLocation golang function, they are not allowed by this validator. +More information on https://golang.org/pkg/time/#LoadLocation + + Usage: timezone + + +Alias Validators and Tags + +NOTE: When returning an error, the tag returned in "FieldError" will be +the alias tag unless the dive tag is part of the alias. Everything after the +dive tag is not reported as the alias tag. Also, the "ActualTag" in the before +case will be the actual tag within the alias that failed. 
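+
+For example (an illustrative sketch; the struct and variable names are
+hypothetical), failing the built in "iscolor" alias listed below:
+
+	type Pixel struct {
+		Color string `validate:"iscolor"`
+	}
+
+	err := validator.New().Struct(Pixel{Color: "not-a-color"})
+	for _, fe := range err.(validator.ValidationErrors) {
+		fmt.Println(fe.Tag())       // "iscolor"
+		fmt.Println(fe.ActualTag()) // "hexcolor|rgb|rgba|hsl|hsla"
+	}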
+ +Here is a list of the current built in alias tags: + + "iscolor" + alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor) + "country_code" + alias is "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric" (Usage: country_code) + +Validator notes: + + regex + a regex validator won't be added because commas and = signs can be part + of a regex which conflict with the validation definitions. Although + workarounds can be made, they take away from using pure regex's. + Furthermore it's quick and dirty but the regex's become harder to + maintain and are not reusable, so it's as much a programming philosophy + as anything. + + In place of this new validator functions should be created; a regex can + be used within the validator function and even be precompiled for better + efficiency within regexes.go. + + And the best reason, you can submit a pull request and we can keep on + adding to the validation library of this package! + +Non standard validators + +A collection of validation rules that are frequently needed but are more +complex than the ones found in the baked in validators. +A non standard validator must be registered manually like you would +with your own custom validation functions. + +Example of registration and use: + + type Test struct { + TestField string `validate:"yourtag"` + } + + t := &Test{ + TestField: "Test" + } + + validate := validator.New() + validate.RegisterValidation("yourtag", validators.NotBlank) + +Here is a list of the current non standard validators: + + NotBlank + This validates that the value is not blank or with length zero. + For strings ensures they do not contain only spaces. For channels, maps, slices and arrays + ensures they don't have zero length. For others, a non empty value is required. + + Usage: notblank + +Panics + +This package panics when bad input is provided, this is by design, bad code like +that should not make it to production. + + type Test struct { + TestField string `validate:"nonexistantfunction=1"` + } + + t := &Test{ + TestField: "Test" + } + + validate.Struct(t) // this will panic +*/ +package validator diff --git a/vendor/github.com/go-playground/validator/v10/errors.go b/vendor/github.com/go-playground/validator/v10/errors.go new file mode 100644 index 00000000000..9a1b1abe935 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/errors.go @@ -0,0 +1,275 @@ +package validator + +import ( + "bytes" + "fmt" + "reflect" + "strings" + + ut "github.com/go-playground/universal-translator" +) + +const ( + fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag" +) + +// ValidationErrorsTranslations is the translation return type +type ValidationErrorsTranslations map[string]string + +// InvalidValidationError describes an invalid argument passed to +// `Struct`, `StructExcept`, StructPartial` or `Field` +type InvalidValidationError struct { + Type reflect.Type +} + +// Error returns InvalidValidationError message +func (e *InvalidValidationError) Error() string { + + if e.Type == nil { + return "validator: (nil)" + } + + return "validator: (nil " + e.Type.String() + ")" +} + +// ValidationErrors is an array of FieldError's +// for use in custom error messages post validation. +type ValidationErrors []FieldError + +// Error is intended for use in development + debugging and not intended to be a production error message. +// It allows ValidationErrors to subscribe to the Error interface. 
+// All information to create an error message specific to your application is contained within +// the FieldError found within the ValidationErrors array +func (ve ValidationErrors) Error() string { + + buff := bytes.NewBufferString("") + + var fe *fieldError + + for i := 0; i < len(ve); i++ { + + fe = ve[i].(*fieldError) + buff.WriteString(fe.Error()) + buff.WriteString("\n") + } + + return strings.TrimSpace(buff.String()) +} + +// Translate translates all of the ValidationErrors +func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslations { + + trans := make(ValidationErrorsTranslations) + + var fe *fieldError + + for i := 0; i < len(ve); i++ { + fe = ve[i].(*fieldError) + + // // in case an Anonymous struct was used, ensure that the key + // // would be 'Username' instead of ".Username" + // if len(fe.ns) > 0 && fe.ns[:1] == "." { + // trans[fe.ns[1:]] = fe.Translate(ut) + // continue + // } + + trans[fe.ns] = fe.Translate(ut) + } + + return trans +} + +// FieldError contains all functions to get error details +type FieldError interface { + + // Tag returns the validation tag that failed. if the + // validation was an alias, this will return the + // alias name and not the underlying tag that failed. + // + // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla" + // will return "iscolor" + Tag() string + + // ActualTag returns the validation tag that failed, even if an + // alias the actual tag within the alias will be returned. + // If an 'or' validation fails the entire or will be returned. + // + // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla" + // will return "hexcolor|rgb|rgba|hsl|hsla" + ActualTag() string + + // Namespace returns the namespace for the field error, with the tag + // name taking precedence over the field's actual name. + // + // eg. JSON name "User.fname" + // + // See StructNamespace() for a version that returns actual names. + // + // NOTE: this field can be blank when validating a single primitive field + // using validate.Field(...) as there is no way to extract it's name + Namespace() string + + // StructNamespace returns the namespace for the field error, with the field's + // actual name. + // + // eq. "User.FirstName" see Namespace for comparison + // + // NOTE: this field can be blank when validating a single primitive field + // using validate.Field(...) as there is no way to extract its name + StructNamespace() string + + // Field returns the fields name with the tag name taking precedence over the + // field's actual name. + // + // eq. JSON name "fname" + // see StructField for comparison + Field() string + + // StructField returns the field's actual name from the struct, when able to determine. + // + // eq. "FirstName" + // see Field for comparison + StructField() string + + // Value returns the actual field's value in case needed for creating the error + // message + Value() interface{} + + // Param returns the param value, in string form for comparison; this will also + // help with generating an error message + Param() string + + // Kind returns the Field's reflect Kind + // + // eg. time.Time's kind is a struct + Kind() reflect.Kind + + // Type returns the Field's reflect Type + // + // eg. 
time.Time's type is time.Time + Type() reflect.Type + + // Translate returns the FieldError's translated error + // from the provided 'ut.Translator' and registered 'TranslationFunc' + // + // NOTE: if no registered translator can be found it returns the same as + // calling fe.Error() + Translate(ut ut.Translator) string + + // Error returns the FieldError's message + Error() string +} + +// compile time interface checks +var _ FieldError = new(fieldError) +var _ error = new(fieldError) + +// fieldError contains a single field's validation error along +// with other properties that may be needed for error message creation +// it complies with the FieldError interface +type fieldError struct { + v *Validate + tag string + actualTag string + ns string + structNs string + fieldLen uint8 + structfieldLen uint8 + value interface{} + param string + kind reflect.Kind + typ reflect.Type +} + +// Tag returns the validation tag that failed. +func (fe *fieldError) Tag() string { + return fe.tag +} + +// ActualTag returns the validation tag that failed, even if an +// alias the actual tag within the alias will be returned. +func (fe *fieldError) ActualTag() string { + return fe.actualTag +} + +// Namespace returns the namespace for the field error, with the tag +// name taking precedence over the field's actual name. +func (fe *fieldError) Namespace() string { + return fe.ns +} + +// StructNamespace returns the namespace for the field error, with the field's +// actual name. +func (fe *fieldError) StructNamespace() string { + return fe.structNs +} + +// Field returns the field's name with the tag name taking precedence over the +// field's actual name. +func (fe *fieldError) Field() string { + + return fe.ns[len(fe.ns)-int(fe.fieldLen):] + // // return fe.field + // fld := fe.ns[len(fe.ns)-int(fe.fieldLen):] + + // log.Println("FLD:", fld) + + // if len(fld) > 0 && fld[:1] == "." { + // return fld[1:] + // } + + // return fld +} + +// StructField returns the field's actual name from the struct, when able to determine. +func (fe *fieldError) StructField() string { + // return fe.structField + return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):] +} + +// Value returns the actual field's value in case needed for creating the error +// message +func (fe *fieldError) Value() interface{} { + return fe.value +} + +// Param returns the param value, in string form for comparison; this will +// also help with generating an error message +func (fe *fieldError) Param() string { + return fe.param +} + +// Kind returns the Field's reflect Kind +func (fe *fieldError) Kind() reflect.Kind { + return fe.kind +} + +// Type returns the Field's reflect Type +func (fe *fieldError) Type() reflect.Type { + return fe.typ +} + +// Error returns the fieldError's error message +func (fe *fieldError) Error() string { + return fmt.Sprintf(fieldErrMsg, fe.ns, fe.Field(), fe.tag) +} + +// Translate returns the FieldError's translated error +// from the provided 'ut.Translator' and registered 'TranslationFunc' +// +// NOTE: if no registered translation can be found, it returns the original +// untranslated error message. 
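+//
+// A hedged usage sketch (the translator wiring lives in application code and
+// assumes the locales, universal-translator and bundled translations packages,
+// e.g. validator/v10/translations/en):
+//
+//	english := en.New()
+//	uni := ut.New(english, english)
+//	trans, _ := uni.GetTranslator("en")
+//	_ = en_translations.RegisterDefaultTranslations(validate, trans)
+//
+//	for _, fe := range err.(ValidationErrors) {
+//		fmt.Println(fe.Translate(trans))
+//	}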
+func (fe *fieldError) Translate(ut ut.Translator) string { + + m, ok := fe.v.transTagFunc[ut] + if !ok { + return fe.Error() + } + + fn, ok := m[fe.tag] + if !ok { + return fe.Error() + } + + return fn(ut, fe) +} diff --git a/vendor/github.com/go-playground/validator/v10/field_level.go b/vendor/github.com/go-playground/validator/v10/field_level.go new file mode 100644 index 00000000000..ef35826ee6f --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/field_level.go @@ -0,0 +1,120 @@ +package validator + +import "reflect" + +// FieldLevel contains all the information and helper functions +// to validate a field +type FieldLevel interface { + + // Top returns the top level struct, if any + Top() reflect.Value + + // Parent returns the current fields parent struct, if any or + // the comparison value if called 'VarWithValue' + Parent() reflect.Value + + // Field returns current field for validation + Field() reflect.Value + + // FieldName returns the field's name with the tag + // name taking precedence over the fields actual name. + FieldName() string + + // StructFieldName returns the struct field's name + StructFieldName() string + + // Param returns param for validation against current field + Param() string + + // GetTag returns the current validations tag name + GetTag() string + + // ExtractType gets the actual underlying type of field value. + // It will dive into pointers, customTypes and return you the + // underlying value and it's kind. + ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool) + + // GetStructFieldOK traverses the parent struct to retrieve a specific field denoted by the provided namespace + // in the param and returns the field, field kind and whether is was successful in retrieving + // the field at all. + // + // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field + // could not be retrieved because it didn't exist. + // + // Deprecated: Use GetStructFieldOK2() instead which also return if the value is nullable. + GetStructFieldOK() (reflect.Value, reflect.Kind, bool) + + // GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for + // the field and namespace allowing more extensibility for validators. + // + // Deprecated: Use GetStructFieldOKAdvanced2() instead which also return if the value is nullable. + GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) + + // GetStructFieldOK2 traverses the parent struct to retrieve a specific field denoted by the provided namespace + // in the param and returns the field, field kind, if it's a nullable type and whether is was successful in retrieving + // the field at all. + // + // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field + // could not be retrieved because it didn't exist. + GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) + + // GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for + // the field and namespace allowing more extensibility for validators. 
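+	//
+	// A hedged sketch of use from inside a custom validation function (the
+	// "Start" field name and int64 fields are illustrative assumptions):
+	//
+	//	func endAfterStart(fl FieldLevel) bool {
+	//		start, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), "Start")
+	//		if !found || kind != reflect.Int64 {
+	//			return false
+	//		}
+	//		return fl.Field().Int() > start.Int()
+	//	}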
+	GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool)
+}
+
+var _ FieldLevel = new(validate)
+
+// Field returns current field for validation
+func (v *validate) Field() reflect.Value {
+	return v.flField
+}
+
+// FieldName returns the field's name with the tag
+// name taking precedence over the fields actual name.
+func (v *validate) FieldName() string {
+	return v.cf.altName
+}
+
+// GetTag returns the current validations tag name
+func (v *validate) GetTag() string {
+	return v.ct.tag
+}
+
+// StructFieldName returns the struct field's name
+func (v *validate) StructFieldName() string {
+	return v.cf.name
+}
+
+// Param returns param for validation against current field
+func (v *validate) Param() string {
+	return v.ct.param
+}
+
+// GetStructFieldOK traverses the parent struct to retrieve the field denoted by the current tag's param
+// and returns the field, its kind and whether it was found.
+//
+// Deprecated: Use GetStructFieldOK2() instead, which also returns whether the value is nullable.
+func (v *validate) GetStructFieldOK() (reflect.Value, reflect.Kind, bool) {
+	current, kind, _, found := v.getStructFieldOKInternal(v.slflParent, v.ct.param)
+	return current, kind, found
+}
+
+// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
+// the field and namespace allowing more extensibility for validators.
+//
+// Deprecated: Use GetStructFieldOKAdvanced2() instead, which also returns whether the value is nullable.
+func (v *validate) GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) {
+	current, kind, _, found := v.GetStructFieldOKAdvanced2(val, namespace)
+	return current, kind, found
+}
+
+// GetStructFieldOK2 traverses the parent struct to retrieve the field denoted by the current tag's param
+// and returns the field, its kind, whether it is nullable and whether it was found.
+func (v *validate) GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) {
+	return v.getStructFieldOKInternal(v.slflParent, v.ct.param)
+}
+
+// GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
+// the field and namespace allowing more extensibility for validators.
+func (v *validate) GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) { + return v.getStructFieldOKInternal(val, namespace) +} diff --git a/vendor/github.com/go-playground/validator/v10/go.mod b/vendor/github.com/go-playground/validator/v10/go.mod new file mode 100644 index 00000000000..ddecff62826 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/go.mod @@ -0,0 +1,19 @@ +module github.com/go-playground/validator/v10 + +go 1.13 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-playground/assert/v2 v2.0.1 + github.com/go-playground/locales v0.14.0 + github.com/go-playground/universal-translator v0.18.0 + github.com/kr/pretty v0.3.0 // indirect + github.com/leodido/go-urn v1.2.1 + github.com/rogpeppe/go-internal v1.8.0 // indirect + github.com/stretchr/testify v1.7.0 // indirect + golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 + golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 // indirect + golang.org/x/text v0.3.6 + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/vendor/github.com/go-playground/validator/v10/go.sum b/vendor/github.com/go-playground/validator/v10/go.sum new file mode 100644 index 00000000000..57500933f32 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/go.sum @@ -0,0 +1,50 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.6.1/go.mod 
h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/go-playground/validator/v10/logo.png b/vendor/github.com/go-playground/validator/v10/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..355000f5247d50e979cf5db5de38188ef4649a34 GIT binary patch literal 13443 zcmbVz^LHiB^LK39wrv|5-`KWoTN~T9%?&s9Mte8f*xWeZ`~Lg^&kxU>)6-|>Om|IH zcUM=vsybFxSr!?A009gP3|U@IN*z>Z{#W2&KzAW<*L_e0I`lYd#@osT8GH952<>}$=#%H%txCM^!YLYIPG*~?*PoX}XfpXD#$r$vg% zUJ@M8Sa6}E0bs?J()q&Aj2!Xx^!*X7wf45!j09TZ!t8tmiZrhYa~rM!hkOgG3jNOL z$t%hsiEZp{`uZS=pUq3db%5@e>LpqUR%RzF4Fp&XYtszH3NMj(x&yBfN!B@dDe(i*$ zFaI9z`VK(*+SzZ3Km$V|gFS(NfPUS&ND}#zKM&MsZR;zy@SJwDwy5moK{eH84yz0`{Dhd+jynpps_Wzr*Rl)ctU%7jk!=>D(g}(sK zP}YV(B1|d_4NeR~4qlx?36qk5ng9u-wt+@fOTlvqhk>WQ%HxtjxBspSS(-6OpP;_x z73LX72W9oA=yUj&B*sjt0z}2U44ACNztdo!tbwR&pl8vCLjf!@HDwcP;p{h$JYsrk zE3Pp7L^A>!xwNPSX+2zrQgJ8|CCr11n`u|=C}{? 
zlHLN%{DxBD;+;&!6Se$BciUB@EQ~Y8_ZT-Q&4p}|A3l`R=AVR9Kt+V~a3a3V{l=)gHBK2op+X}BW7o(X1K2eRTZ^; ziO?#OmuWkXeCj2*{H(1C#qnQ>tz|Kq>*#cF7g)+?3G3(pVB@N37)9YHmYxa}CVb-% z@SHf5CnrEMiI6-&fkkOb9ema$%-Ld}qN54xNf|CDt?#e@Aec&IEcEEpu3Ak5Y z>0@s)b7yHEr~UCsek0JVuF%66MBgBxj-d!wQu4Evlx;p|pZG{&=4VV)*pIE{{f=SO z;V$)QC5ae=-6(Nc68{(S;2ymNVxIiwAs9}A@vA2?55kfV(qK>S6caF|bywd&p8ydL zB}xJ~6Di7u^Xl{s1E&b!{FXH0#>1$=MTNA7+vd;Pm*#B`iYRecX>5H7^iqDqQ{GQH zKNNh0?p}h?nEjh_Ft*^M`+(a;L*rKgPp=E!!}stvVxG|YKY=Yh25?+RloCoyT3T~2 zr1!?YL58}YTlyj1sTl_H(oBl48zJPwJFr9|r(>T7Npe$Hyl7Pm(dZ}|x;n!X(4wtZ zeNCCz4LTygy(gl;pV;dp+-Lpq=weiOW2Z_Lt@RNd_s43tZ>$@23^%6`T}rfexq!%# z)e|oR;kRY~2fW@V(in6QZzE*6TubN0<>|v2xiX)v6->d$no+&np8 z=DZPj>yPVL2Y2U^MJuW`R8R{2@Rg&}`S+$yEgsGihuW$3 z2y*A5Rm-TCh*xaY#R1q)HfzQS_%fPHCL7200}u=S#u`m zvW%z6F_UcmBq~g~s|d}v6$Q?noL`Z(X;@Q$i>kw^bF}I3A8QQyAE_nz-`H~a9o2}- ztPUs0q(DTZ^Yx3oA6C5I?{nHCX0qfW&C2r}h~~slhe!$_Hh1WB`w?_|D{JsF#zpgf z;F^yDTZs-$?`myzyDj@=x}@L4b~_KtUWzV+uiL${48Qh^ZdoywlRNR<*WLFY>v0fq zeWQS`g6{8q<#x){FrCbZlcTAh?+fw^gB-2LpRnlF^}`$D;(KxTOLn;dXs3Yl(uW$g6hyw3{wZdTVg|kdSet`n+SACG=!&%#r zl+Ha_MzD$G>iQb%tW~Uus7-zOMPI__Qo92dK3VKkGgR#;-!`uw++~l5J?MT+BUCv3 zcItfZO)uKXlipj1XD?F|>3frjQWA;$JV>TcHHrcrR=Ql;-B}Bb4;f|uVo(S7xL(QP zE%c8{bnchCJ%aG)3x8gx0`Hqq`eapfWqK`~Ec6Mea`v0{J?4~x(S2D#-7sMBR1X;{ zO-QlMUsyD!#jI^8v6y2J8TinHz_zsU@;3|?TfT0F2b2A7aX&aEQGc;IZ>UV*cToht z27rX9TA$h1ZMxk`KX|$6o$)=$PxIM3k^FhGmiJMaA3fBM6(M#efLJ9ucfbo2TkroP zxE4Dv?B_Nkef;0LYVj3nk|C9-MLv{y^-tY`SD(5phR2KMn}9@?I@SQ^#m* zu>9T8l>)311+yf)qc`Zp%3Cp9FS4PN18t5zZGy-!{f^5eJQA&Fb>Llf4kF^OZ}Tu z=jyadHyzlQLaf@_eAf{CFb}_v=Gj*BLc$VrMAe%hAL@6JaXkt^p&>`#SXjBAX!3#; zZ(sPdwtkoS08=HP@lruhHm*fIlu{y~LTu@+@;u*LBUU~nbQ7S{eH09xc5^_Xtu!q@ z6^P#P!A-(qwW30Th;TBWNp{b1+lP1D!2Y2In`HJ8=DTs8;1)Y~TE2Tco&agHaJGJRtE&{R2y^@Gnpny|$qxXc2=Ps$@$~9mxET{1Q$%i!i#frlzo0UOe_Y zMxNvLk98G99Jhl+-rMB_{OyQsE?70nTDUTZf%>P_;7WAz0a+FG*4EALUD*p3UWt_( z3yZrIM%L#2dleC=K}bD*)-@4195ctqtgM0iQACxJ?F&0O<{t?%^dK1pMJo*-dHj;E z%Vt=-^pa?Z(eb%_rx~$m@yuyvX^t!IvZZ&&LJtY`#;x+PXT-Gb~(3>gv;tf~4N37#aCX z-tW%A@AM^Cf&WBJl*|wp9s0RGq_rCL)=Klfe3e8BUY|7FGZM)#ZdT04zyZ#{*|<&8 z+dsxt9B+krqDfJbykPO==|6C|yAi)*jkV&C{Du7Y#drV0`{jGeFSFOANIz#B-ncz= zB?v}IR2j5eCJ`2>yNMN9<}h!(e$i><|KSPd(Ff^lC-7pg&G~QJ8T0JD-37gq1-K+V z;1?GW_CVrGX3V0m%yvW#+uGLl^01=9zyGrgZ5fJ6GeeULS25^4)YCL=-Z!w`r$tH= zj-ikdG|nI;y1wvvk2)h7^hL0Xvnxw)Y1u}&9Vv%k1};Z0IqW?AQ8)B@QOUa?ayt31 zX^`u?pa(0F6YpbrT;e2^auw#%0BX_ub?}_ieYF;4rGRd*1_vX-+Xv9I&yR@ zZF(3;`kXg0vHdTn$5Ie;gpS4@djPPJ60-Z_1?!DwQz9NO2jKDbkZ^oJbQVc?6v#&0 zAW|kWVx3>tw#eTFT> z$S^|&ZWo)7Lyes7r)@VL=2A|$JW1nr)ed9~F&(_uHC1f;YO_5oj&Afj!0(9M)7c*f z$rra8Ji?1Bc%e|v^CcS{(B7RRCc7Wrrs=I7)f#8IE{rDM)o~`?Y8!;pSaL!lLHCZ% z2RHV1+l?QSk}_AkH)`_s#(xKJ?jwKC`csy*aOGtV&`hmHIG<5hXtzm+=iQkb{pyZ< z%;RxAP~z<%lTo;vNWd4mn;*jW+1tVM*tJ0j)}5|LR4#M7r{PkXJaW;yHBr@9pIuiG z8V1M4Ci1&Z6T1I>(C@#6rJT=}4_MR%kp=0LFD)iInpDCFQ;rm ziA+yF-c%|NyQ8);!vM6)#xu8qylq9(nieBl!@e}S5}R@)8LEU)Q|o0z)Z3vP);l2n zvCG7gtlR2XtjF1}fhC?!r{BZ4#sNRJi-Kgt?Rd(rePR;wmE}rqM-z^#fA_=ptrRR~ zqS-A)prf=s19gdfPBn?#&j;!a+e1!wX7|RMt|@0KQ_^z7My)2imN}+? 
z&Ro$-#EC#FN(11}WJ|$X)eQ0hf7Xye3AhvowX$0|1+x+uY~g?ccTKswq+io;vFNd6 zr7#C9z4;>@b-tg$UzV@9U5hK{mDW{6%YjDa>FJu9Z8hZyN}pshPN=W>uI!^Q$kMqm z+}IiQA9sdYvoB1IiBfX#m0axM;6c8}N>K$tD8kW56>r1h5t8J!3X0YAj1|Aw&~l@A zxf2^V`F;A0W?i!7*yQ+#;?4!!1ZQrBEI$9+-N z6P_sTrV&}s7MX}77Nq}~KmQy&5T&0ZWX--y(<$MEOLGIm!7k)jQOWggN0!Rg^`Bj3z7;;PquhhFnoqJeAbUfHR~d2;C{_De_Ogp81i65*qU(X5fweyv+B#w>RW0 zm&_w7Zm z`YWfGxm^7MK^A>0nDITy;gz^Lzudv@BC>+^JOVExD%|?aa0W#9qe``&CHjF6vqV zB8&tSqdhz{r4(|w3!g-DZKg>^k=!a74kk`D{P(2>&R~8kXP)^Ns3jTlnM7|b=I@?W z*3YW^eW^H83@t)lUE4LAm#(ICbAZTgW?ohHU;Ok91QJvMGp6fHL|&TQb|ICaXi{OO zrD__`B5>e)a6orX^!P5CsJZqQhI9-E6v4*!cC1vUi2?G|44quG+rfLS+7ZX;meFoT zMa+fb0pH+x{|o<7L^;cM5J4}K*7m~l#N2_qa(YL%G9rt7(fo;z(CaJOODkCKPA9`Bop?dIYFl3wBU&l zdqN~tz4k#i1&+HT_N>0Qm%uxG;}xqfaciIXXK|67VNTu0yzMz6pt6)m~ z7y^EZ+(wMlK9yMiwkhp&>cmXQoRzGf`o{MmkrGaxJY*%s0Dza_WczOvC8IXtY4zGE zoAfaSy~MQoF^;l5RWb}DJq*S_&wp|_lkCAaR~iQkooeXo>yX+1hRw(?%#&k0 zm|IG1?>%mpBmLr(*DC>|Vr>bN;nKsdZLlS4*_h%G1n`;;6|sE0rg^T9KG)Swoz!z& zJra776~H1@daS@C;jzI*0~;x5(E1Fpo!nLAV=SmM;Q>*#bxdaC<;wO{!IV4ONwN}f z8NK=|T>UjQ5%%_C3KAVb1}wC~Feno-GH|l>&?HI*RX~t*0XtJ~S0R6Kst$kD*7mw& z{mR31-KopNO5bKJqku0*PjnBKFE_1y_|tmDtiN7SF!NZpwNb5#sV6w^bu9#1B5K7# z0N}))422cqc#^(uW?wJU*^KLe?VU&(*c6j;U6^LZcQoK4POU1{yKSH^?k2$VGLEWB zog!7!3_Kl%apr)=%d3Rpw_4BDLZf!1tIdN&D;Yg?X2&jp0vSBqbz)XX`Wu2%`IWJS1s3lhZ7H--?PJxQLg$XONw$8qE z@4G(S5}8yFwM`{Rdz9E__ZH{{Gusj%$t#w4+ac&G3wkM0n&qZPP}J9r*av-Zq- z%NdhH1sgs~Oq9{PLkkxDiK6MQo36OTZrr&F7k6+F*a}5hV<;1u+B`QQSF#ti5`pI3 z@gvRFovLjNAri8~54co-plD$yQSX*b95r9t@B%~eI2r9NqXw{mGRKtdG5*|wk~yO zW)?msL*Rlpy{X4OLKx;RTX5`t1RY4!(bJ`a7rJ?=xM7fwcCL;k7j%-*cj9NLfbojM= zsFk;>hWcz(m*MFBO66YXrs>D4!BqdqWy_oZ>c&}P@|L*1a zVk(-?<3wy?;t9XLM*dyTj^XbicaVIM%BJouomO8jaqzV51LbTf>Ywq=#cXFtO*-oC z$O(ezf}G*);p^d{5Cc9apUxWE7RHp-F$ne9h?~C!5ok6%glp3JFOLJd2A-Fm@I}Id-s z30mMGUBh}2LTd&+z4*b8fB8hNy+ke`kmJbFXYm9=Ud96znCvs;Xa*GB`{*jEPp($~+DX+RP*)$prD03Z~ zot>}r&YE}7>5Wkie5E(*NC^ihU`EdF6Ezw%Y_=C72{{2}2gipm z*Kp)Aa`c2J&xYYZ876z^z{Zt5pR2|?72(fT&g8MYt4OgJ8>+ZDr_oB=>9xhEw%27Y zdZWI$a9GrD@5Los+iFbyl507c-TMRH3x)MMT{uczOKy3!ra7;z>2})#CRqJW`Jr@s z)uA36KCgr8c)q);G>N2B3p4kyV4NWuoxzls;Xqq+eg~fI$A3otuea;KAQGd#L@`_H zA12vy0BVLhln8XMstlX5M43pjjcZzAO3GUc$zV-2^wq;7JE(EwPLoa!*2(XgxwSlx z2_J0xvN3`hnHWW^kuO7z`%AW8B~t4yhgSPHzS%y%$=}V7s+fZJ0#{k0^O+*L0|Zg4 zfX#xMrgl!s6Xo=iNk_&jq83fy!YAvDSuT8GO6%m58Pi*2YUR|;TQb;TdN=rqTpKIZ z-p&;$N{lIA6N##--%mV~CtEEa4=)@J`4YoT@$9}xH&qcX>lvW>wj*s%n1(JbS$r*d zK9ca~LMdTQ7v!Y34Q6Zh;50&tLX-E@$j3m@9%{iLEyrjeeM_lyMFuI6_pPMcUemdp z%6;iM!PP84B5GQ70B|+R^|BqipYC7_%BZRXP;eo#KZ6EBvBzlpn}@P3p}|8#I(4 zul0x<>TxI(s0g?8v|a89+n1)Jx##kH*g@FY*niUw?{Sqmm)Xi-;WBKlUieBT&& zR3bfdZ|yI{!e~+H2q@uF@=N3k*$H|RZpj@5hvmw~_Py+wV14-k8ynOVi-{@1gB;!g zop(C8F2W!)$gD|@VYtF3M2`gl44ny?45baF7yCBl4PB@{E_nNFz6{2b zr3nxXdd~S*FLLzzO#wytqw(3PI0_%D56fEkMQPca=JX$gqH9aJ{ZpKvk8&kz zVxpVpl56nj^P5R4Lm_KK_6WeFW0cD?IY zZu%H?$YfA5eBYF>2g^}+ig>LD^PgW_C#jRJ$|LNXlubK_b)pT9uqy)nlg?;4DG*(P@1w56aN4^TZM6bWUyCM1*1Czy}HIA#1w25=*y73 z^<0dmnQg(qFmL~t3lPYXx1H8~Zxg@d(x{4t40F=4{->PSRp{RzUszJHDlXejc7G)lY;8~fKsU-hk(590&9v& zFxr}ZKJqco?V2qb5bnkeLoQVbyu32=?$Kwh?;N0OiGN|7=6`RkdoJ(JT@djZ8dUCb za?z#~&v8Fm?V}L${Zr&aQ8JC0EF>X56u4pkjt-w^+1bG$r9f1OrJ$<6qE&iV7_}kN zQweT&?sbJOanV*#E0|bLjkCC(vOm|{V<5MsimhiSt-uxzjxz9SMUvMQK{`~RZ+v_m ztb%l8kCZHm@?oa?Mb$L;Bv2$(K*V=w5SD>$*ITQ!W*n%SOXJlQO>pLoRxLI#%aHpA zmYUE<8T)d$P6F=j$+`7W&dSV^FR;c5cvU=igm6$Q0Bz?EE|?msF@JEX89DoMOUuTr z`R{3)%|h-VtA7`wTgs8)s;PQ6$6IvQTsUzrO=Y6G!D)=Xx9w0$Gba6n1y?{dK@SoK ze54vzFH|9D2~QbpUTb_H)IYDfa|J}Lq9?%CFHbsIB8(vc1WUKbA`4uL=6b&iurle2 zi(4jg{2hA5K*7uKNnBA0KV4x*3NO0jb^QuEWV&39?%#Ve9aEQ)AWUUwycBSf_o%|` 
zTcm^fRU(B}U8I(N*#z&8fsGAkKpBAt5@UnRa>N{07@%qaw@bs$K2R5VP#c$2^Q%RQ zVuW0slT(*~U8kk?duR;_jOTpmwrrbx->n}JX^_|@zO|a)Ik@>X7HOQ(!z(#CF@`^7JGf2aEfn1S0v}oHKo%uVUMDjl3<>f#kD$hfw(^nW(aRB6AaG-TDAh%oHjEJ=J$;2s zNEtTUsuZa{ji(_8)1j8gUOb~B9fXBSc)M%Hfsu<`&aJGt{{#rePUQ zno1K;1#~H`?Rd#V$}MtvkS~UOgsP~@^u^u{ua{D3V=Fa{2IKwI7>|!cOUteP#X!Z_ z0=9Z(Y!#lhZpJ0TKxMl6%R?6~lt!EvNztb>dbADO5-ZkYS1f(TlqF6|c^Euh&Dojz z>7lTs0ClkTp5>|*~0 z_E{QdSna;}KOcu@*xbF3xyhffB&XIT`p{6hS*F))v#>=YEtJB9Cr*b2YtK$qd9@hS zP$+_%N43)URxgAF_fx!4#yRV$Y6W06mtQ<`6RwW>A*AaHVyEylR` z2)a@71;tKHeL9Ik*CB!~)Qw;=Fl6gGLG;?;LTeerep>@XsgVh4FN73z`?Z=yj9;v% zBxr{}l;h6)J_enD_Xs$w_!zAMpK(;B2{L7;9924=)DeQQG^=eR>S_sq&5Q6tF?!ZR z5Qknd!5HirCsnTIK}aSQLQ_)EQtb#JSG@QUVD}mpOr(idOXutsza|QP5K1WjdsWrs zkY855Sph`j8nzw^f)v# zwC?)Bwo9-}lWq~G9ow>)3->q?FnXQ<$)Y*Vq?hu8MmH8aL1yGrPXCBBQZxc2c}gM_ zazx&=nWtc(!DSU&y7M>VVWHK|f42Z4$ zgM>V`3TB$s+ZXAeFq65x>B{Mdc~^YySq_px;%mpmr2Huz$C}=8-KJN$50%RxgBJ`5 zP&~~O#I1B_1NXRe11=!ZG+8?m1fW)N1v3M~LuJCB_sLZ){E+CtNyJ{zEW!m2t6~4F z%H681*4phFkdH+Wb0LC&2rz`YF05QDpj#b^CS9c1iNN6yizQhsjIRb7ouDlN!>cKa zg7n0;K8z}XHxXi2Ecua32b&VylFo$*L~5$eud(~1>99o0y8R!-= zN_$<&9xytjEu>6segSMd2}6=@3VVB@GhvK>e5(3kr&=sV&}-F69b-m9?Ip^S&_1rK z)REQFT(LR2P6B9EqR$$Fjhw z@@PRbM9s{1I>jgtQ$A{Too_2xnL(GQD!IAjpSUUis4yB{sY$2}MT}d+!IfS_pu32*B z(%fM}`E)&K1vl5$L@*MaDwYf>96D4c;kb|PKTS;+YgrY9Ko@0Q)3$VlDZzF2D5qzp z3ooIC#iZNaZ8P4)aA|hlnC{a}TPrb~vTu2a+lFss)C;W1zZ926mndG{GXR|B)WfV} z(w*ugyn0Rs7_}^z1)X+M+)NXwrEUSMFwNFX_HD7RuV z!p-X54t?QhInx8)RxXg9Gk>_MaF-=XqAoa3dpy^>ZnMr+Cdt={MR4L zlI?2b=gsG3&Ijqpwy~pkv)Y=9n_|E|4ghMbhOV!U-CR;Kg+YdeFwVdfiyc*SZ33P= z$^Lo3LKgdGzEro#sbQm;1h}cbE=gD;VPV$RSAC(NX#P3$r*wNcQV1ac_8s?7<*v7Pd5{UE zcj<@b8s(}ugB+l6X(Y2RciZYZr@$lnh&EYVD&Q;@IV#m~)&N`>i%eC8QGg&uMXI=s zQLp~@6So9V;*{|ZngyWwRLWwzOCrXEh&`MazZ2wm)kNAKFDMMTX_jGA!oO_hJ-YRa z$^tLI^PHPVUu(o9eNPBKBkY8oY*JvIpawcN$Hq>rF`BD0kq%ngOwjI!^ei&^{U1>= z!^&Yoa|y-vSLjPKIP(7C?$zHcE6TomiB6zAM~x|S=v76w%>=hQ$rVordZSPGH_r!u za~NE4L)9W;Zi;ur8=N=;g2x`9ew-T?zd44CWDowkmcCKC2ZEkwyy{Cgf?DBwA)VTz zfJ;~J*q-_{>l2=#Z#w%BHa5yiZUb#89mRN|$!s*X41Sj}m@%Y%D8!j#oex}HpuLkT zi_70a875WHbj(&5q=Cb3*-Fn-b_=@DxVxh?M%~C#aO%I9A zfDhFlS)5h5m^sgc`qgo&LB;4CbJ+4Do$EQJL-i45cQ|%b4D${*iHS6$Z!lFja4{Z& zm_cQ;+$jmX_Ipjb?|qoQbz3TEQd@UgHO-t8i&2#uH*%Y+@t8HptKD;^>9mRb|w53r?9PIIfP$v|io_ z*cIdNpF96_?%r4O-CUdtgPsVgQ7~>`(Iv=~Vz&=N0I0@6UyQtkp156a;eRI#QPSg> zAcDjr+gD33>K!@@N;gSJr1%xowp?n{?=lK{fWo0V0xrMs(WD=)!hLrQ-FS z@!mYs_Wz~@&+paJijML41V^ce^;23KtbY_xNJKz@7&zTeFEoHm7;$(UI=YQFs#$@ptk!EQ42Y4FfxEf@TOsVt37 z}4AVbnC9Ri){mLjd@o@_vA;wzpV8G_ypN3Y=M08M6ZfM2Xtkpu^=A9}u!A?A z)0NYU%Y#TSSE#tnm?KTmo4 zd|UY(Tqjl;m?`7^R${$QG*ZIW=)p?(%lWYXyf8k9{rdP=)z$ty1cAPSJfgXTOPXN1 z^D-Pknjk$jWsP@0+o&sn#8=lA=)a-i-j|YE5DwZ9s#vk3w-c?}%C?C;Q@<(1;-i>L zl}SN#MAX*71GGR14&RiPaHe@3srrMMPYHjml+|aGSDU_fRi{h1_NlT;MHYR@D(IR3 zt6;N#m)Tdz?)V=$7s!&Z;ZdmJFd0m>InR~Umqz^)eM=coLx(q4tw%~Z0U73ho^=Mj zF)fREGIOlh6`*ARYk?K%ir!a_Yvw9snW){J#zyP<3!#qD8Q_2~d|so+(Xc54smil}_4kGqEK^l7mVKQmM+ zBR70H=a2QXGB!3jg`P(Kg~lE^KQr-y_EVJi%}cMMzb+f?)6-|yp{QzpZdk)-9EhXzQpEZ(ZuvkddnL`d4Y{JQ>J+Eg}}BSGkUqgbqYuem+;8++@MM(|;Eh?pTI_O&4Gy6PBOnIB)^cWfo! 
V^Rh-@K?jDw` + jWTRegexString = "^[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]*$" + splitParamsRegexString = `'[^']*'|\S+` + bicRegexString = `^[A-Za-z]{6}[A-Za-z0-9]{2}([A-Za-z0-9]{3})?$` +) + +var ( + alphaRegex = regexp.MustCompile(alphaRegexString) + alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString) + alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString) + alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString) + numericRegex = regexp.MustCompile(numericRegexString) + numberRegex = regexp.MustCompile(numberRegexString) + hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString) + hexColorRegex = regexp.MustCompile(hexColorRegexString) + rgbRegex = regexp.MustCompile(rgbRegexString) + rgbaRegex = regexp.MustCompile(rgbaRegexString) + hslRegex = regexp.MustCompile(hslRegexString) + hslaRegex = regexp.MustCompile(hslaRegexString) + e164Regex = regexp.MustCompile(e164RegexString) + emailRegex = regexp.MustCompile(emailRegexString) + base64Regex = regexp.MustCompile(base64RegexString) + base64URLRegex = regexp.MustCompile(base64URLRegexString) + iSBN10Regex = regexp.MustCompile(iSBN10RegexString) + iSBN13Regex = regexp.MustCompile(iSBN13RegexString) + uUID3Regex = regexp.MustCompile(uUID3RegexString) + uUID4Regex = regexp.MustCompile(uUID4RegexString) + uUID5Regex = regexp.MustCompile(uUID5RegexString) + uUIDRegex = regexp.MustCompile(uUIDRegexString) + uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString) + uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString) + uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString) + uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString) + aSCIIRegex = regexp.MustCompile(aSCIIRegexString) + printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString) + multibyteRegex = regexp.MustCompile(multibyteRegexString) + dataURIRegex = regexp.MustCompile(dataURIRegexString) + latitudeRegex = regexp.MustCompile(latitudeRegexString) + longitudeRegex = regexp.MustCompile(longitudeRegexString) + sSNRegex = regexp.MustCompile(sSNRegexString) + hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952) + hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123) + fqdnRegexRFC1123 = regexp.MustCompile(fqdnRegexStringRFC1123) + btcAddressRegex = regexp.MustCompile(btcAddressRegexString) + btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32) + btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32) + ethAddressRegex = regexp.MustCompile(ethAddressRegexString) + ethAddressRegexUpper = regexp.MustCompile(ethAddressUpperRegexString) + ethAddressRegexLower = regexp.MustCompile(ethAddressLowerRegexString) + uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString) + hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString) + hTMLRegex = regexp.MustCompile(hTMLRegexString) + jWTRegex = regexp.MustCompile(jWTRegexString) + splitParamsRegex = regexp.MustCompile(splitParamsRegexString) + bicRegex = regexp.MustCompile(bicRegexString) +) diff --git a/vendor/github.com/go-playground/validator/v10/struct_level.go b/vendor/github.com/go-playground/validator/v10/struct_level.go new file mode 100644 index 00000000000..c0d89cfbed7 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/struct_level.go @@ -0,0 +1,175 @@ +package validator + +import ( + "context" + "reflect" +) + +// StructLevelFunc accepts all values needed for struct level validation +type StructLevelFunc func(sl StructLevel) + +// 
StructLevelFuncCtx accepts all values needed for struct level validation +// but also allows passing of contextual validation information via context.Context. +type StructLevelFuncCtx func(ctx context.Context, sl StructLevel) + +// wrapStructLevelFunc wraps normal StructLevelFunc makes it compatible with StructLevelFuncCtx +func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx { + return func(ctx context.Context, sl StructLevel) { + fn(sl) + } +} + +// StructLevel contains all the information and helper functions +// to validate a struct +type StructLevel interface { + + // Validator returns the main validation object, in case one wants to call validations internally. + // this is so you don't have to use anonymous functions to get access to the validate + // instance. + Validator() *Validate + + // Top returns the top level struct, if any + Top() reflect.Value + + // Parent returns the current fields parent struct, if any + Parent() reflect.Value + + // Current returns the current struct. + Current() reflect.Value + + // ExtractType gets the actual underlying type of field value. + // It will dive into pointers, customTypes and return you the + // underlying value and its kind. + ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool) + + // ReportError reports an error just by passing the field and tag information + // + // NOTES: + // + // fieldName and altName get appended to the existing namespace that + // validator is on. e.g. pass 'FirstName' or 'Names[0]' depending + // on the nesting + // + // tag can be an existing validation tag or just something you make up + // and process on the flip side it's up to you. + ReportError(field interface{}, fieldName, structFieldName string, tag, param string) + + // ReportValidationErrors reports an error just by passing ValidationErrors + // + // NOTES: + // + // relativeNamespace and relativeActualNamespace get appended to the + // existing namespace that validator is on. + // e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending + // on the nesting. most of the time they will be blank, unless you validate + // at a level lower the the current field depth + ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors) +} + +var _ StructLevel = new(validate) + +// Top returns the top level struct +// +// NOTE: this can be the same as the current struct being validated +// if not is a nested struct. +// +// this is only called when within Struct and Field Level validation and +// should not be relied upon for an acurate value otherwise. +func (v *validate) Top() reflect.Value { + return v.top +} + +// Parent returns the current structs parent +// +// NOTE: this can be the same as the current struct being validated +// if not is a nested struct. +// +// this is only called when within Struct and Field Level validation and +// should not be relied upon for an acurate value otherwise. +func (v *validate) Parent() reflect.Value { + return v.slflParent +} + +// Current returns the current struct. +func (v *validate) Current() reflect.Value { + return v.slCurrent +} + +// Validator returns the main validation object, in case one want to call validations internally. +func (v *validate) Validator() *Validate { + return v.v +} + +// ExtractType gets the actual underlying type of field value. 
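+//
+// A hedged sketch of use from inside a struct level validation (the Nickname
+// field is an illustrative assumption):
+//
+//	nick, kind, _ := sl.ExtractType(sl.Current().FieldByName("Nickname"))
+//	if kind == reflect.String && nick.String() == "" {
+//		sl.ReportError(nick.Interface(), "Nickname", "Nickname", "required", "")
+//	}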
+func (v *validate) ExtractType(field reflect.Value) (reflect.Value, reflect.Kind, bool) { + return v.extractTypeInternal(field, false) +} + +// ReportError reports an error just by passing the field and tag information +func (v *validate) ReportError(field interface{}, fieldName, structFieldName, tag, param string) { + + fv, kind, _ := v.extractTypeInternal(reflect.ValueOf(field), false) + + if len(structFieldName) == 0 { + structFieldName = fieldName + } + + v.str1 = string(append(v.ns, fieldName...)) + + if v.v.hasTagNameFunc || fieldName != structFieldName { + v.str2 = string(append(v.actualNs, structFieldName...)) + } else { + v.str2 = v.str1 + } + + if kind == reflect.Invalid { + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tag, + actualTag: tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(fieldName)), + structfieldLen: uint8(len(structFieldName)), + param: param, + kind: kind, + }, + ) + return + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tag, + actualTag: tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(fieldName)), + structfieldLen: uint8(len(structFieldName)), + value: fv.Interface(), + param: param, + kind: kind, + typ: fv.Type(), + }, + ) +} + +// ReportValidationErrors reports ValidationErrors obtained from running validations within the Struct Level validation. +// +// NOTE: this function prepends the current namespace to the relative ones. +func (v *validate) ReportValidationErrors(relativeNamespace, relativeStructNamespace string, errs ValidationErrors) { + + var err *fieldError + + for i := 0; i < len(errs); i++ { + + err = errs[i].(*fieldError) + err.ns = string(append(append(v.ns, relativeNamespace...), err.ns...)) + err.structNs = string(append(append(v.actualNs, relativeStructNamespace...), err.structNs...)) + + v.errs = append(v.errs, err) + } +} diff --git a/vendor/github.com/go-playground/validator/v10/translations.go b/vendor/github.com/go-playground/validator/v10/translations.go new file mode 100644 index 00000000000..4d9d75c13a0 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/translations.go @@ -0,0 +1,11 @@ +package validator + +import ut "github.com/go-playground/universal-translator" + +// TranslationFunc is the function type used to register or override +// custom translations +type TranslationFunc func(ut ut.Translator, fe FieldError) string + +// RegisterTranslationsFunc allows for registering of translations +// for a 'ut.Translator' for use within the 'TranslationFunc' +type RegisterTranslationsFunc func(ut ut.Translator) error diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go new file mode 100644 index 00000000000..56420f4301f --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/util.go @@ -0,0 +1,288 @@ +package validator + +import ( + "reflect" + "strconv" + "strings" + "time" +) + +// extractTypeInternal gets the actual underlying type of field value. +// It will dive into pointers, customTypes and return you the +// underlying value and it's kind. 
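+//
+// For example (illustrative): given a non-nil *string containing "x", it returns
+// the dereferenced value, reflect.String and nullable == true; given a nil
+// *string it returns the pointer itself, reflect.Ptr and true.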
+func (v *validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) { + +BEGIN: + switch current.Kind() { + case reflect.Ptr: + + nullable = true + + if current.IsNil() { + return current, reflect.Ptr, nullable + } + + current = current.Elem() + goto BEGIN + + case reflect.Interface: + + nullable = true + + if current.IsNil() { + return current, reflect.Interface, nullable + } + + current = current.Elem() + goto BEGIN + + case reflect.Invalid: + return current, reflect.Invalid, nullable + + default: + + if v.v.hasCustomFuncs { + + if fn, ok := v.v.customFuncs[current.Type()]; ok { + current = reflect.ValueOf(fn(current)) + goto BEGIN + } + } + + return current, current.Kind(), nullable + } +} + +// getStructFieldOKInternal traverses a struct to retrieve a specific field denoted by the provided namespace and +// returns the field, field kind and whether is was successful in retrieving the field at all. +// +// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field +// could not be retrieved because it didn't exist. +func (v *validate) getStructFieldOKInternal(val reflect.Value, namespace string) (current reflect.Value, kind reflect.Kind, nullable bool, found bool) { + +BEGIN: + current, kind, nullable = v.ExtractType(val) + if kind == reflect.Invalid { + return + } + + if namespace == "" { + found = true + return + } + + switch kind { + + case reflect.Ptr, reflect.Interface: + return + + case reflect.Struct: + + typ := current.Type() + fld := namespace + var ns string + + if typ != timeType { + + idx := strings.Index(namespace, namespaceSeparator) + + if idx != -1 { + fld = namespace[:idx] + ns = namespace[idx+1:] + } else { + ns = "" + } + + bracketIdx := strings.Index(fld, leftBracket) + if bracketIdx != -1 { + fld = fld[:bracketIdx] + + ns = namespace[bracketIdx:] + } + + val = current.FieldByName(fld) + namespace = ns + goto BEGIN + } + + case reflect.Array, reflect.Slice: + idx := strings.Index(namespace, leftBracket) + idx2 := strings.Index(namespace, rightBracket) + + arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2]) + + if arrIdx >= current.Len() { + return + } + + startIdx := idx2 + 1 + + if startIdx < len(namespace) { + if namespace[startIdx:startIdx+1] == namespaceSeparator { + startIdx++ + } + } + + val = current.Index(arrIdx) + namespace = namespace[startIdx:] + goto BEGIN + + case reflect.Map: + idx := strings.Index(namespace, leftBracket) + 1 + idx2 := strings.Index(namespace, rightBracket) + + endIdx := idx2 + + if endIdx+1 < len(namespace) { + if namespace[endIdx+1:endIdx+2] == namespaceSeparator { + endIdx++ + } + } + + key := namespace[idx:idx2] + + switch current.Type().Key().Kind() { + case reflect.Int: + i, _ := strconv.Atoi(key) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Int8: + i, _ := strconv.ParseInt(key, 10, 8) + val = current.MapIndex(reflect.ValueOf(int8(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int16: + i, _ := strconv.ParseInt(key, 10, 16) + val = current.MapIndex(reflect.ValueOf(int16(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int32: + i, _ := strconv.ParseInt(key, 10, 32) + val = current.MapIndex(reflect.ValueOf(int32(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int64: + i, _ := strconv.ParseInt(key, 10, 64) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Uint: + i, _ := strconv.ParseUint(key, 10, 0) + val = 
current.MapIndex(reflect.ValueOf(uint(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint8: + i, _ := strconv.ParseUint(key, 10, 8) + val = current.MapIndex(reflect.ValueOf(uint8(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint16: + i, _ := strconv.ParseUint(key, 10, 16) + val = current.MapIndex(reflect.ValueOf(uint16(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint32: + i, _ := strconv.ParseUint(key, 10, 32) + val = current.MapIndex(reflect.ValueOf(uint32(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint64: + i, _ := strconv.ParseUint(key, 10, 64) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Float32: + f, _ := strconv.ParseFloat(key, 32) + val = current.MapIndex(reflect.ValueOf(float32(f))) + namespace = namespace[endIdx+1:] + + case reflect.Float64: + f, _ := strconv.ParseFloat(key, 64) + val = current.MapIndex(reflect.ValueOf(f)) + namespace = namespace[endIdx+1:] + + case reflect.Bool: + b, _ := strconv.ParseBool(key) + val = current.MapIndex(reflect.ValueOf(b)) + namespace = namespace[endIdx+1:] + + // reflect.Type = string + default: + val = current.MapIndex(reflect.ValueOf(key)) + namespace = namespace[endIdx+1:] + } + + goto BEGIN + } + + // if got here there was more namespace, cannot go any deeper + panic("Invalid field namespace") +} + +// asInt returns the parameter as a int64 +// or panics if it can't convert +func asInt(param string) int64 { + i, err := strconv.ParseInt(param, 0, 64) + panicIf(err) + + return i +} + +// asIntFromTimeDuration parses param as time.Duration and returns it as int64 +// or panics on error. +func asIntFromTimeDuration(param string) int64 { + d, err := time.ParseDuration(param) + if err != nil { + // attempt parsing as an an integer assuming nanosecond precision + return asInt(param) + } + return int64(d) +} + +// asIntFromType calls the proper function to parse param as int64, +// given a field's Type t. 
+func asIntFromType(t reflect.Type, param string) int64 { + switch t { + case timeDurationType: + return asIntFromTimeDuration(param) + default: + return asInt(param) + } +} + +// asUint returns the parameter as a uint64 +// or panics if it can't convert +func asUint(param string) uint64 { + + i, err := strconv.ParseUint(param, 0, 64) + panicIf(err) + + return i +} + +// asFloat returns the parameter as a float64 +// or panics if it can't convert +func asFloat(param string) float64 { + + i, err := strconv.ParseFloat(param, 64) + panicIf(err) + + return i +} + +// asBool returns the parameter as a bool +// or panics if it can't convert +func asBool(param string) bool { + + i, err := strconv.ParseBool(param) + panicIf(err) + + return i +} + +func panicIf(err error) { + if err != nil { + panic(err.Error()) + } +} diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go new file mode 100644 index 00000000000..2a4fad022d1 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/validator.go @@ -0,0 +1,477 @@ +package validator + +import ( + "context" + "fmt" + "reflect" + "strconv" +) + +// per validate construct +type validate struct { + v *Validate + top reflect.Value + ns []byte + actualNs []byte + errs ValidationErrors + includeExclude map[string]struct{} // reset only if StructPartial or StructExcept are called, no need otherwise + ffn FilterFunc + slflParent reflect.Value // StructLevel & FieldLevel + slCurrent reflect.Value // StructLevel & FieldLevel + flField reflect.Value // StructLevel & FieldLevel + cf *cField // StructLevel & FieldLevel + ct *cTag // StructLevel & FieldLevel + misc []byte // misc reusable + str1 string // misc reusable + str2 string // misc reusable + fldIsPointer bool // StructLevel & FieldLevel + isPartial bool + hasExcludes bool +} + +// parent and current will be the same the first run of validateStruct +func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) { + + cs, ok := v.v.structCache.Get(typ) + if !ok { + cs = v.v.extractStructCache(current, typ.Name()) + } + + if len(ns) == 0 && len(cs.name) != 0 { + + ns = append(ns, cs.name...) + ns = append(ns, '.') + + structNs = append(structNs, cs.name...) + structNs = append(structNs, '.') + } + + // ct is nil on top level struct, and structs as fields that have no tag info + // so if nil or if not nil and the structonly tag isn't present + if ct == nil || ct.typeof != typeStructOnly { + + var f *cField + + for i := 0; i < len(cs.fields); i++ { + + f = cs.fields[i] + + if v.isPartial { + + if v.ffn != nil { + // used with StructFiltered + if v.ffn(append(structNs, f.name...)) { + continue + } + + } else { + // used with StructPartial & StructExcept + _, ok = v.includeExclude[string(append(structNs, f.name...))] + + if (ok && v.hasExcludes) || (!ok && !v.hasExcludes) { + continue + } + } + } + + v.traverseField(ctx, current, current.Field(f.idx), ns, structNs, f, f.cTags) + } + } + + // check if any struct level validations, after all field validations already checked. + // first iteration will have no info about nostructlevel tag, and is checked prior to + // calling the next iteration of validateStruct called from traverseField. 
+ if cs.fn != nil { + + v.slflParent = parent + v.slCurrent = current + v.ns = ns + v.actualNs = structNs + + cs.fn(ctx, v) + } +} + +// traverseField validates any field, be it a struct or single field, ensures it's validity and passes it along to be validated via it's tag options +func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) { + var typ reflect.Type + var kind reflect.Kind + + current, kind, v.fldIsPointer = v.extractTypeInternal(current, false) + + switch kind { + case reflect.Ptr, reflect.Interface, reflect.Invalid: + + if ct == nil { + return + } + + if ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault { + return + } + + if ct.hasTag { + if kind == reflect.Invalid { + v.str1 = string(append(ns, cf.altName...)) + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + param: ct.param, + kind: kind, + }, + ) + return + } + + v.str1 = string(append(ns, cf.altName...)) + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + if !ct.runValidationWhenNil { + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: current.Type(), + }, + ) + return + } + } + + case reflect.Struct: + + typ = current.Type() + + if typ != timeType { + + if ct != nil { + + if ct.typeof == typeStructOnly { + goto CONTINUE + } else if ct.typeof == typeIsDefault { + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !ct.fn(ctx, v) { + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + return + } + } + + ct = ct.next + } + + if ct != nil && ct.typeof == typeNoStructLevel { + return + } + + CONTINUE: + // if len == 0 then validating using 'Var' or 'VarWithValue' + // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm... 
+ // VarWithField - this allows for validating against each field within the struct against a specific value + // pretty handy in certain situations + if len(cf.name) > 0 { + ns = append(append(ns, cf.altName...), '.') + structNs = append(append(structNs, cf.name...), '.') + } + + v.validateStruct(ctx, parent, current, typ, ns, structNs, ct) + return + } + } + + if ct == nil || !ct.hasTag { + return + } + + typ = current.Type() + +OUTER: + for { + if ct == nil { + return + } + + switch ct.typeof { + + case typeOmitEmpty: + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !hasValue(v) { + return + } + + ct = ct.next + continue + + case typeEndKeys: + return + + case typeDive: + + ct = ct.next + + // traverse slice or map here + // or panic ;) + switch kind { + case reflect.Slice, reflect.Array: + + var i64 int64 + reusableCF := &cField{} + + for i := 0; i < current.Len(); i++ { + + i64 = int64(i) + + v.misc = append(v.misc[0:0], cf.name...) + v.misc = append(v.misc, '[') + v.misc = strconv.AppendInt(v.misc, i64, 10) + v.misc = append(v.misc, ']') + + reusableCF.name = string(v.misc) + + if cf.namesEqual { + reusableCF.altName = reusableCF.name + } else { + + v.misc = append(v.misc[0:0], cf.altName...) + v.misc = append(v.misc, '[') + v.misc = strconv.AppendInt(v.misc, i64, 10) + v.misc = append(v.misc, ']') + + reusableCF.altName = string(v.misc) + } + v.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct) + } + + case reflect.Map: + + var pv string + reusableCF := &cField{} + + for _, key := range current.MapKeys() { + + pv = fmt.Sprintf("%v", key.Interface()) + + v.misc = append(v.misc[0:0], cf.name...) + v.misc = append(v.misc, '[') + v.misc = append(v.misc, pv...) + v.misc = append(v.misc, ']') + + reusableCF.name = string(v.misc) + + if cf.namesEqual { + reusableCF.altName = reusableCF.name + } else { + v.misc = append(v.misc[0:0], cf.altName...) + v.misc = append(v.misc, '[') + v.misc = append(v.misc, pv...) + v.misc = append(v.misc, ']') + + reusableCF.altName = string(v.misc) + } + + if ct != nil && ct.typeof == typeKeys && ct.keys != nil { + v.traverseField(ctx, parent, key, ns, structNs, reusableCF, ct.keys) + // can be nil when just keys being validated + if ct.next != nil { + v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next) + } + } else { + v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct) + } + } + + default: + // throw error, if not a slice or map then should not have gotten here + // bad dive tag + panic("dive error! can't dive on a non slice or map") + } + + return + + case typeOr: + + v.misc = v.misc[0:0] + + for { + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if ct.fn(ctx, v) { + + // drain rest of the 'or' values, then continue or leave + for { + + ct = ct.next + + if ct == nil { + return + } + + if ct.typeof != typeOr { + continue OUTER + } + } + } + + v.misc = append(v.misc, '|') + v.misc = append(v.misc, ct.tag...) + + if ct.hasParam { + v.misc = append(v.misc, '=') + v.misc = append(v.misc, ct.param...) 
+ } + + if ct.isBlockEnd || ct.next == nil { + // if we get here, no valid 'or' value and no more tags + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + if ct.hasAlias { + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.actualAliasTag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + + } else { + + tVal := string(v.misc)[1:] + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tVal, + actualTag: tVal, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + } + + return + } + + ct = ct.next + } + + default: + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !ct.fn(ctx, v) { + + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + + return + } + ct = ct.next + } + } + +} diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go new file mode 100644 index 00000000000..973964fc205 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go @@ -0,0 +1,658 @@ +package validator + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + "sync" + "time" + + ut "github.com/go-playground/universal-translator" +) + +const ( + defaultTagName = "validate" + utf8HexComma = "0x2C" + utf8Pipe = "0x7C" + tagSeparator = "," + orSeparator = "|" + tagKeySeparator = "=" + structOnlyTag = "structonly" + noStructLevelTag = "nostructlevel" + omitempty = "omitempty" + isdefault = "isdefault" + requiredWithoutAllTag = "required_without_all" + requiredWithoutTag = "required_without" + requiredWithTag = "required_with" + requiredWithAllTag = "required_with_all" + requiredIfTag = "required_if" + requiredUnlessTag = "required_unless" + excludedWithoutAllTag = "excluded_without_all" + excludedWithoutTag = "excluded_without" + excludedWithTag = "excluded_with" + excludedWithAllTag = "excluded_with_all" + skipValidationTag = "-" + diveTag = "dive" + keysTag = "keys" + endKeysTag = "endkeys" + requiredTag = "required" + namespaceSeparator = "." + leftBracket = "[" + rightBracket = "]" + restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}" + restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" + restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" +) + +var ( + timeDurationType = reflect.TypeOf(time.Duration(0)) + timeType = reflect.TypeOf(time.Time{}) + + defaultCField = &cField{namesEqual: true} +) + +// FilterFunc is the type used to filter fields using +// StructFiltered(...) function. 
+// returning true results in the field being filtered/skiped from +// validation +type FilterFunc func(ns []byte) bool + +// CustomTypeFunc allows for overriding or adding custom field type handler functions +// field = field value of the type to return a value to be validated +// example Valuer from sql drive see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29 +type CustomTypeFunc func(field reflect.Value) interface{} + +// TagNameFunc allows for adding of a custom tag name parser +type TagNameFunc func(field reflect.StructField) string + +type internalValidationFuncWrapper struct { + fn FuncCtx + runValidatinOnNil bool +} + +// Validate contains the validator settings and cache +type Validate struct { + tagName string + pool *sync.Pool + hasCustomFuncs bool + hasTagNameFunc bool + tagNameFunc TagNameFunc + structLevelFuncs map[reflect.Type]StructLevelFuncCtx + customFuncs map[reflect.Type]CustomTypeFunc + aliases map[string]string + validations map[string]internalValidationFuncWrapper + transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc + tagCache *tagCache + structCache *structCache +} + +// New returns a new instance of 'validate' with sane defaults. +// Validate is designed to be thread-safe and used as a singleton instance. +// It caches information about your struct and validations, +// in essence only parsing your validation tags once per struct type. +// Using multiple instances neglects the benefit of caching. +func New() *Validate { + + tc := new(tagCache) + tc.m.Store(make(map[string]*cTag)) + + sc := new(structCache) + sc.m.Store(make(map[reflect.Type]*cStruct)) + + v := &Validate{ + tagName: defaultTagName, + aliases: make(map[string]string, len(bakedInAliases)), + validations: make(map[string]internalValidationFuncWrapper, len(bakedInValidators)), + tagCache: tc, + structCache: sc, + } + + // must copy alias validators for separate validations to be used in each validator instance + for k, val := range bakedInAliases { + v.RegisterAlias(k, val) + } + + // must copy validators for separate validations to be used in each instance + for k, val := range bakedInValidators { + + switch k { + // these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour + case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag, + excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag: + _ = v.registerValidation(k, wrapFunc(val), true, true) + default: + // no need to error check here, baked in will always be valid + _ = v.registerValidation(k, wrapFunc(val), true, false) + } + } + + v.pool = &sync.Pool{ + New: func() interface{} { + return &validate{ + v: v, + ns: make([]byte, 0, 64), + actualNs: make([]byte, 0, 64), + misc: make([]byte, 32), + } + }, + } + + return v +} + +// SetTagName allows for changing of the default tag name of 'validate' +func (v *Validate) SetTagName(name string) { + v.tagName = name +} + +// ValidateMapCtx validates a map using a map of validation rules and allows passing of contextual +// validation validation information via context.Context. 
+func (v Validate) ValidateMapCtx(ctx context.Context, data map[string]interface{}, rules map[string]interface{}) map[string]interface{} { + errs := make(map[string]interface{}) + for field, rule := range rules { + if reflect.ValueOf(rule).Kind() == reflect.Map && reflect.ValueOf(data[field]).Kind() == reflect.Map { + err := v.ValidateMapCtx(ctx, data[field].(map[string]interface{}), rule.(map[string]interface{})) + if len(err) > 0 { + errs[field] = err + } + } else if reflect.ValueOf(rule).Kind() == reflect.Map { + errs[field] = errors.New("The field: '" + field + "' is not a map to dive") + } else { + err := v.VarCtx(ctx, data[field], rule.(string)) + if err != nil { + errs[field] = err + } + } + } + return errs +} + +// ValidateMap validates map data form a map of tags +func (v *Validate) ValidateMap(data map[string]interface{}, rules map[string]interface{}) map[string]interface{} { + return v.ValidateMapCtx(context.Background(), data, rules) +} + +// RegisterTagNameFunc registers a function to get alternate names for StructFields. +// +// eg. to use the names which have been specified for JSON representations of structs, rather than normal Go field names: +// +// validate.RegisterTagNameFunc(func(fld reflect.StructField) string { +// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0] +// if name == "-" { +// return "" +// } +// return name +// }) +func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) { + v.tagNameFunc = fn + v.hasTagNameFunc = true +} + +// RegisterValidation adds a validation with the given tag +// +// NOTES: +// - if the key already exists, the previous validation function will be replaced. +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterValidation(tag string, fn Func, callValidationEvenIfNull ...bool) error { + return v.RegisterValidationCtx(tag, wrapFunc(fn), callValidationEvenIfNull...) +} + +// RegisterValidationCtx does the same as RegisterValidation on accepts a FuncCtx validation +// allowing context.Context validation support. +func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx, callValidationEvenIfNull ...bool) error { + var nilCheckable bool + if len(callValidationEvenIfNull) > 0 { + nilCheckable = callValidationEvenIfNull[0] + } + return v.registerValidation(tag, fn, false, nilCheckable) +} + +func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilCheckable bool) error { + if len(tag) == 0 { + return errors.New("function Key cannot be empty") + } + + if fn == nil { + return errors.New("function cannot be empty") + } + + _, ok := restrictedTags[tag] + if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) { + panic(fmt.Sprintf(restrictedTagErr, tag)) + } + v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidatinOnNil: nilCheckable} + return nil +} + +// RegisterAlias registers a mapping of a single validation tag that +// defines a common or complex set of validation(s) to simplify adding validation +// to structs. +// +// NOTE: this function is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterAlias(alias, tags string) { + + _, ok := restrictedTags[alias] + + if ok || strings.ContainsAny(alias, restrictedTagChars) { + panic(fmt.Sprintf(restrictedAliasErr, alias)) + } + + v.aliases[alias] = tags +} + +// RegisterStructValidation registers a StructLevelFunc against a number of types. 
+// +// NOTE: +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) { + v.RegisterStructValidationCtx(wrapStructLevelFunc(fn), types...) +} + +// RegisterStructValidationCtx registers a StructLevelFuncCtx against a number of types and allows passing +// of contextual validation information via context.Context. +// +// NOTE: +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...interface{}) { + + if v.structLevelFuncs == nil { + v.structLevelFuncs = make(map[reflect.Type]StructLevelFuncCtx) + } + + for _, t := range types { + tv := reflect.ValueOf(t) + if tv.Kind() == reflect.Ptr { + t = reflect.Indirect(tv).Interface() + } + + v.structLevelFuncs[reflect.TypeOf(t)] = fn + } +} + +// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types +// +// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) { + + if v.customFuncs == nil { + v.customFuncs = make(map[reflect.Type]CustomTypeFunc) + } + + for _, t := range types { + v.customFuncs[reflect.TypeOf(t)] = fn + } + + v.hasCustomFuncs = true +} + +// RegisterTranslation registers translations against the provided tag. +func (v *Validate) RegisterTranslation(tag string, trans ut.Translator, registerFn RegisterTranslationsFunc, translationFn TranslationFunc) (err error) { + + if v.transTagFunc == nil { + v.transTagFunc = make(map[ut.Translator]map[string]TranslationFunc) + } + + if err = registerFn(trans); err != nil { + return + } + + m, ok := v.transTagFunc[trans] + if !ok { + m = make(map[string]TranslationFunc) + v.transTagFunc[trans] = m + } + + m[tag] = translationFn + + return +} + +// Struct validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) Struct(s interface{}) error { + return v.StructCtx(context.Background(), s) +} + +// StructCtx validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified +// and also allows passing of context.Context for contextual validation information. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) { + + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = false + // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept + + vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructFiltered validates a structs exposed fields, that pass the FilterFunc check and automatically validates +// nested structs, unless otherwise specified. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructFiltered(s interface{}, fn FilterFunc) error { + return v.StructFilteredCtx(context.Background(), s, fn) +} + +// StructFilteredCtx validates a structs exposed fields, that pass the FilterFunc check and automatically validates +// nested structs, unless otherwise specified and also allows passing of contextual validation information via +// context.Context +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn FilterFunc) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = fn + // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept + + vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructPartial validates the fields passed in only, ignoring all others. +// Fields may be provided in a namespaced fashion relative to the struct provided +// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructPartial(s interface{}, fields ...string) error { + return v.StructPartialCtx(context.Background(), s, fields...) +} + +// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual +// validation validation information via context.Context +// Fields may be provided in a namespaced fashion relative to the struct provided +// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. 
err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields ...string) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = nil + vd.hasExcludes = false + vd.includeExclude = make(map[string]struct{}) + + typ := val.Type() + name := typ.Name() + + for _, k := range fields { + + flds := strings.Split(k, namespaceSeparator) + if len(flds) > 0 { + + vd.misc = append(vd.misc[0:0], name...) + // Don't append empty name for unnamed structs + if len(vd.misc) != 0 { + vd.misc = append(vd.misc, '.') + } + + for _, s := range flds { + + idx := strings.Index(s, leftBracket) + + if idx != -1 { + for idx != -1 { + vd.misc = append(vd.misc, s[:idx]...) + vd.includeExclude[string(vd.misc)] = struct{}{} + + idx2 := strings.Index(s, rightBracket) + idx2++ + vd.misc = append(vd.misc, s[idx:idx2]...) + vd.includeExclude[string(vd.misc)] = struct{}{} + s = s[idx2:] + idx = strings.Index(s, leftBracket) + } + } else { + + vd.misc = append(vd.misc, s...) + vd.includeExclude[string(vd.misc)] = struct{}{} + } + + vd.misc = append(vd.misc, '.') + } + } + } + + vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructExcept validates all fields except the ones passed in. +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructExcept(s interface{}, fields ...string) error { + return v.StructExceptCtx(context.Background(), s, fields...) +} + +// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual +// validation validation information via context.Context +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ...string) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = nil + vd.hasExcludes = true + vd.includeExclude = make(map[string]struct{}) + + typ := val.Type() + name := typ.Name() + + for _, key := range fields { + + vd.misc = vd.misc[0:0] + + if len(name) > 0 { + vd.misc = append(vd.misc, name...) + vd.misc = append(vd.misc, '.') + } + + vd.misc = append(vd.misc, key...) 
+ vd.includeExclude[string(vd.misc)] = struct{}{} + } + + vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// Var validates a single variable using tag style validation. +// eg. +// var i int +// validate.Var(i, "gt=1,lt=10") +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) Var(field interface{}, tag string) error { + return v.VarCtx(context.Background(), field, tag) +} + +// VarCtx validates a single variable using tag style validation and allows passing of contextual +// validation validation information via context.Context. +// eg. +// var i int +// validate.Var(i, "gt=1,lt=10") +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (err error) { + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + + ctag := v.fetchCacheTag(tag) + val := reflect.ValueOf(field) + vd := v.pool.Get().(*validate) + vd.top = val + vd.isPartial = false + vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + v.pool.Put(vd) + return +} + +// VarWithValue validates a single variable, against another variable/field's value using tag style validation +// eg. +// s1 := "abcd" +// s2 := "abcd" +// validate.VarWithValue(s1, s2, "eqcsfield") // returns true +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string) error { + return v.VarWithValueCtx(context.Background(), field, other, tag) +} + +// VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and +// allows passing of contextual validation validation information via context.Context. +// eg. +// s1 := "abcd" +// s2 := "abcd" +// validate.VarWithValue(s1, s2, "eqcsfield") // returns true +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithValueCtx(ctx context.Context, field interface{}, other interface{}, tag string) (err error) { + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + ctag := v.fetchCacheTag(tag) + otherVal := reflect.ValueOf(other) + vd := v.pool.Get().(*validate) + vd.top = otherVal + vd.isPartial = false + vd.traverseField(ctx, otherVal, reflect.ValueOf(field), vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + v.pool.Put(vd) + return +} diff --git a/vendor/github.com/gofrs/uuid/.gitignore b/vendor/github.com/gofrs/uuid/.gitignore new file mode 100644 index 00000000000..666dbbb5bcd --- /dev/null +++ b/vendor/github.com/gofrs/uuid/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# binary bundle generated by go-fuzz +uuid-fuzz.zip diff --git a/vendor/github.com/gofrs/uuid/LICENSE b/vendor/github.com/gofrs/uuid/LICENSE new file mode 100644 index 00000000000..926d5498702 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013-2018 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md new file mode 100644 index 00000000000..48303001b7c --- /dev/null +++ b/vendor/github.com/gofrs/uuid/README.md @@ -0,0 +1,109 @@ +# UUID + +[![License](https://img.shields.io/github/license/gofrs/uuid.svg)](https://github.com/gofrs/uuid/blob/master/LICENSE) +[![Build Status](https://travis-ci.org/gofrs/uuid.svg?branch=master)](https://travis-ci.org/gofrs/uuid) +[![GoDoc](http://godoc.org/github.com/gofrs/uuid?status.svg)](http://godoc.org/github.com/gofrs/uuid) +[![Coverage Status](https://codecov.io/gh/gofrs/uuid/branch/master/graphs/badge.svg?branch=master)](https://codecov.io/gh/gofrs/uuid/) +[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/uuid)](https://goreportcard.com/report/github.com/gofrs/uuid) + +Package uuid provides a pure Go implementation of Universally Unique Identifiers +(UUID) variant as defined in RFC-4122. This package supports both the creation +and parsing of UUIDs in different formats. + +This package supports the following UUID versions: +* Version 1, based on timestamp and MAC address (RFC-4122) +* Version 3, based on MD5 hashing of a named value (RFC-4122) +* Version 4, based on random numbers (RFC-4122) +* Version 5, based on SHA-1 hashing of a named value (RFC-4122) + +## Project History + +This project was originally forked from the +[github.com/satori/go.uuid](https://github.com/satori/go.uuid) repository after +it appeared to be no longer maintained, while exhibiting [critical +flaws](https://github.com/satori/go.uuid/issues/73). We have decided to take +over this project to ensure it receives regular maintenance for the benefit of +the larger Go community. + +We'd like to thank Maxim Bublis for his hard work on the original iteration of +the package. + +## License + +This source code of this package is released under the MIT License. Please see +the [LICENSE](https://github.com/gofrs/uuid/blob/master/LICENSE) for the full +content of the license. + +## Recommended Package Version + +We recommend using v2.0.0+ of this package, as versions prior to 2.0.0 were +created before our fork of the original package and have some known +deficiencies. + +## Installation + +It is recommended to use a package manager like `dep` that understands tagged +releases of a package, as well as semantic versioning. + +If you are unable to make use of a dependency manager with your project, you can +use the `go get` command to download it directly: + +```Shell +$ go get github.com/gofrs/uuid +``` + +## Requirements + +Due to subtests not being supported in older versions of Go, this package is +only regularly tested against Go 1.7+. This package may work perfectly fine with +Go 1.2+, but support for these older versions is not actively maintained. + +## Go 1.11 Modules + +As of v3.2.0, this repository no longer adopts Go modules, and v3.2.0 no longer has a `go.mod` file. As a result, v3.2.0 also drops support for the `github.com/gofrs/uuid/v3` import path. Only module-based consumers are impacted. With the v3.2.0 release, _all_ gofrs/uuid consumers should use the `github.com/gofrs/uuid` import path. 
+ +An existing module-based consumer will continue to be able to build using the `github.com/gofrs/uuid/v3` import path using any valid consumer `go.mod` that worked prior to the publishing of v3.2.0, but any module-based consumer should start using the `github.com/gofrs/uuid` import path when possible and _must_ use the `github.com/gofrs/uuid` import path prior to upgrading to v3.2.0. + +Please refer to [Issue #61](https://github.com/gofrs/uuid/issues/61) and [Issue #66](https://github.com/gofrs/uuid/issues/66) for more details. + +## Usage + +Here is a quick overview of how to use this package. For more detailed +documentation, please see the [GoDoc Page](http://godoc.org/github.com/gofrs/uuid). + +```go +package main + +import ( + "log" + + "github.com/gofrs/uuid" +) + +// Create a Version 4 UUID, panicking on error. +// Use this form to initialize package-level variables. +var u1 = uuid.Must(uuid.NewV4()) + +func main() { + // Create a Version 4 UUID. + u2, err := uuid.NewV4() + if err != nil { + log.Fatalf("failed to generate UUID: %v", err) + } + log.Printf("generated Version 4 UUID %v", u2) + + // Parse a UUID from a string. + s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + u3, err := uuid.FromString(s) + if err != nil { + log.Fatalf("failed to parse UUID %q: %v", s, err) + } + log.Printf("successfully parsed UUID %v", u3) +} +``` + +## References + +* [RFC-4122](https://tools.ietf.org/html/rfc4122) +* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) +* [New UUID Formats RFC Draft (Peabody) Rev 02](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-02) diff --git a/vendor/github.com/gofrs/uuid/codec.go b/vendor/github.com/gofrs/uuid/codec.go new file mode 100644 index 00000000000..e3014c68c66 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/codec.go @@ -0,0 +1,212 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "encoding/hex" + "fmt" +) + +// FromBytes returns a UUID generated from the raw byte slice input. +// It will return an error if the slice isn't 16 bytes long. +func FromBytes(input []byte) (UUID, error) { + u := UUID{} + err := u.UnmarshalBinary(input) + return u, err +} + +// FromBytesOrNil returns a UUID generated from the raw byte slice input. +// Same behavior as FromBytes(), but returns uuid.Nil instead of an error. 
+func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns a UUID parsed from the input string. +// Input is expected in a form accepted by UnmarshalText. +func FromString(input string) (UUID, error) { + u := UUID{} + err := u.UnmarshalText([]byte(input)) + return u, err +} + +// FromStringOrNil returns a UUID parsed from the input string. +// Same behavior as FromString(), but returns uuid.Nil instead of an error. +func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by the String() method. +func (u UUID) MarshalText() ([]byte, error) { + return []byte(u.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Following formats are supported: +// +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// "{6ba7b8109dad11d180b400c04fd430c8}", +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" +// +// ABNF for supported UUID text representation follows: +// +// URN := 'urn' +// UUID-NID := 'uuid' +// +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +// +// hexoct := hexdig hexdig +// 2hexoct := hexoct hexoct +// 4hexoct := 2hexoct 2hexoct +// 6hexoct := 4hexoct 2hexoct +// 12hexoct := 6hexoct 6hexoct +// +// hashlike := 12hexoct +// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct +// +// plain := canonical | hashlike +// uuid := canonical | hashlike | braced | urn +// +// braced := '{' plain '}' | '{' hashlike '}' +// urn := URN ':' UUID-NID ':' plain +// +func (u *UUID) UnmarshalText(text []byte) error { + switch len(text) { + case 32: + return u.decodeHashLike(text) + case 34, 38: + return u.decodeBraced(text) + case 36: + return u.decodeCanonical(text) + case 41, 45: + return u.decodeURN(text) + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(text), text) + } +} + +// decodeCanonical decodes UUID strings that are formatted as defined in RFC-4122 (section 3): +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". +func (u *UUID) decodeCanonical(t []byte) error { + if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + src := t + dst := u[:] + + for i, byteGroup := range byteGroups { + if i > 0 { + src = src[1:] // skip dash + } + _, err := hex.Decode(dst[:byteGroup/2], src[:byteGroup]) + if err != nil { + return err + } + src = src[byteGroup:] + dst = dst[byteGroup/2:] + } + + return nil +} + +// decodeHashLike decodes UUID strings that are using the following format: +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeHashLike(t []byte) error { + src := t[:] + dst := u[:] + + _, err := hex.Decode(dst, src) + return err +} + +// decodeBraced decodes UUID strings that are using the following formats: +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" +// "{6ba7b8109dad11d180b400c04fd430c8}". 
+func (u *UUID) decodeBraced(t []byte) error { + l := len(t) + + if t[0] != '{' || t[l-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + return u.decodePlain(t[1 : l-1]) +} + +// decodeURN decodes UUID strings that are using the following formats: +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeURN(t []byte) error { + total := len(t) + + urnUUIDPrefix := t[:9] + + if !bytes.Equal(urnUUIDPrefix, urnPrefix) { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + return u.decodePlain(t[9:total]) +} + +// decodePlain decodes UUID strings that are using the following formats: +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodePlain(t []byte) error { + switch len(t) { + case 32: + return u.decodeHashLike(t) + case 36: + return u.decodeCanonical(t) + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(t), t) + } +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (u UUID) MarshalBinary() ([]byte, error) { + return u.Bytes(), nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// It will return an error if the slice isn't 16 bytes long. +func (u *UUID) UnmarshalBinary(data []byte) error { + if len(data) != Size { + return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + } + copy(u[:], data) + + return nil +} diff --git a/vendor/github.com/gofrs/uuid/fuzz.go b/vendor/github.com/gofrs/uuid/fuzz.go new file mode 100644 index 00000000000..afaefbc8e39 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/fuzz.go @@ -0,0 +1,47 @@ +// Copyright (c) 2018 Andrei Tudor Călin +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// +build gofuzz + +package uuid + +// Fuzz implements a simple fuzz test for FromString / UnmarshalText. +// +// To run: +// +// $ go get github.com/dvyukov/go-fuzz/... +// $ cd $GOPATH/src/github.com/gofrs/uuid +// $ go-fuzz-build github.com/gofrs/uuid +// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata +// +// If you make significant changes to FromString / UnmarshalText and add +// new cases to fromStringTests (in codec_test.go), please run +// +// $ go test -seed_fuzz_corpus +// +// to seed the corpus with the new interesting inputs, then run the fuzzer. 
+func Fuzz(data []byte) int { + _, err := FromString(string(data)) + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go new file mode 100644 index 00000000000..38bf685029e --- /dev/null +++ b/vendor/github.com/gofrs/uuid/generator.go @@ -0,0 +1,573 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "net" + "sync" + "time" +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +type epochFunc func() time.Time + +// HWAddrFunc is the function type used to provide hardware (MAC) addresses. +type HWAddrFunc func() (net.HardwareAddr, error) + +// DefaultGenerator is the default UUID Generator used by this package. +var DefaultGenerator Generator = NewGen() + +// NewV1 returns a UUID based on the current timestamp and MAC address. +func NewV1() (UUID, error) { + return DefaultGenerator.NewV1() +} + +// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + return DefaultGenerator.NewV3(ns, name) +} + +// NewV4 returns a randomly generated UUID. +func NewV4() (UUID, error) { + return DefaultGenerator.NewV4() +} + +// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. +func NewV5(ns UUID, name string) UUID { + return DefaultGenerator.NewV5(ns, name) +} + +// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of +// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit +// order being adjusted to allow the UUID to be k-sortable. +// +// This is implemented based on revision 02 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func NewV6() (UUID, error) { + return DefaultGenerator.NewV6() +} + +// NewV7 returns a k-sortable UUID based on the current UNIX epoch, with the +// ability to configure the timestamp's precision from millisecond all the way +// to nanosecond. 
The additional precision is supported by reducing the amount +// of pseudorandom data that makes up the rest of the UUID. +// +// If an unknown Precision argument is passed to this method it will panic. As +// such it's strongly encouraged to use the package-provided constants for this +// value. +// +// This is implemented based on revision 02 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func NewV7(p Precision) (UUID, error) { + return DefaultGenerator.NewV7(p) +} + +// Generator provides an interface for generating UUIDs. +type Generator interface { + NewV1() (UUID, error) + NewV3(ns UUID, name string) UUID + NewV4() (UUID, error) + NewV5(ns UUID, name string) UUID + NewV6() (UUID, error) + NewV7(Precision) (UUID, error) +} + +// Gen is a reference UUID generator based on the specifications laid out in +// RFC-4122 and DCE 1.1: Authentication and Security Services. This type +// satisfies the Generator interface as defined in this package. +// +// For consumers who are generating V1 UUIDs, but don't want to expose the MAC +// address of the node generating the UUIDs, the NewGenWithHWAF() function has been +// provided as a convenience. See the function's documentation for more info. +// +// The authors of this package do not feel that the majority of users will need +// to obfuscate their MAC address, and so we recommend using NewGen() to create +// a new generator. +type Gen struct { + clockSequenceOnce sync.Once + hardwareAddrOnce sync.Once + storageMutex sync.Mutex + + rand io.Reader + + epochFunc epochFunc + hwAddrFunc HWAddrFunc + lastTime uint64 + clockSequence uint16 + hardwareAddr [6]byte + + v7LastTime uint64 + v7LastSubsec uint64 + v7ClockSequence uint16 +} + +// interface check -- build will fail if *Gen doesn't satisfy Generator +var _ Generator = (*Gen)(nil) + +// NewGen returns a new instance of Gen with some default values set. Most +// people should use this. +func NewGen() *Gen { + return NewGenWithHWAF(defaultHWAddrFunc) +} + +// NewGenWithHWAF builds a new UUID generator with the HWAddrFunc provided. Most +// consumers should use NewGen() instead. +// +// This is used so that consumers can generate their own MAC addresses, for use +// in the generated UUIDs, if there is some concern about exposing the physical +// address of the machine generating the UUID. +// +// The Gen generator will only invoke the HWAddrFunc once, and cache that MAC +// address for all the future UUIDs generated by it. If you'd like to switch the +// MAC address being used, you'll need to create a new generator using this +// function. +func NewGenWithHWAF(hwaf HWAddrFunc) *Gen { + return &Gen{ + epochFunc: time.Now, + hwAddrFunc: hwaf, + rand: rand.Reader, + } +} + +// NewV1 returns a UUID based on the current timestamp and MAC address. 
+func (g *Gen) NewV1() (UUID, error) { + u := UUID{} + + timeNow, clockSeq, err := g.getClockSequence() + if err != nil { + return Nil, err + } + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + hardwareAddr, err := g.getHardwareAddr() + if err != nil { + return Nil, err + } + copy(u[10:], hardwareAddr) + + u.SetVersion(V1) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. +func (g *Gen) NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(V3) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV4 returns a randomly generated UUID. +func (g *Gen) NewV4() (UUID, error) { + u := UUID{} + if _, err := io.ReadFull(g.rand, u[:]); err != nil { + return Nil, err + } + u.SetVersion(V4) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. +func (g *Gen) NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(V5) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of +// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit +// order being adjusted to allow the UUID to be k-sortable. +// +// This is implemented based on revision 02 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func (g *Gen) NewV6() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[10:]); err != nil { + return Nil, err + } + + timeNow, clockSeq, err := g.getClockSequence() + if err != nil { + return Nil, err + } + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow>>28)) // set time_high + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>12)) // set time_mid + binary.BigEndian.PutUint16(u[6:], uint16(timeNow&0xfff)) // set time_low (minus four version bits) + binary.BigEndian.PutUint16(u[8:], clockSeq&0x3fff) // set clk_seq_hi_res (minus two variant bits) + + u.SetVersion(V6) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// getClockSequence returns the epoch and clock sequence for V1 and V6 UUIDs. +func (g *Gen) getClockSequence() (uint64, uint16, error) { + var err error + g.clockSequenceOnce.Do(func() { + buf := make([]byte, 2) + if _, err = io.ReadFull(g.rand, buf); err != nil { + return + } + g.clockSequence = binary.BigEndian.Uint16(buf) + }) + if err != nil { + return 0, 0, err + } + + g.storageMutex.Lock() + defer g.storageMutex.Unlock() + + timeNow := g.getEpoch() + // Clock didn't change since last UUID generation. + // Should increase clock sequence. + if timeNow <= g.lastTime { + g.clockSequence++ + } + g.lastTime = timeNow + + return timeNow, g.clockSequence, nil +} + +// Precision is used to configure the V7 generator, to specify how precise the +// timestamp within the UUID should be. 
+type Precision byte + +const ( + NanosecondPrecision Precision = iota + MicrosecondPrecision + MillisecondPrecision +) + +func (p Precision) String() string { + switch p { + case NanosecondPrecision: + return "nanosecond" + + case MicrosecondPrecision: + return "microsecond" + + case MillisecondPrecision: + return "millisecond" + + default: + return "unknown" + } +} + +// Duration returns the time.Duration for a specific precision. If the Precision +// value is not known, this returns 0. +func (p Precision) Duration() time.Duration { + switch p { + case NanosecondPrecision: + return time.Nanosecond + + case MicrosecondPrecision: + return time.Microsecond + + case MillisecondPrecision: + return time.Millisecond + + default: + return 0 + } +} + +// NewV7 returns a k-sortable UUID based on the current UNIX epoch, with the +// ability to configure the timestamp's precision from millisecond all the way +// to nanosecond. The additional precision is supported by reducing the amount +// of pseudorandom data that makes up the rest of the UUID. +// +// If an unknown Precision argument is passed to this method it will panic. As +// such it's strongly encouraged to use the package-provided constants for this +// value. +// +// This is implemented based on revision 02 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func (g *Gen) NewV7(p Precision) (UUID, error) { + var u UUID + var err error + + switch p { + case NanosecondPrecision: + u, err = g.newV7Nano() + + case MicrosecondPrecision: + u, err = g.newV7Micro() + + case MillisecondPrecision: + u, err = g.newV7Milli() + + default: + panic(fmt.Sprintf("unknown precision value %d", p)) + } + + if err != nil { + return Nil, err + } + + u.SetVersion(V7) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +func (g *Gen) newV7Milli() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[8:]); err != nil { + return Nil, err + } + + sec, nano, seq, err := g.getV7ClockSequence(MillisecondPrecision) + if err != nil { + return Nil, err + } + + msec := (nano / 1000000) & 0xfff + + d := (sec << 28) // set unixts field + d |= (msec << 16) // set msec field + d |= (uint64(seq) & 0xfff) // set seq field + + binary.BigEndian.PutUint64(u[:], d) + + return u, nil +} + +func (g *Gen) newV7Micro() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[10:]); err != nil { + return Nil, err + } + + sec, nano, seq, err := g.getV7ClockSequence(MicrosecondPrecision) + if err != nil { + return Nil, err + } + + usec := nano / 1000 + usech := (usec << 4) & 0xfff0000 + usecl := usec & 0xfff + + d := (sec << 28) // set unixts field + d |= usech | usecl // set usec fields + + binary.BigEndian.PutUint64(u[:], d) + binary.BigEndian.PutUint16(u[8:], seq) + + return u, nil +} + +func (g *Gen) newV7Nano() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[11:]); err != nil { + return Nil, err + } + + sec, nano, seq, err := g.getV7ClockSequence(NanosecondPrecision) + if err != nil { + return Nil, err + } + + nano &= 0x3fffffffff + nanoh := nano >> 26 + nanom := (nano >> 14) & 0xfff + nanol := uint16(nano & 0x3fff) + + d := (sec << 28) // set unixts field + d |= (nanoh << 16) | nanom // set nsec high and med fields + + binary.BigEndian.PutUint64(u[:], d) + 
binary.BigEndian.PutUint16(u[8:], nanol) // set nsec low field + + u[10] = byte(seq) // set seq field + + return u, nil +} + +const ( + maxSeq14 = (1 << 14) - 1 + maxSeq12 = (1 << 12) - 1 + maxSeq8 = (1 << 8) - 1 +) + +// getV7ClockSequence returns the unix epoch, nanoseconds of current second, and +// the sequence for V7 UUIDs. +func (g *Gen) getV7ClockSequence(p Precision) (epoch uint64, nano uint64, seq uint16, err error) { + g.storageMutex.Lock() + defer g.storageMutex.Unlock() + + tn := g.epochFunc() + unix := uint64(tn.Unix()) + nsec := uint64(tn.Nanosecond()) + + // V7 UUIDs have more precise requirements around how the clock sequence + // value is generated and used. Specifically they require that the sequence + // be zero, unless we've already generated a UUID within this unit of time + // (millisecond, microsecond, or nanosecond) at which point you should + // increment the sequence. Likewise if time has warped backwards for some reason (NTP + // adjustment?), we also increment the clock sequence to reduce the risk of a + // collision. + switch { + case unix < g.v7LastTime: + g.v7ClockSequence++ + + case unix > g.v7LastTime: + g.v7ClockSequence = 0 + + case unix == g.v7LastTime: + switch p { + case NanosecondPrecision: + if nsec <= g.v7LastSubsec { + if g.v7ClockSequence >= maxSeq8 { + return 0, 0, 0, errors.New("generating nanosecond precision UUIDv7s too fast: internal clock sequence would roll over") + } + + g.v7ClockSequence++ + } else { + g.v7ClockSequence = 0 + } + + case MicrosecondPrecision: + if nsec/1000 <= g.v7LastSubsec/1000 { + if g.v7ClockSequence >= maxSeq14 { + return 0, 0, 0, errors.New("generating microsecond precision UUIDv7s too fast: internal clock sequence would roll over") + } + + g.v7ClockSequence++ + } else { + g.v7ClockSequence = 0 + } + + case MillisecondPrecision: + if nsec/1000000 <= g.v7LastSubsec/1000000 { + if g.v7ClockSequence >= maxSeq12 { + return 0, 0, 0, errors.New("generating millisecond precision UUIDv7s too fast: internal clock sequence would roll over") + } + + g.v7ClockSequence++ + } else { + g.v7ClockSequence = 0 + } + + default: + panic(fmt.Sprintf("unknown precision value %d", p)) + } + } + + g.v7LastTime = unix + g.v7LastSubsec = nsec + + return unix, nsec, g.v7ClockSequence, nil +} + +// Returns the hardware address. +func (g *Gen) getHardwareAddr() ([]byte, error) { + var err error + g.hardwareAddrOnce.Do(func() { + var hwAddr net.HardwareAddr + if hwAddr, err = g.hwAddrFunc(); err == nil { + copy(g.hardwareAddr[:], hwAddr) + return + } + + // Initialize hardwareAddr randomly in case + // of real network interfaces absence. + if _, err = io.ReadFull(g.rand, g.hardwareAddr[:]); err != nil { + return + } + // Set multicast bit as recommended by RFC-4122 + g.hardwareAddr[0] |= 0x01 + }) + if err != nil { + return []byte{}, err + } + return g.hardwareAddr[:], nil +} + +// Returns the difference between UUID epoch (October 15, 1582) +// and current time in 100-nanosecond intervals. +func (g *Gen) getEpoch() uint64 { + return epochStart + uint64(g.epochFunc().UnixNano()/100) +} + +// Returns the UUID based on the hashing of the namespace UUID and name. +func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +// Returns the hardware address. 
+func defaultHWAddrFunc() (net.HardwareAddr, error) { + ifaces, err := net.Interfaces() + if err != nil { + return []byte{}, err + } + for _, iface := range ifaces { + if len(iface.HardwareAddr) >= 6 { + return iface.HardwareAddr, nil + } + } + return []byte{}, fmt.Errorf("uuid: no HW address found") +} diff --git a/vendor/github.com/gofrs/uuid/sql.go b/vendor/github.com/gofrs/uuid/sql.go new file mode 100644 index 00000000000..6f254a4fd10 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/sql.go @@ -0,0 +1,109 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +// Value implements the driver.Valuer interface. +func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. +// A 16-byte slice will be handled by UnmarshalBinary, while +// a longer byte slice or a string will be handled by UnmarshalText. +func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case UUID: // support gorm convert from UUID to NullUUID + *u = src + return nil + + case []byte: + if len(src) == Size { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// NullUUID can be used with the standard sql package to represent a +// UUID value that can be NULL in the database. +type NullUUID struct { + UUID UUID + Valid bool +} + +// Value implements the driver.Valuer interface. +func (u NullUUID) Value() (driver.Value, error) { + if !u.Valid { + return nil, nil + } + // Delegate to UUID Value function + return u.UUID.Value() +} + +// Scan implements the sql.Scanner interface. 
+func (u *NullUUID) Scan(src interface{}) error { + if src == nil { + u.UUID, u.Valid = Nil, false + return nil + } + + // Delegate to UUID Scan function + u.Valid = true + return u.UUID.Scan(src) +} + +// MarshalJSON marshals the NullUUID as null or the nested UUID +func (u NullUUID) MarshalJSON() ([]byte, error) { + if !u.Valid { + return json.Marshal(nil) + } + + return json.Marshal(u.UUID) +} + +// UnmarshalJSON unmarshals a NullUUID +func (u *NullUUID) UnmarshalJSON(b []byte) error { + if bytes.Equal(b, []byte("null")) { + u.UUID, u.Valid = Nil, false + return nil + } + + if err := json.Unmarshal(b, &u.UUID); err != nil { + return err + } + + u.Valid = true + + return nil +} diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go new file mode 100644 index 00000000000..f314b845736 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/uuid.go @@ -0,0 +1,292 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Package uuid provides implementations of the Universally Unique Identifier +// (UUID), as specified in RFC-4122 and the Peabody RFC Draft (revision 02). +// +// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. The +// Peabody UUID RFC Draft[2] provides the specification for the new k-sortable +// UUIDs, versions 6 and 7. +// +// DCE 1.1[3] provides the specification for version 2, but version 2 support +// was removed from this package in v4 due to some concerns with the +// specification itself. Reading the spec, it seems that it would result in +// generating UUIDs that aren't very unique. In having read the spec it seemed +// that our implementation did not meet the spec. It also seems to be at-odds +// with RFC 4122, meaning we would need quite a bit of special code to support +// it. Lastly, there were no Version 2 implementations that we could find to +// ensure we were understanding the specification correctly. +// +// [1] https://tools.ietf.org/html/rfc4122 +// [2] https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-02 +// [3] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 +package uuid + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "strings" + "time" +) + +// Size of a UUID in bytes. +const Size = 16 + +// UUID is an array type to represent the value of a UUID, as defined in RFC-4122. +type UUID [Size]byte + +// UUID versions. 
+const ( + _ byte = iota + V1 // Version 1 (date-time and MAC address) + _ // Version 2 (date-time and MAC address, DCE security version) [removed] + V3 // Version 3 (namespace name-based) + V4 // Version 4 (random) + V5 // Version 5 (namespace name-based) + V6 // Version 6 (k-sortable timestamp and random data) [peabody draft] + V7 // Version 7 (k-sortable timestamp, with configurable precision, and random data) [peabody draft] + _ // Version 8 (k-sortable timestamp, meant for custom implementations) [peabody draft] [not implemented] +) + +// UUID layout variants. +const ( + VariantNCS byte = iota + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// UUID DCE domains. +const ( + DomainPerson = iota + DomainGroup + DomainOrg +) + +// Timestamp is the count of 100-nanosecond intervals since 00:00:00.00, +// 15 October 1582 within a V1 UUID. This type has no meaning for other +// UUID versions since they don't have an embedded timestamp. +type Timestamp uint64 + +const _100nsPerSecond = 10000000 + +// Time returns the UTC time.Time representation of a Timestamp +func (t Timestamp) Time() (time.Time, error) { + secs := uint64(t) / _100nsPerSecond + nsecs := 100 * (uint64(t) % _100nsPerSecond) + + return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil +} + +// TimestampFromV1 returns the Timestamp embedded within a V1 UUID. +// Returns an error if the UUID is any version other than 1. +func TimestampFromV1(u UUID) (Timestamp, error) { + if u.Version() != 1 { + err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version()) + return 0, err + } + + low := binary.BigEndian.Uint32(u[0:4]) + mid := binary.BigEndian.Uint16(u[4:6]) + hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff + + return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil +} + +// TimestampFromV6 returns the Timestamp embedded within a V6 UUID. This +// function returns an error if the UUID is any version other than 6. +// +// This is implemented based on revision 01 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func TimestampFromV6(u UUID) (Timestamp, error) { + if u.Version() != 6 { + return 0, fmt.Errorf("uuid: %s is version %d, not version 6", u, u.Version()) + } + + hi := binary.BigEndian.Uint32(u[0:4]) + mid := binary.BigEndian.Uint16(u[4:6]) + low := binary.BigEndian.Uint16(u[6:8]) & 0xfff + + return Timestamp(uint64(low) + (uint64(mid) << 12) + (uint64(hi) << 28)), nil +} + +// String parse helpers. +var ( + urnPrefix = []byte("urn:uuid:") + byteGroups = []int{8, 4, 4, 4, 12} +) + +// Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to +// zero. +var Nil = UUID{} + +// Predefined namespace UUIDs. +var ( + NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) +) + +// IsNil returns if the UUID is equal to the nil UUID +func (u UUID) IsNil() bool { + return u == Nil +} + +// Version returns the algorithm version used to generate the UUID. +func (u UUID) Version() byte { + return u[6] >> 4 +} + +// Variant returns the UUID layout variant. 
+func (u UUID) Variant() byte { + switch { + case (u[8] >> 7) == 0x00: + return VariantNCS + case (u[8] >> 6) == 0x02: + return VariantRFC4122 + case (u[8] >> 5) == 0x06: + return VariantMicrosoft + case (u[8] >> 5) == 0x07: + fallthrough + default: + return VariantFuture + } +} + +// Bytes returns a byte slice representation of the UUID. +func (u UUID) Bytes() []byte { + return u[:] +} + +// String returns a canonical RFC-4122 string representation of the UUID: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// Format implements fmt.Formatter for UUID values. +// +// The behavior is as follows: +// The 'x' and 'X' verbs output only the hex digits of the UUID, using a-f for 'x' and A-F for 'X'. +// The 'v', '+v', 's' and 'q' verbs return the canonical RFC-4122 string representation. +// The 'S' verb returns the RFC-4122 format, but with capital hex digits. +// The '#v' verb returns the "Go syntax" representation, which is a 16 byte array initializer. +// All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return +// "%!verb(uuid.UUID=value)" as recommended by the fmt package. +func (u UUID) Format(f fmt.State, c rune) { + switch c { + case 'x', 'X': + s := hex.EncodeToString(u.Bytes()) + if c == 'X' { + s = strings.Map(toCapitalHexDigits, s) + } + _, _ = io.WriteString(f, s) + case 'v': + var s string + if f.Flag('#') { + s = fmt.Sprintf("%#v", [Size]byte(u)) + } else { + s = u.String() + } + _, _ = io.WriteString(f, s) + case 's', 'S': + s := u.String() + if c == 'S' { + s = strings.Map(toCapitalHexDigits, s) + } + _, _ = io.WriteString(f, s) + case 'q': + _, _ = io.WriteString(f, `"`+u.String()+`"`) + default: + // invalid/unsupported format verb + fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String()) + } +} + +func toCapitalHexDigits(ch rune) rune { + // convert a-f hex digits to A-F + switch ch { + case 'a': + return 'A' + case 'b': + return 'B' + case 'c': + return 'C' + case 'd': + return 'D' + case 'e': + return 'E' + case 'f': + return 'F' + default: + return ch + } +} + +// SetVersion sets the version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets the variant bits. +func (u *UUID) SetVariant(v byte) { + switch v { + case VariantNCS: + u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) + case VariantRFC4122: + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + case VariantMicrosoft: + u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) + case VariantFuture: + fallthrough + default: + u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) + } +} + +// Must is a helper that wraps a call to a function returning (UUID, error) +// and panics if the error is non-nil. It is intended for use in variable +// initializations such as +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) +func Must(u UUID, err error) UUID { + if err != nil { + panic(err) + } + return u +} diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml index f8684d99fc4..061d72ae079 100644 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ b/vendor/github.com/google/gofuzz/.travis.yml @@ -1,13 +1,10 @@ language: go go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! 
go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - 1.11.x + - 1.12.x + - 1.13.x + - master script: - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md index 51cf5cd1ada..97c1b34fd5e 100644 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md @@ -1,7 +1,7 @@ # How to contribute # We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. +just a few small guidelines you need to follow. ## Contributor License Agreement ## diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 386c2a457a8..b503aae7d71 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -68,4 +68,22 @@ f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. See more examples in ```example_test.go```. +You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing. +go-fuzz provides the user a byte-slice, which should be converted to different inputs +for the tested function. This library can help convert the byte slice. Consider for +example a fuzz test for the function `mypackage.MyFunc` that takes an int argument: +```go +// +build gofuzz +package mypackage + +import fuzz "github.com/google/gofuzz" + +func Fuzz(data []byte) int { + var i int + fuzz.NewFromGoFuzz(data).Fuzz(&i) + MyFunc(i) + return 0 +} +``` + Happy testing! diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/github.com/google/gofuzz/bytesource/bytesource.go new file mode 100644 index 00000000000..5bb36594969 --- /dev/null +++ b/vendor/github.com/google/gofuzz/bytesource/bytesource.go @@ -0,0 +1,81 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package bytesource provides a rand.Source64 that is determined by a slice of bytes. +package bytesource + +import ( + "bytes" + "encoding/binary" + "io" + "math/rand" +) + +// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are +// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a +// fallback pseudo random source is created in case more random numbers are required. +// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly. +type ByteSource struct { + *bytes.Reader + fallback rand.Source +} + +// New returns a new ByteSource from a given slice of bytes. +func New(input []byte) *ByteSource { + s := &ByteSource{ + Reader: bytes.NewReader(input), + fallback: rand.NewSource(0), + } + if len(input) > 0 { + s.fallback = rand.NewSource(int64(s.consumeUint64())) + } + return s +} + +func (s *ByteSource) Uint64() uint64 { + // Return from input if it was not exhausted.
+ if s.Len() > 0 { + return s.consumeUint64() + } + + // Input was exhausted, return random number from fallback (in this case fallback should not be + // nil). Try first having a Uint64 output (Should work in current rand implementation), + // otherwise return a conversion of Int63. + if s64, ok := s.fallback.(rand.Source64); ok { + return s64.Uint64() + } + return uint64(s.fallback.Int63()) +} + +func (s *ByteSource) Int63() int64 { + return int64(s.Uint64() >> 1) +} + +func (s *ByteSource) Seed(seed int64) { + s.fallback = rand.NewSource(seed) + s.Reader = bytes.NewReader(nil) +} + +// consumeUint64 reads 8 bytes from the input and converts them to a uint64. It assumes that the +// bytes reader is not empty. +func (s *ByteSource) consumeUint64() uint64 { + var bytes [8]byte + _, err := s.Read(bytes[:]) + if err != nil && err != io.EOF { + panic("failed reading source") // Should not happen. + } + return binary.BigEndian.Uint64(bytes[:]) +} diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index da0a5f93800..761520a8cee 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -22,6 +22,9 @@ import ( "reflect" "regexp" "time" + + "github.com/google/gofuzz/bytesource" + "strings" ) // fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. @@ -61,6 +64,34 @@ func NewWithSeed(seed int64) *Fuzzer { return f } +// NewFromGoFuzz is a helper function that enables using gofuzz (this +// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous +// fuzzing. Essentially, it enables translating the fuzzing bytes from +// go-fuzz to any Go object using this library. +// +// This implementation promises a constant translation from a given slice of +// bytes to the fuzzed objects. This promise will remain over future +// versions of Go and of this library. +// +// Note: the returned Fuzzer should not be shared between multiple goroutines, +// as its deterministic output will no longer be available. +// +// Example: use go-fuzz to test the function `MyFunc(int)` in the package +// `mypackage`. Add the file: "mypackage_fuzz.go" with the content: +// +// // +build gofuzz +// package mypackage +// import fuzz "github.com/google/gofuzz" +// func Fuzz(data []byte) int { +// var i int +// fuzz.NewFromGoFuzz(data).Fuzz(&i) +// MyFunc(i) +// return 0 +// } +func NewFromGoFuzz(data []byte) *Fuzzer { + return New().RandSource(bytesource.New(data)) +} + // Funcs adds each entry in fuzzFuncs as a custom fuzzing function. // // Each entry in fuzzFuncs must be a function taking two parameters.
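As a minimal usage sketch of the NewFromGoFuzz helper added above: the `Config` type, its fields, and the validity check below are hypothetical, illustrative names only, chosen just to show that the same deterministic byte-to-object translation works for arbitrary Go values, not only the `int` in the doc-comment example.

```go
// +build gofuzz

package mypackage

import fuzz "github.com/google/gofuzz"

// Config is a hypothetical struct, used only to show that a go-fuzz byte
// slice can be translated into a richer Go object than a single int.
type Config struct {
	Name    string
	Retries int
	Tags    []string
}

// Fuzz is the entry point go-fuzz expects. Because NewFromGoFuzz promises a
// constant translation from input bytes to fuzzed objects, any failure found
// here can be replayed from the same corpus entry.
func Fuzz(data []byte) int {
	var c Config
	fuzz.NewFromGoFuzz(data).Fuzz(&c)
	if c.Retries < 0 {
		// Negative ints are expected; the fuzzer fills the full int range.
		return 0
	}
	return 1
}
```

Because a fuzzer seeded this way is deterministic, it should not be shared across goroutines, as the vendored doc comment above notes.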
@@ -141,7 +172,7 @@ func (f *Fuzzer) genElementCount() int { } func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() > f.nilChance + return f.r.Float64() >= f.nilChance } // MaxDepth sets the maximum number of recursive fuzz calls that will be made @@ -240,6 +271,7 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { fn(v, fc.fuzzer.r) return } + switch v.Kind() { case reflect.Map: if fc.fuzzer.genShouldFill() { @@ -450,10 +482,10 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ v.SetFloat(r.Float64()) }, reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) }, reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex(r.Float64(), r.Float64())) }, reflect.String: func(v reflect.Value, r *rand.Rand) { v.SetString(randString(r)) }, @@ -465,38 +497,105 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ // randBool returns true or false randomly. func randBool(r *rand.Rand) bool { - if r.Int()&1 == 1 { - return true - } - return false + return r.Int31()&(1<<30) == 0 +} + +type int63nPicker interface { + Int63n(int64) int64 } -type charRange struct { - first, last rune +// UnicodeRange describes a sequential range of unicode characters. +// Last must be numerically greater than First. +type UnicodeRange struct { + First, Last rune } +// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. +// To be useful, each range must have at least one character (First <= Last) and +// there must be at least one range. +type UnicodeRanges []UnicodeRange + // choose returns a random unicode character from the given range, using the // given randomness source. -func (r *charRange) choose(rand *rand.Rand) rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) +func (ur UnicodeRange) choose(r int63nPicker) rune { + count := int64(ur.Last - ur.First + 1) + return ur.First + rune(r.Int63n(count)) +} + +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from the range ur. If there are no characters +// in the range (ur.Last < ur.First), this will panic. +func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { + ur.check() + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } } -var unicodeRanges = []charRange{ +// check is a function that is used to check whether the first of ur(UnicodeRange) +// is greater than the last one. +func (ur UnicodeRange) check() { + if ur.Last < ur.First { + panic("The last encoding must be greater than the first one.") + } +} + +// randString of UnicodeRange makes a random string up to 20 characters long. +// Each character is selected from ur(UnicodeRange). +func (ur UnicodeRange) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur.choose(r)) + } + return sb.String() +} + +// defaultUnicodeRanges sets a default unicode range when the user does not set +// CustomStringFuzzFunc() but wants to fuzz a string. +var defaultUnicodeRanges = UnicodeRanges{ {' ', '~'}, // ASCII characters {'\u00a0', '\u02af'}, // Multi-byte encoded characters {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) } +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from one of the ranges of ur(UnicodeRanges).
+// Each range has an equal probability of being chosen. If there are no ranges, +// or a selected range has no characters (.Last < .First), this will panic. +// Do not modify any of the ranges in ur after calling this function. +func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { + // Check unicode ranges slice is empty. + if len(ur) == 0 { + panic("UnicodeRanges is empty.") + } + // if not empty, each range should be checked. + for i := range ur { + ur[i].check() + } + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } +} + +// randString of UnicodeRanges makes a random string up to 20 characters long. +// Each character is selected form one of the ranges of ur(UnicodeRanges), +// and each range has an equal probability of being chosen. +func (ur UnicodeRanges) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) + } + return sb.String() +} + // randString makes a random string up to 20 characters long. The returned string // may include a variety of (valid) UTF-8 encodings. func randString(r *rand.Rand) string { - n := r.Intn(20) - runes := make([]rune, n) - for i := range runes { - runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) - } - return string(runes) + return defaultUnicodeRanges.randString(r) } // randUint64 makes random 64 bit numbers. diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml new file mode 100644 index 00000000000..769849071ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml @@ -0,0 +1,12 @@ +language: go + +sudo: false + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +script: + - go test -bench . -benchmem -v ./... diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. 
"Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. 
You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md new file mode 100644 index 00000000000..fbde8b9aef6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/README.md @@ -0,0 +1,8 @@ +# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) + +Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes. + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid). diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod new file mode 100644 index 00000000000..dd57f9d21ad --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-uuid diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go new file mode 100644 index 00000000000..0c10c4e9f5f --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -0,0 +1,83 @@ +package uuid + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "io" +) + +// GenerateRandomBytes is used to generate random bytes of given size. +func GenerateRandomBytes(size int) ([]byte, error) { + return GenerateRandomBytesWithReader(size, rand.Reader) +} + +// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader. 
+func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) { + if reader == nil { + return nil, fmt.Errorf("provided reader is nil") + } + buf := make([]byte, size) + if _, err := io.ReadFull(reader, buf); err != nil { + return nil, fmt.Errorf("failed to read random bytes: %v", err) + } + return buf, nil +} + + +const uuidLen = 16 + +// GenerateUUID is used to generate a random UUID +func GenerateUUID() (string, error) { + return GenerateUUIDWithReader(rand.Reader) +} + +// GenerateUUIDWithReader is used to generate a random UUID with a given Reader +func GenerateUUIDWithReader(reader io.Reader) (string, error) { + if reader == nil { + return "", fmt.Errorf("provided reader is nil") + } + buf, err := GenerateRandomBytesWithReader(uuidLen, reader) + if err != nil { + return "", err + } + return FormatUUID(buf) +} + +func FormatUUID(buf []byte) (string, error) { + if buflen := len(buf); buflen != uuidLen { + return "", fmt.Errorf("wrong length byte slice (%d)", buflen) + } + + return fmt.Sprintf("%x-%x-%x-%x-%x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]), nil +} + +func ParseUUID(uuid string) ([]byte, error) { + if len(uuid) != 2 * uuidLen + 4 { + return nil, fmt.Errorf("uuid string is wrong length") + } + + if uuid[8] != '-' || + uuid[13] != '-' || + uuid[18] != '-' || + uuid[23] != '-' { + return nil, fmt.Errorf("uuid is improperly formatted") + } + + hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36] + + ret, err := hex.DecodeString(hexStr) + if err != nil { + return nil, err + } + if len(ret) != uuidLen { + return nil, fmt.Errorf("decoded hex is the wrong length") + } + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md new file mode 100644 index 00000000000..2020c472743 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -0,0 +1,32 @@ +# 1.4.0 (January 5, 2021) + +FEATURES: + + - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87)) + - `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88)) + +# 1.3.0 (March 31, 2021) + +Please note that CHANGELOG.md does not exist in the source code prior to this release. + +FEATURES: + - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85)) + +# 1.2.1 (June 17, 2020) + +BUG FIXES: + - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73)) + +# 1.2.0 (April 23, 2019) + +FEATURES: + - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53)) + +# 1.1.0 (Jan 07, 2019) + +FEATURES: + - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45)) + +# 1.0.0 (August 24, 2018) + +Initial release. diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
“Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 00000000000..851a337beb4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,66 @@ +# Versioning Library for Go +[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/master) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. 
If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 00000000000..1d880902810 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,290 @@ +package version + +import ( + "fmt" + "reflect" + "regexp" + "sort" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + op operator + check *Version + original string +} + +func (c *Constraint) Equals(con *Constraint) bool { + return c.op == con.op && c.check.Equal(con.check) +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. +type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintOperation + +type constraintOperation struct { + op operator + f constraintFunc +} + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintOperation{ + "": {op: equal, f: constraintEqual}, + "=": {op: equal, f: constraintEqual}, + "!=": {op: notEqual, f: constraintNotEqual}, + ">": {op: greaterThan, f: constraintGreaterThan}, + "<": {op: lessThan, f: constraintLessThan}, + ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, + "<=": {op: lessThanEqual, f: constraintLessThanEqual}, + "~>": {op: pessimistic, f: constraintPessimistic}, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// MustConstraints is a helper that wraps a call to a function +// returning (Constraints, error) and panics if error is non-nil. +func MustConstraints(c Constraints, err error) Constraints { + if err != nil { + panic(err) + } + + return c +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Equals compares Constraints with other Constraints +// for equality. This may not represent logical equivalence +// of compared constraints. +// e.g. even though '>0.1,>0.2' is logically equivalent +// to '>0.2' it is *NOT* treated as equal. +// +// Missing operator is treated as equal to '=', whitespaces +// are ignored and constraints are sorted before comaparison. 
+func (cs Constraints) Equals(c Constraints) bool { + if len(cs) != len(c) { + return false + } + + // make copies to retain order of the original slices + left := make(Constraints, len(cs)) + copy(left, cs) + sort.Stable(left) + right := make(Constraints, len(c)) + copy(right, c) + sort.Stable(right) + + // compare sorted slices + for i, con := range left { + if !con.Equals(right[i]) { + return false + } + } + + return true +} + +func (cs Constraints) Len() int { + return len(cs) +} + +func (cs Constraints) Less(i, j int) bool { + if cs[i].op < cs[j].op { + return true + } + if cs[i].op > cs[j].op { + return false + } + + return cs[i].check.LessThan(cs[j].check) +} + +func (cs Constraints) Swap(i, j int) { + cs[i], cs[j] = cs[j], cs[i] +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + cop := constraintOperators[matches[1]] + + return &Constraint{ + f: cop.f, + op: cop.op, + check: check, + original: v, + }, nil +} + +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. 
+ return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +type operator rune + +const ( + equal operator = '=' + notEqual operator = '≠' + greaterThan operator = '>' + lessThan operator = '<' + greaterThanEqual operator = '≥' + lessThanEqual operator = '≤' + pessimistic operator = '~' +) + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod new file mode 100644 index 00000000000..f5285555fa8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-version diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 00000000000..116a74466db --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,390 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) + +// The raw regular expression string used for testing the validity +// of a version. 
+const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) + +// Version represents a single version. +type Version struct { + metadata string + pre string + segments []int64 + si int + original string +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = val + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + pre := matches[7] + if pre == "" { + pre = matches[4] + } + + return &Version{ + metadata: matches[10], + pre: pre, + segments: segments, + si: len(segmentsStr), + original: v, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. 
+func (v *Version) Compare(other *Version) int { + // A quick, efficient equality check + if v.String() == other.String() { + return 0 + } + + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() + + // If the segments are the same, we must compare on prerelease info + if reflect.DeepEqual(segmentsSelf, segmentsOther) { + preSelf := v.Prerelease() + preOther := other.Prerelease() + if preSelf == "" && preOther == "" { + return 0 + } + if preSelf == "" { + return 1 + } + if preOther == "" { + return -1 + } + + return comparePrereleases(preSelf, preOther) + } + + // Get the highest specificity (hS), or if they're equal, just use segmentSelf length + lenSelf := len(segmentsSelf) + lenOther := len(segmentsOther) + hS := lenSelf + if lenSelf < lenOther { + hS = lenOther + } + // Compare the segments + // Because a constraint could have more/less specificity than the version it's + // checking, we need to account for a lopsided or jagged comparison + for i := 0; i < hS; i++ { + if i > lenSelf-1 { + // This means Self had the lower specificity + // Check to see if the remaining segments in Other are all zeros + if !allZero(segmentsOther[i:]) { + // if not, it means that Other has to be greater than Self + return -1 + } + break + } else if i > lenOther-1 { + // this means Other had the lower specificity + // Check to see if the remaining segments in Self are all zeros - + if !allZero(segmentsSelf[i:]) { + //if not, it means that Self has to be greater than Other + return 1 + } + break + } + lhs := segmentsSelf[i] + rhs := segmentsOther[i] + if lhs == rhs { + continue + } else if lhs < rhs { + return -1 + } + // Otherwis, rhs was > lhs, they're not equal + return 1 + } + + // if we got this far, they're equal + return 0 +} + +func allZero(segs []int64) bool { + for _, s := range segs { + if s != 0 { + return false + } + } + return true +} + +func comparePart(preSelf string, preOther string) int { + if preSelf == preOther { + return 0 + } + + var selfInt int64 + selfNumeric := true + selfInt, err := strconv.ParseInt(preSelf, 10, 64) + if err != nil { + selfNumeric = false + } + + var otherInt int64 + otherNumeric := true + otherInt, err = strconv.ParseInt(preOther, 10, 64) + if err != nil { + otherNumeric = false + } + + // if a part is empty, we use the other to decide + if preSelf == "" { + if otherNumeric { + return -1 + } + return 1 + } + + if preOther == "" { + if selfNumeric { + return 1 + } + return -1 + } + + if selfNumeric && !otherNumeric { + return -1 + } else if !selfNumeric && otherNumeric { + return 1 + } else if !selfNumeric && !otherNumeric && preSelf > preOther { + return 1 + } else if selfInt > otherInt { + return 1 + } + + return -1 +} + +func comparePrereleases(v string, other string) int { + // the same pre release! 
+ if v == other { + return 0 + } + + // split both pre releases for analyse their parts + selfPreReleaseMeta := strings.Split(v, ".") + otherPreReleaseMeta := strings.Split(other, ".") + + selfPreReleaseLen := len(selfPreReleaseMeta) + otherPreReleaseLen := len(otherPreReleaseMeta) + + biggestLen := otherPreReleaseLen + if selfPreReleaseLen > otherPreReleaseLen { + biggestLen = selfPreReleaseLen + } + + // loop for parts to find the first difference + for i := 0; i < biggestLen; i = i + 1 { + partSelfPre := "" + if i < selfPreReleaseLen { + partSelfPre = selfPreReleaseMeta[i] + } + + partOtherPre := "" + if i < otherPreReleaseLen { + partOtherPre = otherPreReleaseMeta[i] + } + + compare := comparePart(partSelfPre, partOtherPre) + // if parts are equals, continue the loop + if compare != 0 { + return compare + } + } + + return 0 +} + +// Core returns a new version constructed from only the MAJOR.MINOR.PATCH +// segments of the version, without prerelease or metadata. +func (v *Version) Core() *Version { + segments := v.Segments64() + segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2]) + return Must(NewVersion(segmentsOnly)) +} + +// Equal tests if two versions are equal. +func (v *Version) Equal(o *Version) bool { + if v == nil || o == nil { + return v == o + } + + return v.Compare(o) == 0 +} + +// GreaterThan tests if this version is greater than another version. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// GreaterThanOrEqual tests if this version is greater than or equal to another version. +func (v *Version) GreaterThanOrEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + +// LessThan tests if this version is less than another version. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// LessThanOrEqual tests if this version is less than or equal to another version. +func (v *Version) LessThanOrEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + +// Metadata returns any metadata that was part of the version +// string. +// +// Metadata is anything that comes after the "+" in the version. +// For example, with "1.2.3+beta", the metadata is "beta". +func (v *Version) Metadata() string { + return v.metadata +} + +// Prerelease returns any prerelease data that is part of the version, +// or blank if there is no prerelease data. +// +// Prerelease information is anything that comes after the "-" in the +// version (but before any metadata). For example, with "1.2.3-beta", +// the prerelease information is "beta". +func (v *Version) Prerelease() string { + return v.pre +} + +// Segments returns the numeric segments of the version as a slice of ints. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments() []int { + segmentSlice := make([]int, len(v.segments)) + for i, v := range v.segments { + segmentSlice[i] = int(v) + } + return segmentSlice +} + +// Segments64 returns the numeric segments of the version as a slice of int64s. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments64() []int64 { + result := make([]int64, len(v.segments)) + copy(result, v.segments) + return result +} + +// String returns the full version string included pre-release +// and metadata information. 
+// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. +func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 00000000000..cc888d43e6b --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,17 @@ +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/jcmturner/aescts/v2/LICENSE b/vendor/github.com/jcmturner/aescts/v2/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/jcmturner/aescts/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/jcmturner/aescts/v2/aescts.go b/vendor/github.com/jcmturner/aescts/v2/aescts.go new file mode 100644 index 00000000000..fee3b43e91e --- /dev/null +++ b/vendor/github.com/jcmturner/aescts/v2/aescts.go @@ -0,0 +1,186 @@ +// Package aescts provides AES CBC CipherText Stealing encryption and decryption methods +package aescts + +import ( + "crypto/aes" + "crypto/cipher" + "errors" + "fmt" +) + +// Encrypt the message with the key and the initial vector. +// Returns: next iv, ciphertext bytes, error +func Encrypt(key, iv, plaintext []byte) ([]byte, []byte, error) { + l := len(plaintext) + + block, err := aes.NewCipher(key) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("error creating cipher: %v", err) + } + mode := cipher.NewCBCEncrypter(block, iv) + + m := make([]byte, len(plaintext)) + copy(m, plaintext) + + /*For consistency, ciphertext stealing is always used for the last two + blocks of the data to be encrypted, as in [RC5]. 
If the data length + is a multiple of the block size, this is equivalent to plain CBC mode + with the last two ciphertext blocks swapped.*/ + /*The initial vector carried out from one encryption for use in a + subsequent encryption is the next-to-last block of the encryption + output; this is the encrypted form of the last plaintext block.*/ + if l <= aes.BlockSize { + m, _ = zeroPad(m, aes.BlockSize) + mode.CryptBlocks(m, m) + return m, m, nil + } + if l%aes.BlockSize == 0 { + mode.CryptBlocks(m, m) + iv = m[len(m)-aes.BlockSize:] + rb, _ := swapLastTwoBlocks(m, aes.BlockSize) + return iv, rb, nil + } + m, _ = zeroPad(m, aes.BlockSize) + rb, pb, lb, err := tailBlocks(m, aes.BlockSize) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("error tailing blocks: %v", err) + } + var ct []byte + if rb != nil { + // Encrypt all but the last 2 blocks and update the rolling iv + mode.CryptBlocks(rb, rb) + iv = rb[len(rb)-aes.BlockSize:] + mode = cipher.NewCBCEncrypter(block, iv) + ct = append(ct, rb...) + } + mode.CryptBlocks(pb, pb) + mode = cipher.NewCBCEncrypter(block, pb) + mode.CryptBlocks(lb, lb) + // Cipher Text Stealing (CTS) - Ref: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing + // Swap the last two cipher blocks + // Truncate the ciphertext to the length of the original plaintext + ct = append(ct, lb...) + ct = append(ct, pb...) + return lb, ct[:l], nil +} + +// Decrypt the ciphertext with the key and the initial vector. +func Decrypt(key, iv, ciphertext []byte) ([]byte, error) { + // Copy the cipher text as golang slices even when passed by value to this method can result in the backing arrays of the calling code value being updated. + ct := make([]byte, len(ciphertext)) + copy(ct, ciphertext) + if len(ct) < aes.BlockSize { + return []byte{}, fmt.Errorf("ciphertext is not large enough. It is less that one block size. Blocksize:%v; Ciphertext:%v", aes.BlockSize, len(ct)) + } + // Configure the CBC + block, err := aes.NewCipher(key) + if err != nil { + return nil, fmt.Errorf("error creating cipher: %v", err) + } + var mode cipher.BlockMode + + //If ciphertext is multiple of blocksize we just need to swap back the last two blocks and then do CBC + //If the ciphertext is just one block we can't swap so we just decrypt + if len(ct)%aes.BlockSize == 0 { + if len(ct) > aes.BlockSize { + ct, _ = swapLastTwoBlocks(ct, aes.BlockSize) + } + mode = cipher.NewCBCDecrypter(block, iv) + message := make([]byte, len(ct)) + mode.CryptBlocks(message, ct) + return message[:len(ct)], nil + } + + // Cipher Text Stealing (CTS) using CBC interface. Ref: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing + // Get ciphertext of the 2nd to last (penultimate) block (cpb), the last block (clb) and the rest (crb) + crb, cpb, clb, _ := tailBlocks(ct, aes.BlockSize) + v := make([]byte, len(iv), len(iv)) + copy(v, iv) + var message []byte + if crb != nil { + //If there is more than just the last and the penultimate block we decrypt it and the last block of this becomes the iv for later + rb := make([]byte, len(crb)) + mode = cipher.NewCBCDecrypter(block, v) + v = crb[len(crb)-aes.BlockSize:] + mode.CryptBlocks(rb, crb) + message = append(message, rb...)
+ } + + // We need to modify the cipher text + // Decrypt the 2nd to last (penultimate) block with the original iv + pb := make([]byte, aes.BlockSize) + mode = cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(pb, cpb) + // number of bytes needed to pad + npb := aes.BlockSize - len(ct)%aes.BlockSize + //pad last block using the number of bytes needed from the tail of the plaintext 2nd to last (penultimate) block + clb = append(clb, pb[len(pb)-npb:]...) + + // Now decrypt the last block in the penultimate position (iv will be from the crb, if there is no crb it's zeros) + // iv for the penultimate block decrypted in the last position becomes the modified last block + lb := make([]byte, aes.BlockSize) + mode = cipher.NewCBCDecrypter(block, v) + v = clb + mode.CryptBlocks(lb, clb) + message = append(message, lb...) + + // Now decrypt the penultimate block in the last position (iv will be from the modified last block) + mode = cipher.NewCBCDecrypter(block, v) + mode.CryptBlocks(cpb, cpb) + message = append(message, cpb...) + + // Truncate to the size of the original cipher text + return message[:len(ct)], nil +} + +func tailBlocks(b []byte, c int) ([]byte, []byte, []byte, error) { + if len(b) <= c { + return []byte{}, []byte{}, []byte{}, errors.New("bytes slice is not larger than one block so cannot tail") + } + // Get size of last block + var lbs int + if l := len(b) % aes.BlockSize; l == 0 { + lbs = aes.BlockSize + } else { + lbs = l + } + // Get last block + lb := b[len(b)-lbs:] + // Get 2nd to last (penultimate) block + pb := b[len(b)-lbs-c : len(b)-lbs] + if len(b) > 2*c { + rb := b[:len(b)-lbs-c] + return rb, pb, lb, nil + } + return nil, pb, lb, nil +} + +func swapLastTwoBlocks(b []byte, c int) ([]byte, error) { + rb, pb, lb, err := tailBlocks(b, c) + if err != nil { + return nil, err + } + var out []byte + if rb != nil { + out = append(out, rb...) + } + out = append(out, lb...) + out = append(out, pb...) + return out, nil +} + +// zeroPad pads bytes with zeros to nearest multiple of message size m. +func zeroPad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("invalid message block size when padding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("data not valid to pad: Zero size") + } + if l := len(b) % m; l != 0 { + n := m - l + z := make([]byte, n) + b = append(b, z...)
+ } + return b, nil +} diff --git a/vendor/github.com/jcmturner/aescts/v2/go.mod b/vendor/github.com/jcmturner/aescts/v2/go.mod new file mode 100644 index 00000000000..034c3cec60c --- /dev/null +++ b/vendor/github.com/jcmturner/aescts/v2/go.mod @@ -0,0 +1,5 @@ +module github.com/jcmturner/aescts/v2 + +go 1.13 + +require github.com/stretchr/testify v1.4.0 diff --git a/vendor/github.com/jcmturner/aescts/v2/go.sum b/vendor/github.com/jcmturner/aescts/v2/go.sum new file mode 100644 index 00000000000..e863f517e19 --- /dev/null +++ b/vendor/github.com/jcmturner/aescts/v2/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jcmturner/dnsutils/v2/LICENSE b/vendor/github.com/jcmturner/dnsutils/v2/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/jcmturner/dnsutils/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/jcmturner/dnsutils/v2/go.mod b/vendor/github.com/jcmturner/dnsutils/v2/go.mod new file mode 100644 index 00000000000..f75ac6d264d --- /dev/null +++ b/vendor/github.com/jcmturner/dnsutils/v2/go.mod @@ -0,0 +1,5 @@ +module github.com/jcmturner/dnsutils/v2 + +go 1.13 + +require github.com/stretchr/testify v1.4.0 diff --git a/vendor/github.com/jcmturner/dnsutils/v2/go.sum b/vendor/github.com/jcmturner/dnsutils/v2/go.sum new file mode 100644 index 00000000000..e863f517e19 --- /dev/null +++ b/vendor/github.com/jcmturner/dnsutils/v2/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jcmturner/dnsutils/v2/srv.go b/vendor/github.com/jcmturner/dnsutils/v2/srv.go new file mode 100644 index 00000000000..15ea912d100 --- /dev/null +++ b/vendor/github.com/jcmturner/dnsutils/v2/srv.go @@ -0,0 +1,95 @@ +package dnsutils + +import ( + "math/rand" + "net" + "sort" +) + +// OrderedSRV returns a count of the results and a map keyed on the order they should be used. +// This based on the records' priority and randomised selection based on their relative weighting. +// The function's inputs are the same as those for net.LookupSRV +// To use in the correct order: +// +// count, orderedSRV, err := OrderedSRV(service, proto, name) +// i := 1 +// for i <= count { +// srv := orderedSRV[i] +// // Do something such as dial this SRV. If fails move on the the next or break if it succeeds. 
+// i += 1 +// } +func OrderedSRV(service, proto, name string) (int, map[int]*net.SRV, error) { + _, addrs, err := net.LookupSRV(service, proto, name) + if err != nil { + return 0, make(map[int]*net.SRV), err + } + index, osrv := orderSRV(addrs) + return index, osrv, nil +} + +func orderSRV(addrs []*net.SRV) (int, map[int]*net.SRV) { + // Initialise the ordered map + var o int + osrv := make(map[int]*net.SRV) + + prioMap := make(map[int][]*net.SRV, 0) + for _, srv := range addrs { + prioMap[int(srv.Priority)] = append(prioMap[int(srv.Priority)], srv) + } + + priorities := make([]int, 0) + for p := range prioMap { + priorities = append(priorities, p) + } + + var count int + sort.Ints(priorities) + for _, p := range priorities { + tos := weightedOrder(prioMap[p]) + for i, s := range tos { + count += 1 + osrv[o+i] = s + } + o += len(tos) + } + return count, osrv +} + +func weightedOrder(srvs []*net.SRV) map[int]*net.SRV { + // Get the total weight + var tw int + for _, s := range srvs { + tw += int(s.Weight) + } + + // Initialise the ordered map + o := 1 + osrv := make(map[int]*net.SRV) + + // Whilst there are still entries to be ordered + l := len(srvs) + for l > 0 { + i := rand.Intn(l) + s := srvs[i] + var rw int + if tw > 0 { + // Greater the weight the more likely this will be zero or less + rw = rand.Intn(tw) - int(s.Weight) + } + if rw <= 0 { + // Put entry in position + osrv[o] = s + if len(srvs) > 1 { + // Remove the entry from the source slice by swapping with the last entry and truncating + srvs[len(srvs)-1], srvs[i] = srvs[i], srvs[len(srvs)-1] + srvs = srvs[:len(srvs)-1] + l = len(srvs) + } else { + l = 0 + } + o += 1 + tw = tw - int(s.Weight) + } + } + return osrv +} diff --git a/vendor/github.com/jcmturner/gofork/LICENSE b/vendor/github.com/jcmturner/gofork/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
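For context, a minimal sketch of how a caller might use the OrderedSRV helper vendored in srv.go above. This is hypothetical application code, not part of the vendored files or of this patch; it relies only on the OrderedSRV signature shown above, and the "ldap"/"tcp"/"example.com" values are placeholder assumptions.

	package main

	import (
		"fmt"
		"log"
		"net"

		"github.com/jcmturner/dnsutils/v2"
	)

	func main() {
		// OrderedSRV takes the same inputs as net.LookupSRV and returns a count
		// plus a map keyed 1..count in the order the records should be tried
		// (priority, then weighted random selection). Values are placeholders.
		count, orderedSRV, err := dnsutils.OrderedSRV("ldap", "tcp", "example.com")
		if err != nil {
			log.Fatal(err)
		}
		for i := 1; i <= count; i++ {
			srv := orderedSRV[i]
			// Try this SRV target; on failure fall through to the next record.
			conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", srv.Target, srv.Port))
			if err != nil {
				continue
			}
			conn.Close()
			break
		}
	}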
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md new file mode 100644 index 00000000000..66a2a8cca71 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md @@ -0,0 +1,5 @@ +This is a temporary repository that will be removed when the issues below are fixed in the core golang code. + +## Issues +* [encoding/asn1: cannot marshal into a GeneralString](https://github.com/golang/go/issues/18832) +* [encoding/asn1: cannot marshal into slice of strings and pass stringtype parameter tags to members](https://github.com/golang/go/issues/18834) \ No newline at end of file diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go new file mode 100644 index 00000000000..f1bb7671795 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go @@ -0,0 +1,1003 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 implements parsing of DER-encoded ASN.1 data structures, +// as defined in ITU-T Rec X.690. +// +// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,'' +// http://luca.ntop.org/Teaching/Appunti/asn1.html. +package asn1 + +// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc +// are different encoding formats for those objects. Here, we'll be dealing +// with DER, the Distinguished Encoding Rules. DER is used in X.509 because +// it's fast to parse and, unlike BER, has a unique encoding for every object. +// When calculating hashes over objects, it's important that the resulting +// bytes be the same at both ends and DER removes this margin of error. +// +// ASN.1 is very complex and this package doesn't attempt to implement +// everything by any means. + +import ( + "errors" + "fmt" + "math/big" + "reflect" + "strconv" + "time" + "unicode/utf8" +) + +// A StructuralError suggests that the ASN.1 data is valid, but the Go type +// which is receiving it doesn't match. +type StructuralError struct { + Msg string +} + +func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg } + +// A SyntaxError suggests that the ASN.1 data is invalid. +type SyntaxError struct { + Msg string +} + +func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg } + +// We start by dealing with each of the primitive types in turn. + +// BOOLEAN + +func parseBool(bytes []byte) (ret bool, err error) { + if len(bytes) != 1 { + err = SyntaxError{"invalid boolean"} + return + } + + // DER demands that "If the encoding represents the boolean value TRUE, + // its single contents octet shall have all eight bits set to one." + // Thus only 0 and 255 are valid encoded values. + switch bytes[0] { + case 0: + ret = false + case 0xff: + ret = true + default: + err = SyntaxError{"invalid boolean"} + } + + return +} + +// INTEGER + +// checkInteger returns nil if the given bytes are a valid DER-encoded +// INTEGER and an error otherwise. 
+func checkInteger(bytes []byte) error { + if len(bytes) == 0 { + return StructuralError{"empty integer"} + } + if len(bytes) == 1 { + return nil + } + if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) { + return StructuralError{"integer not minimally-encoded"} + } + return nil +} + +// parseInt64 treats the given bytes as a big-endian, signed integer and +// returns the result. +func parseInt64(bytes []byte) (ret int64, err error) { + err = checkInteger(bytes) + if err != nil { + return + } + if len(bytes) > 8 { + // We'll overflow an int64 in this case. + err = StructuralError{"integer too large"} + return + } + for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { + ret <<= 8 + ret |= int64(bytes[bytesRead]) + } + + // Shift up and down in order to sign extend the result. + ret <<= 64 - uint8(len(bytes))*8 + ret >>= 64 - uint8(len(bytes))*8 + return +} + +// parseInt treats the given bytes as a big-endian, signed integer and returns +// the result. +func parseInt32(bytes []byte) (int32, error) { + if err := checkInteger(bytes); err != nil { + return 0, err + } + ret64, err := parseInt64(bytes) + if err != nil { + return 0, err + } + if ret64 != int64(int32(ret64)) { + return 0, StructuralError{"integer too large"} + } + return int32(ret64), nil +} + +var bigOne = big.NewInt(1) + +// parseBigInt treats the given bytes as a big-endian, signed integer and returns +// the result. +func parseBigInt(bytes []byte) (*big.Int, error) { + if err := checkInteger(bytes); err != nil { + return nil, err + } + ret := new(big.Int) + if len(bytes) > 0 && bytes[0]&0x80 == 0x80 { + // This is a negative number. + notBytes := make([]byte, len(bytes)) + for i := range notBytes { + notBytes[i] = ^bytes[i] + } + ret.SetBytes(notBytes) + ret.Add(ret, bigOne) + ret.Neg(ret) + return ret, nil + } + ret.SetBytes(bytes) + return ret, nil +} + +// BIT STRING + +// BitString is the structure to use when you want an ASN.1 BIT STRING type. A +// bit string is padded up to the nearest byte in memory and the number of +// valid bits is recorded. Padding bits will be zero. +type BitString struct { + Bytes []byte // bits packed into bytes. + BitLength int // length in bits. +} + +// At returns the bit at the given index. If the index is out of range it +// returns false. +func (b BitString) At(i int) int { + if i < 0 || i >= b.BitLength { + return 0 + } + x := i / 8 + y := 7 - uint(i%8) + return int(b.Bytes[x]>>y) & 1 +} + +// RightAlign returns a slice where the padding bits are at the beginning. The +// slice may share memory with the BitString. +func (b BitString) RightAlign() []byte { + shift := uint(8 - (b.BitLength % 8)) + if shift == 8 || len(b.Bytes) == 0 { + return b.Bytes + } + + a := make([]byte, len(b.Bytes)) + a[0] = b.Bytes[0] >> shift + for i := 1; i < len(b.Bytes); i++ { + a[i] = b.Bytes[i-1] << (8 - shift) + a[i] |= b.Bytes[i] >> shift + } + + return a +} + +// parseBitString parses an ASN.1 bit string from the given byte slice and returns it. +func parseBitString(bytes []byte) (ret BitString, err error) { + if len(bytes) == 0 { + err = SyntaxError{"zero length BIT STRING"} + return + } + paddingBits := int(bytes[0]) + if paddingBits > 7 || + len(bytes) == 1 && paddingBits > 0 || + bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 { + err = SyntaxError{"invalid padding bits in BIT STRING"} + return + } + ret.BitLength = (len(bytes)-1)*8 - paddingBits + ret.Bytes = bytes[1:] + return +} + +// OBJECT IDENTIFIER + +// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER. +type ObjectIdentifier []int + +// Equal reports whether oi and other represent the same identifier. +func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool { + if len(oi) != len(other) { + return false + } + for i := 0; i < len(oi); i++ { + if oi[i] != other[i] { + return false + } + } + + return true +} + +func (oi ObjectIdentifier) String() string { + var s string + + for i, v := range oi { + if i > 0 { + s += "." + } + s += strconv.Itoa(v) + } + + return s +} + +// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and +// returns it. An object identifier is a sequence of variable length integers +// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte) (s []int, err error) { + if len(bytes) == 0 { + err = SyntaxError{"zero length OBJECT IDENTIFIER"} + return + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + s = make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + v, offset, err := parseBase128Int(bytes, 0) + if err != nil { + return + } + if v < 80 { + s[0] = v / 40 + s[1] = v % 40 + } else { + s[0] = 2 + s[1] = v - 80 + } + + i := 2 + for ; offset < len(bytes); i++ { + v, offset, err = parseBase128Int(bytes, offset) + if err != nil { + return + } + s[i] = v + } + s = s[0:i] + return +} + +// ENUMERATED + +// An Enumerated is represented as a plain int. +type Enumerated int + +// FLAG + +// A Flag accepts any data and is set to true if present. +type Flag bool + +// parseBase128Int parses a base-128 encoded int from the given offset in the +// given byte slice. It returns the value and the new offset. +func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) { + offset = initOffset + for shifted := 0; offset < len(bytes); shifted++ { + if shifted == 4 { + err = StructuralError{"base 128 integer too large"} + return + } + ret <<= 7 + b := bytes[offset] + ret |= int(b & 0x7f) + offset++ + if b&0x80 == 0 { + return + } + } + err = SyntaxError{"truncated base 128 integer"} + return +} + +// UTCTime + +func parseUTCTime(bytes []byte) (ret time.Time, err error) { + s := string(bytes) + + formatStr := "0601021504Z0700" + ret, err = time.Parse(formatStr, s) + if err != nil { + formatStr = "060102150405Z0700" + ret, err = time.Parse(formatStr, s) + } + if err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + return + } + + if ret.Year() >= 2050 { + // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + ret = ret.AddDate(-100, 0, 0) + } + + return +} + +// parseGeneralizedTime parses the GeneralizedTime from the given byte slice +// and returns the resulting time. +func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) { + const formatStr = "20060102150405Z0700" + s := string(bytes) + + if ret, err = time.Parse(formatStr, s); err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + } + + return +} + +// PrintableString + +// parsePrintableString parses a ASN.1 PrintableString from the given byte +// array and returns it. +func parsePrintableString(bytes []byte) (ret string, err error) { + for _, b := range bytes { + if !isPrintable(b) { + err = SyntaxError{"PrintableString contains invalid character"} + return + } + } + ret = string(bytes) + return +} + +// isPrintable reports whether the given b is in the ASN.1 PrintableString set. +func isPrintable(b byte) bool { + return 'a' <= b && b <= 'z' || + 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || + '\'' <= b && b <= ')' || + '+' <= b && b <= '/' || + b == ' ' || + b == ':' || + b == '=' || + b == '?' 
|| + // This is technically not allowed in a PrintableString. + // However, x509 certificates with wildcard strings don't + // always use the correct string type so we permit it. + b == '*' +} + +// IA5String + +// parseIA5String parses a ASN.1 IA5String (ASCII string) from the given +// byte slice and returns it. +func parseIA5String(bytes []byte) (ret string, err error) { + for _, b := range bytes { + if b >= utf8.RuneSelf { + err = SyntaxError{"IA5String contains invalid character"} + return + } + } + ret = string(bytes) + return +} + +// T61String + +// parseT61String parses a ASN.1 T61String (8-bit clean string) from the given +// byte slice and returns it. +func parseT61String(bytes []byte) (ret string, err error) { + return string(bytes), nil +} + +// UTF8String + +// parseUTF8String parses a ASN.1 UTF8String (raw UTF-8) from the given byte +// array and returns it. +func parseUTF8String(bytes []byte) (ret string, err error) { + if !utf8.Valid(bytes) { + return "", errors.New("asn1: invalid UTF-8 string") + } + return string(bytes), nil +} + +// A RawValue represents an undecoded ASN.1 object. +type RawValue struct { + Class, Tag int + IsCompound bool + Bytes []byte + FullBytes []byte // includes the tag and length +} + +// RawContent is used to signal that the undecoded, DER data needs to be +// preserved for a struct. To use it, the first field of the struct must have +// this type. It's an error for any of the other fields to have this type. +type RawContent []byte + +// Tagging + +// parseTagAndLength parses an ASN.1 tag and length pair from the given offset +// into a byte slice. It returns the parsed data and the new offset. SET and +// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we +// don't distinguish between ordered and unordered objects in this code. +func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) { + offset = initOffset + // parseTagAndLength should not be called without at least a single + // byte to read. Thus this check is for robustness: + if offset >= len(bytes) { + err = errors.New("asn1: internal error in parseTagAndLength") + return + } + b := bytes[offset] + offset++ + ret.class = int(b >> 6) + ret.isCompound = b&0x20 == 0x20 + ret.tag = int(b & 0x1f) + + // If the bottom five bits are set, then the tag number is actually base 128 + // encoded afterwards + if ret.tag == 0x1f { + ret.tag, offset, err = parseBase128Int(bytes, offset) + if err != nil { + return + } + // Tags should be encoded in minimal form. + if ret.tag < 0x1f { + err = SyntaxError{"non-minimal tag"} + return + } + } + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length"} + return + } + b = bytes[offset] + offset++ + if b&0x80 == 0 { + // The length is encoded in the bottom 7 bits. + ret.length = int(b & 0x7f) + } else { + // Bottom 7 bits give the number of length bytes to follow. + numBytes := int(b & 0x7f) + if numBytes == 0 { + err = SyntaxError{"indefinite length found (not DER)"} + return + } + ret.length = 0 + for i := 0; i < numBytes; i++ { + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length"} + return + } + b = bytes[offset] + offset++ + if ret.length >= 1<<23 { + // We can't shift ret.length up without + // overflowing. + err = StructuralError{"length too large"} + return + } + ret.length <<= 8 + ret.length |= int(b) + if ret.length == 0 { + // DER requires that lengths be minimal. 
+ err = StructuralError{"superfluous leading zeros in length"} + return + } + } + // Short lengths must be encoded in short form. + if ret.length < 0x80 { + err = StructuralError{"non-minimal length"} + return + } + } + + return +} + +// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse +// a number of ASN.1 values from the given byte slice and returns them as a +// slice of Go values of the given type. +func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) { + expectedTag, compoundType, ok := getUniversalType(elemType) + if !ok { + err = StructuralError{"unknown Go type for slice"} + return + } + + // First we iterate over the input and count the number of elements, + // checking that the types are correct in each case. + numElements := 0 + for offset := 0; offset < len(bytes); { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String: + // We pretend that various other string types are + // PRINTABLE STRINGs so that a sequence of them can be + // parsed into a []string. + t.tag = TagPrintableString + case TagGeneralizedTime, TagUTCTime: + // Likewise, both time types are treated the same. + t.tag = TagUTCTime + } + + if t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag { + err = StructuralError{"sequence tag mismatch"} + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"truncated sequence"} + return + } + offset += t.length + numElements++ + } + ret = reflect.MakeSlice(sliceType, numElements, numElements) + params := fieldParameters{} + offset := 0 + for i := 0; i < numElements; i++ { + offset, err = parseField(ret.Index(i), bytes, offset, params) + if err != nil { + return + } + } + return +} + +var ( + bitStringType = reflect.TypeOf(BitString{}) + objectIdentifierType = reflect.TypeOf(ObjectIdentifier{}) + enumeratedType = reflect.TypeOf(Enumerated(0)) + flagType = reflect.TypeOf(Flag(false)) + timeType = reflect.TypeOf(time.Time{}) + rawValueType = reflect.TypeOf(RawValue{}) + rawContentsType = reflect.TypeOf(RawContent(nil)) + bigIntType = reflect.TypeOf(new(big.Int)) +) + +// invalidLength returns true iff offset + length > sliceLength, or if the +// addition would overflow. +func invalidLength(offset, length, sliceLength int) bool { + return offset+length < offset || offset+length > sliceLength +} + +// parseField is the main parsing function. Given a byte slice and an offset +// into the array, it will try to parse a suitable ASN.1 value out and store it +// in the given Value. +func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) { + offset = initOffset + fieldType := v.Type() + + // If we have run out of data, it may be that there are optional elements at the end. + if offset == len(bytes) { + if !setDefaultValue(v, params) { + err = SyntaxError{"sequence truncated"} + } + return + } + + // Deal with raw values. 
+ if fieldType == rawValueType { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]} + offset += t.length + v.Set(reflect.ValueOf(result)) + return + } + + // Deal with the ANY type. + if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + var result interface{} + if !t.isCompound && t.class == ClassUniversal { + innerBytes := bytes[offset : offset+t.length] + switch t.tag { + case TagPrintableString: + result, err = parsePrintableString(innerBytes) + case TagIA5String: + result, err = parseIA5String(innerBytes) + // jtasn1 addition of following case + case TagGeneralString: + result, err = parseIA5String(innerBytes) + case TagT61String: + result, err = parseT61String(innerBytes) + case TagUTF8String: + result, err = parseUTF8String(innerBytes) + case TagInteger: + result, err = parseInt64(innerBytes) + case TagBitString: + result, err = parseBitString(innerBytes) + case TagOID: + result, err = parseObjectIdentifier(innerBytes) + case TagUTCTime: + result, err = parseUTCTime(innerBytes) + case TagGeneralizedTime: + result, err = parseGeneralizedTime(innerBytes) + case TagOctetString: + result = innerBytes + default: + // If we don't know how to handle the type, we just leave Value as nil. + } + } + offset += t.length + if err != nil { + return + } + if result != nil { + v.Set(reflect.ValueOf(result)) + } + return + } + universalTag, compoundType, ok1 := getUniversalType(fieldType) + if !ok1 { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)} + return + } + + t, offset, err := parseTagAndLength(bytes, offset) + if err != nil { + return + } + if params.explicit { + expectedClass := ClassContextSpecific + if params.application { + expectedClass = ClassApplication + } + if offset == len(bytes) { + err = StructuralError{"explicit tag has no child"} + return + } + if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) { + if t.length > 0 { + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + } else { + if fieldType != flagType { + err = StructuralError{"zero length explicit tag was not an asn1.Flag"} + return + } + v.SetBool(true) + return + } + } else { + // The tags didn't match, it might be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{"explicitly tagged member didn't match"} + } + return + } + } + + // Special case for strings: all the ASN.1 string types map to the Go + // type string. getUniversalType returns the tag for PrintableString + // when it sees a string, so if we see a different string type on the + // wire, we change the universal type to match. + if universalTag == TagPrintableString { + if t.class == ClassUniversal { + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String: + universalTag = t.tag + } + } else if params.stringType != 0 { + universalTag = params.stringType + } + } + + // Special case for time: UTCTime and GeneralizedTime both map to the + // Go type time.Time. 
+ if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal { + universalTag = TagGeneralizedTime + } + + if params.set { + universalTag = TagSet + } + + expectedClass := ClassUniversal + expectedTag := universalTag + + if !params.explicit && params.tag != nil { + expectedClass = ClassContextSpecific + expectedTag = *params.tag + } + + if !params.explicit && params.application && params.tag != nil { + expectedClass = ClassApplication + expectedTag = *params.tag + } + + // We have unwrapped any explicit tagging at this point. + if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType { + // Tags don't match. Again, it could be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)} + } + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + innerBytes := bytes[offset : offset+t.length] + offset += t.length + + // We deal with the structures defined in this package first. + switch fieldType { + case objectIdentifierType: + newSlice, err1 := parseObjectIdentifier(innerBytes) + v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice))) + if err1 == nil { + reflect.Copy(v, reflect.ValueOf(newSlice)) + } + err = err1 + return + case bitStringType: + bs, err1 := parseBitString(innerBytes) + if err1 == nil { + v.Set(reflect.ValueOf(bs)) + } + err = err1 + return + case timeType: + var time time.Time + var err1 error + if universalTag == TagUTCTime { + time, err1 = parseUTCTime(innerBytes) + } else { + time, err1 = parseGeneralizedTime(innerBytes) + } + if err1 == nil { + v.Set(reflect.ValueOf(time)) + } + err = err1 + return + case enumeratedType: + parsedInt, err1 := parseInt32(innerBytes) + if err1 == nil { + v.SetInt(int64(parsedInt)) + } + err = err1 + return + case flagType: + v.SetBool(true) + return + case bigIntType: + parsedInt, err1 := parseBigInt(innerBytes) + if err1 == nil { + v.Set(reflect.ValueOf(parsedInt)) + } + err = err1 + return + } + switch val := v; val.Kind() { + case reflect.Bool: + parsedBool, err1 := parseBool(innerBytes) + if err1 == nil { + val.SetBool(parsedBool) + } + err = err1 + return + case reflect.Int, reflect.Int32, reflect.Int64: + if val.Type().Size() == 4 { + parsedInt, err1 := parseInt32(innerBytes) + if err1 == nil { + val.SetInt(int64(parsedInt)) + } + err = err1 + } else { + parsedInt, err1 := parseInt64(innerBytes) + if err1 == nil { + val.SetInt(parsedInt) + } + err = err1 + } + return + // TODO(dfc) Add support for the remaining integer types + case reflect.Struct: + structType := fieldType + + if structType.NumField() > 0 && + structType.Field(0).Type == rawContentsType { + bytes := bytes[initOffset:offset] + val.Field(0).Set(reflect.ValueOf(RawContent(bytes))) + } + + innerOffset := 0 + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + if i == 0 && field.Type == rawContentsType { + continue + } + innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1"))) + if err != nil { + return + } + } + // We allow extra bytes at the end of the SEQUENCE because + // adding elements to the end has been used in X.509 as the + // version numbers have increased. 
+ return + case reflect.Slice: + sliceType := fieldType + if sliceType.Elem().Kind() == reflect.Uint8 { + val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes))) + reflect.Copy(val, reflect.ValueOf(innerBytes)) + return + } + newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem()) + if err1 == nil { + val.Set(newSlice) + } + err = err1 + return + case reflect.String: + var v string + switch universalTag { + case TagPrintableString: + v, err = parsePrintableString(innerBytes) + case TagIA5String: + v, err = parseIA5String(innerBytes) + case TagT61String: + v, err = parseT61String(innerBytes) + case TagUTF8String: + v, err = parseUTF8String(innerBytes) + case TagGeneralString: + // GeneralString is specified in ISO-2022/ECMA-35, + // A brief review suggests that it includes structures + // that allow the encoding to change midstring and + // such. We give up and pass it as an 8-bit string. + v, err = parseT61String(innerBytes) + default: + err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)} + } + if err == nil { + val.SetString(v) + } + return + } + err = StructuralError{"unsupported: " + v.Type().String()} + return +} + +// canHaveDefaultValue reports whether k is a Kind that we will set a default +// value for. (A signed integer, essentially.) +func canHaveDefaultValue(k reflect.Kind) bool { + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + } + + return false +} + +// setDefaultValue is used to install a default value, from a tag string, into +// a Value. It is successful if the field was optional, even if a default value +// wasn't provided or it failed to install it into the Value. +func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { + if !params.optional { + return + } + ok = true + if params.defaultValue == nil { + return + } + if canHaveDefaultValue(v.Kind()) { + v.SetInt(*params.defaultValue) + } + return +} + +// Unmarshal parses the DER-encoded ASN.1 data structure b +// and uses the reflect package to fill in an arbitrary value pointed at by val. +// Because Unmarshal uses the reflect package, the structs +// being written to must use upper case field names. +// +// An ASN.1 INTEGER can be written to an int, int32, int64, +// or *big.Int (from the math/big package). +// If the encoded value does not fit in the Go type, +// Unmarshal returns a parse error. +// +// An ASN.1 BIT STRING can be written to a BitString. +// +// An ASN.1 OCTET STRING can be written to a []byte. +// +// An ASN.1 OBJECT IDENTIFIER can be written to an +// ObjectIdentifier. +// +// An ASN.1 ENUMERATED can be written to an Enumerated. +// +// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time. +// +// An ASN.1 PrintableString or IA5String can be written to a string. +// +// Any of the above ASN.1 values can be written to an interface{}. +// The value stored in the interface has the corresponding Go type. +// For integers, that type is int64. +// +// An ASN.1 SEQUENCE OF x or SET OF x can be written +// to a slice if an x can be written to the slice's element type. +// +// An ASN.1 SEQUENCE or SET can be written to a struct +// if each of the elements in the sequence can be +// written to the corresponding element in the struct. 
+// +// The following tags on struct fields have special meaning to Unmarshal: +// +// application specifies that a APPLICATION tag is used +// default:x sets the default value for optional integer fields +// explicit specifies that an additional, explicit tag wraps the implicit one +// optional marks the field as ASN.1 OPTIONAL +// set causes a SET, rather than a SEQUENCE type to be expected +// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC +// +// If the type of the first field of a structure is RawContent then the raw +// ASN1 contents of the struct will be stored in it. +// +// If the type name of a slice element ends with "SET" then it's treated as if +// the "set" tag was set on it. This can be used with nested slices where a +// struct tag cannot be given. +// +// Other ASN.1 types are not supported; if it encounters them, +// Unmarshal returns a parse error. +func Unmarshal(b []byte, val interface{}) (rest []byte, err error) { + return UnmarshalWithParams(b, val, "") +} + +// UnmarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. +func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) { + v := reflect.ValueOf(val).Elem() + offset, err := parseField(v, b, 0, parseFieldParameters(params)) + if err != nil { + return nil, err + } + return b[offset:], nil +} diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go new file mode 100644 index 00000000000..7a9da49f396 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go @@ -0,0 +1,173 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "reflect" + "strconv" + "strings" +) + +// ASN.1 objects have metadata preceding them: +// the tag: the type of the object +// a flag denoting if this object is compound or not +// the class type: the namespace of the tag +// the length of the object, in bytes + +// Here are some standard tags and classes + +// ASN.1 tags represent the type of the following object. +const ( + TagBoolean = 1 + TagInteger = 2 + TagBitString = 3 + TagOctetString = 4 + TagOID = 6 + TagEnum = 10 + TagUTF8String = 12 + TagSequence = 16 + TagSet = 17 + TagPrintableString = 19 + TagT61String = 20 + TagIA5String = 22 + TagUTCTime = 23 + TagGeneralizedTime = 24 + TagGeneralString = 27 +) + +// ASN.1 class types represent the namespace of the tag. +const ( + ClassUniversal = 0 + ClassApplication = 1 + ClassContextSpecific = 2 + ClassPrivate = 3 +) + +type tagAndLength struct { + class, tag, length int + isCompound bool +} + +// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead +// of" and "in addition to". When not specified, every primitive type has a +// default tag in the UNIVERSAL class. +// +// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1 +// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT +// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another. +// +// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an +// /additional/ tag would wrap the default tag. This explicit tag will have the +// compound flag set. +// +// (This is used in order to remove ambiguity with optional elements.) 
+// +// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we +// don't support that here. We support a single layer of EXPLICIT or IMPLICIT +// tagging with tag strings on the fields of a structure. + +// fieldParameters is the parsed representation of tag string from a structure field. +type fieldParameters struct { + optional bool // true iff the field is OPTIONAL + explicit bool // true iff an EXPLICIT tag is in use. + application bool // true iff an APPLICATION tag is in use. + defaultValue *int64 // a default value for INTEGER typed fields (maybe nil). + tag *int // the EXPLICIT or IMPLICIT tag (maybe nil). + stringType int // the string tag to use when marshaling. + timeType int // the time tag to use when marshaling. + set bool // true iff this should be encoded as a SET + omitEmpty bool // true iff this should be omitted if empty when marshaling. + + // Invariants: + // if explicit is set, tag is non-nil. +} + +// Given a tag string with the format specified in the package comment, +// parseFieldParameters will parse it into a fieldParameters structure, +// ignoring unknown parts of the string. +func parseFieldParameters(str string) (ret fieldParameters) { + for _, part := range strings.Split(str, ",") { + switch { + case part == "optional": + ret.optional = true + case part == "explicit": + ret.explicit = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "generalized": + ret.timeType = TagGeneralizedTime + case part == "utc": + ret.timeType = TagUTCTime + case part == "ia5": + ret.stringType = TagIA5String + // jtasn1 case below added + case part == "generalstring": + ret.stringType = TagGeneralString + case part == "printable": + ret.stringType = TagPrintableString + case part == "utf8": + ret.stringType = TagUTF8String + case strings.HasPrefix(part, "default:"): + i, err := strconv.ParseInt(part[8:], 10, 64) + if err == nil { + ret.defaultValue = new(int64) + *ret.defaultValue = i + } + case strings.HasPrefix(part, "tag:"): + i, err := strconv.Atoi(part[4:]) + if err == nil { + ret.tag = new(int) + *ret.tag = i + } + case part == "set": + ret.set = true + case part == "application": + ret.application = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "omitempty": + ret.omitEmpty = true + } + } + return +} + +// Given a reflected Go type, getUniversalType returns the default tag number +// and expected compound flag. 
+func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) { + switch t { + case objectIdentifierType: + return TagOID, false, true + case bitStringType: + return TagBitString, false, true + case timeType: + return TagUTCTime, false, true + case enumeratedType: + return TagEnum, false, true + case bigIntType: + return TagInteger, false, true + } + switch t.Kind() { + case reflect.Bool: + return TagBoolean, false, true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return TagInteger, false, true + case reflect.Struct: + return TagSequence, true, true + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return TagOctetString, false, true + } + if strings.HasSuffix(t.Name(), "SET") { + return TagSet, true, true + } + return TagSequence, true, true + case reflect.String: + return TagPrintableString, false, true + } + return 0, false, false +} diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go new file mode 100644 index 00000000000..f52eee9d261 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go @@ -0,0 +1,659 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "time" + "unicode/utf8" +) + +// A forkableWriter is an in-memory buffer that can be +// 'forked' to create new forkableWriters that bracket the +// original. After +// pre, post := w.fork() +// the overall sequence of bytes represented is logically w+pre+post. +type forkableWriter struct { + *bytes.Buffer + pre, post *forkableWriter +} + +func newForkableWriter() *forkableWriter { + return &forkableWriter{new(bytes.Buffer), nil, nil} +} + +func (f *forkableWriter) fork() (pre, post *forkableWriter) { + if f.pre != nil || f.post != nil { + panic("have already forked") + } + f.pre = newForkableWriter() + f.post = newForkableWriter() + return f.pre, f.post +} + +func (f *forkableWriter) Len() (l int) { + l += f.Buffer.Len() + if f.pre != nil { + l += f.pre.Len() + } + if f.post != nil { + l += f.post.Len() + } + return +} + +func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) { + n, err = out.Write(f.Bytes()) + if err != nil { + return + } + + var nn int + + if f.pre != nil { + nn, err = f.pre.writeTo(out) + n += nn + if err != nil { + return + } + } + + if f.post != nil { + nn, err = f.post.writeTo(out) + n += nn + } + return +} + +func marshalBase128Int(out *forkableWriter, n int64) (err error) { + if n == 0 { + err = out.WriteByte(0) + return + } + + l := 0 + for i := n; i > 0; i >>= 7 { + l++ + } + + for i := l - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + err = out.WriteByte(o) + if err != nil { + return + } + } + + return nil +} + +func marshalInt64(out *forkableWriter, i int64) (err error) { + n := int64Length(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +func int64Length(i int64) (numBytes int) { + numBytes = 1 + + for i > 127 { + numBytes++ + i >>= 8 + } + + for i < -128 { + numBytes++ + i >>= 8 + } + + return +} + +func marshalBigInt(out *forkableWriter, n *big.Int) (err error) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. 
If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + err = out.WriteByte(0xff) + if err != nil { + return + } + } + _, err = out.Write(bytes) + } else if n.Sign() == 0 { + // Zero is written as a single 0 zero rather than no bytes. + err = out.WriteByte(0x00) + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with 0x00 in order to stop it + // looking like a negative number. + err = out.WriteByte(0) + if err != nil { + return + } + } + _, err = out.Write(bytes) + } + return +} + +func marshalLength(out *forkableWriter, i int) (err error) { + n := lengthLength(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +func lengthLength(i int) (numBytes int) { + numBytes = 1 + for i > 255 { + numBytes++ + i >>= 8 + } + return +} + +func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) { + b := uint8(t.class) << 6 + if t.isCompound { + b |= 0x20 + } + if t.tag >= 31 { + b |= 0x1f + err = out.WriteByte(b) + if err != nil { + return + } + err = marshalBase128Int(out, int64(t.tag)) + if err != nil { + return + } + } else { + b |= uint8(t.tag) + err = out.WriteByte(b) + if err != nil { + return + } + } + + if t.length >= 128 { + l := lengthLength(t.length) + err = out.WriteByte(0x80 | byte(l)) + if err != nil { + return + } + err = marshalLength(out, t.length) + if err != nil { + return + } + } else { + err = out.WriteByte(byte(t.length)) + if err != nil { + return + } + } + + return nil +} + +func marshalBitString(out *forkableWriter, b BitString) (err error) { + paddingBits := byte((8 - b.BitLength%8) % 8) + err = out.WriteByte(paddingBits) + if err != nil { + return + } + _, err = out.Write(b.Bytes) + return +} + +func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) { + if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) { + return StructuralError{"invalid object identifier"} + } + + err = marshalBase128Int(out, int64(oid[0]*40+oid[1])) + if err != nil { + return + } + for i := 2; i < len(oid); i++ { + err = marshalBase128Int(out, int64(oid[i])) + if err != nil { + return + } + } + + return +} + +func marshalPrintableString(out *forkableWriter, s string) (err error) { + b := []byte(s) + for _, c := range b { + if !isPrintable(c) { + return StructuralError{"PrintableString contains invalid character"} + } + } + + _, err = out.Write(b) + return +} + +func marshalIA5String(out *forkableWriter, s string) (err error) { + b := []byte(s) + for _, c := range b { + if c > 127 { + return StructuralError{"IA5String contains invalid character"} + } + } + + _, err = out.Write(b) + return +} + +func marshalUTF8String(out *forkableWriter, s string) (err error) { + _, err = out.Write([]byte(s)) + return +} + +func marshalTwoDigits(out *forkableWriter, v int) (err error) { + err = out.WriteByte(byte('0' + (v/10)%10)) + if err != nil { + return + } + return out.WriteByte(byte('0' + v%10)) +} + +func marshalFourDigits(out *forkableWriter, v int) (err error) { + var bytes [4]byte + for i := range bytes { + bytes[3-i] = '0' + byte(v%10) + v /= 10 + } + _, err = out.Write(bytes[:]) + return +} + +func outsideUTCRange(t time.Time) bool { + year := t.Year() + return year < 1950 || year 
>= 2050 +} + +func marshalUTCTime(out *forkableWriter, t time.Time) (err error) { + year := t.Year() + + switch { + case 1950 <= year && year < 2000: + err = marshalTwoDigits(out, year-1900) + case 2000 <= year && year < 2050: + err = marshalTwoDigits(out, year-2000) + default: + return StructuralError{"cannot represent time as UTCTime"} + } + if err != nil { + return + } + + return marshalTimeCommon(out, t) +} + +func marshalGeneralizedTime(out *forkableWriter, t time.Time) (err error) { + year := t.Year() + if year < 0 || year > 9999 { + return StructuralError{"cannot represent time as GeneralizedTime"} + } + if err = marshalFourDigits(out, year); err != nil { + return + } + + return marshalTimeCommon(out, t) +} + +func marshalTimeCommon(out *forkableWriter, t time.Time) (err error) { + _, month, day := t.Date() + + err = marshalTwoDigits(out, int(month)) + if err != nil { + return + } + + err = marshalTwoDigits(out, day) + if err != nil { + return + } + + hour, min, sec := t.Clock() + + err = marshalTwoDigits(out, hour) + if err != nil { + return + } + + err = marshalTwoDigits(out, min) + if err != nil { + return + } + + err = marshalTwoDigits(out, sec) + if err != nil { + return + } + + _, offset := t.Zone() + + switch { + case offset/60 == 0: + err = out.WriteByte('Z') + return + case offset > 0: + err = out.WriteByte('+') + case offset < 0: + err = out.WriteByte('-') + } + + if err != nil { + return + } + + offsetMinutes := offset / 60 + if offsetMinutes < 0 { + offsetMinutes = -offsetMinutes + } + + err = marshalTwoDigits(out, offsetMinutes/60) + if err != nil { + return + } + + err = marshalTwoDigits(out, offsetMinutes%60) + return +} + +func stripTagAndLength(in []byte) []byte { + _, offset, err := parseTagAndLength(in, 0) + if err != nil { + return in + } + return in[offset:] +} + +func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) { + switch value.Type() { + case flagType: + return nil + case timeType: + t := value.Interface().(time.Time) + if params.timeType == TagGeneralizedTime || outsideUTCRange(t) { + return marshalGeneralizedTime(out, t) + } else { + return marshalUTCTime(out, t) + } + case bitStringType: + return marshalBitString(out, value.Interface().(BitString)) + case objectIdentifierType: + return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier)) + case bigIntType: + return marshalBigInt(out, value.Interface().(*big.Int)) + } + + switch v := value; v.Kind() { + case reflect.Bool: + if v.Bool() { + return out.WriteByte(255) + } else { + return out.WriteByte(0) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return marshalInt64(out, v.Int()) + case reflect.Struct: + t := v.Type() + + startingField := 0 + + // If the first element of the structure is a non-empty + // RawContents, then we don't bother serializing the rest. 
+ if t.NumField() > 0 && t.Field(0).Type == rawContentsType { + s := v.Field(0) + if s.Len() > 0 { + bytes := make([]byte, s.Len()) + for i := 0; i < s.Len(); i++ { + bytes[i] = uint8(s.Index(i).Uint()) + } + /* The RawContents will contain the tag and + * length fields but we'll also be writing + * those ourselves, so we strip them out of + * bytes */ + _, err = out.Write(stripTagAndLength(bytes)) + return + } else { + startingField = 1 + } + } + + for i := startingField; i < t.NumField(); i++ { + var pre *forkableWriter + pre, out = out.fork() + err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1"))) + if err != nil { + return + } + } + return + case reflect.Slice: + sliceType := v.Type() + if sliceType.Elem().Kind() == reflect.Uint8 { + bytes := make([]byte, v.Len()) + for i := 0; i < v.Len(); i++ { + bytes[i] = uint8(v.Index(i).Uint()) + } + _, err = out.Write(bytes) + return + } + + // jtasn1 Pass on the tags to the members but need to unset explicit switch and implicit value + //var fp fieldParameters + params.explicit = false + params.tag = nil + for i := 0; i < v.Len(); i++ { + var pre *forkableWriter + pre, out = out.fork() + err = marshalField(pre, v.Index(i), params) + if err != nil { + return + } + } + return + case reflect.String: + switch params.stringType { + case TagIA5String: + return marshalIA5String(out, v.String()) + case TagPrintableString: + return marshalPrintableString(out, v.String()) + default: + return marshalUTF8String(out, v.String()) + } + } + + return StructuralError{"unknown Go type"} +} + +func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) { + if !v.IsValid() { + return fmt.Errorf("asn1: cannot marshal nil value") + } + // If the field is an interface{} then recurse into it. + if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 { + return marshalField(out, v.Elem(), params) + } + + if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty { + return + } + + if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) { + defaultValue := reflect.New(v.Type()).Elem() + defaultValue.SetInt(*params.defaultValue) + + if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) { + return + } + } + + // If no default value is given then the zero value for the type is + // assumed to be the default value. This isn't obviously the correct + // behaviour, but it's what Go has traditionally done. 
+ if params.optional && params.defaultValue == nil { + if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) { + return + } + } + + if v.Type() == rawValueType { + rv := v.Interface().(RawValue) + if len(rv.FullBytes) != 0 { + _, err = out.Write(rv.FullBytes) + } else { + err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound}) + if err != nil { + return + } + _, err = out.Write(rv.Bytes) + } + return + } + + tag, isCompound, ok := getUniversalType(v.Type()) + if !ok { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())} + return + } + class := ClassUniversal + + if params.timeType != 0 && tag != TagUTCTime { + return StructuralError{"explicit time type given to non-time member"} + } + + // jtasn1 updated to allow slices of strings + if params.stringType != 0 && !(tag == TagPrintableString || (v.Kind() == reflect.Slice && tag == 16 && v.Type().Elem().Kind() == reflect.String)) { + return StructuralError{"explicit string type given to non-string member"} + } + + switch tag { + case TagPrintableString: + if params.stringType == 0 { + // This is a string without an explicit string type. We'll use + // a PrintableString if the character set in the string is + // sufficiently limited, otherwise we'll use a UTF8String. + for _, r := range v.String() { + if r >= utf8.RuneSelf || !isPrintable(byte(r)) { + if !utf8.ValidString(v.String()) { + return errors.New("asn1: string not valid UTF-8") + } + tag = TagUTF8String + break + } + } + } else { + tag = params.stringType + } + case TagUTCTime: + if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) { + tag = TagGeneralizedTime + } + } + + if params.set { + if tag != TagSequence { + return StructuralError{"non sequence tagged as set"} + } + tag = TagSet + } + + tags, body := out.fork() + + err = marshalBody(body, v, params) + if err != nil { + return + } + + bodyLen := body.Len() + + var explicitTag *forkableWriter + if params.explicit { + explicitTag, tags = tags.fork() + } + + if !params.explicit && params.tag != nil { + // implicit tag. + tag = *params.tag + class = ClassContextSpecific + } + + err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound}) + if err != nil { + return + } + + if params.explicit { + err = marshalTagAndLength(explicitTag, tagAndLength{ + class: ClassContextSpecific, + tag: *params.tag, + length: bodyLen + tags.Len(), + isCompound: true, + }) + } + + return err +} + +// Marshal returns the ASN.1 encoding of val. +// +// In addition to the struct tags recognised by Unmarshal, the following can be +// used: +// +// ia5: causes strings to be marshaled as ASN.1, IA5 strings +// omitempty: causes empty slices to be skipped +// printable: causes strings to be marshaled as ASN.1, PrintableString strings. +// utf8: causes strings to be marshaled as ASN.1, UTF8 strings +func Marshal(val interface{}) ([]byte, error) { + var out bytes.Buffer + v := reflect.ValueOf(val) + f := newForkableWriter() + err := marshalField(f, v, fieldParameters{}) + if err != nil { + return nil, err + } + _, err = f.writeTo(&out) + return out.Bytes(), err +} diff --git a/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 00000000000..75d418763db --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,98 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + return Key64(password, salt, int64(iter), int64(keyLen), h) +} + +// Key64 derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. Key64 uses +// int64 for the iteration count and key length to allow larger values. +// The key is derived based on the method described as PBKDF2 with the HMAC +// variant using the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key64(password, salt []byte, iter, keyLen int64, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := int64(prf.Size()) + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := int64(1); block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[int64(len(dk))-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := int64(2); n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/github.com/jcmturner/goidentity/v6/LICENSE b/vendor/github.com/jcmturner/goidentity/v6/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/jcmturner/goidentity/v6/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/jcmturner/goidentity/v6/README.md b/vendor/github.com/jcmturner/goidentity/v6/README.md new file mode 100644 index 00000000000..3e15042eac6 --- /dev/null +++ b/vendor/github.com/jcmturner/goidentity/v6/README.md @@ -0,0 +1,7 @@ +# goidentity +[![GoDoc](https://godoc.org/github.com/jcmturner/goidentity/v6?status.svg)](https://godoc.org/github.com/jcmturner/goidentity/v6) [![Go Report Card](https://goreportcard.com/badge/github.com/jcmturner/goidentity/v6)](https://goreportcard.com/report/github.com/jcmturner/goidentity/v6) + +Please import as below +``` +import "github.com/jcmturner/goidentity/v6" +``` diff --git a/vendor/github.com/jcmturner/goidentity/v6/authenticator.go b/vendor/github.com/jcmturner/goidentity/v6/authenticator.go new file mode 100644 index 00000000000..42ec79b0617 --- /dev/null +++ b/vendor/github.com/jcmturner/goidentity/v6/authenticator.go @@ -0,0 +1,6 @@ +package goidentity + +type Authenticator interface { + Authenticate() (Identity, bool, error) + Mechanism() string // gives the name of the type of authentication mechanism +} diff --git a/vendor/github.com/jcmturner/goidentity/v6/go.mod b/vendor/github.com/jcmturner/goidentity/v6/go.mod new file mode 100644 index 00000000000..73cb36b60c9 --- /dev/null +++ b/vendor/github.com/jcmturner/goidentity/v6/go.mod @@ -0,0 +1,8 @@ +module github.com/jcmturner/goidentity/v6 + +go 1.13 + +require ( + github.com/hashicorp/go-uuid v1.0.2 + github.com/stretchr/testify v1.4.0 +) diff --git a/vendor/github.com/jcmturner/goidentity/v6/go.sum b/vendor/github.com/jcmturner/goidentity/v6/go.sum new file mode 100644 index 00000000000..92979e4a960 --- /dev/null +++ b/vendor/github.com/jcmturner/goidentity/v6/go.sum @@ -0,0 +1,12 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jcmturner/goidentity/v6/identity.go b/vendor/github.com/jcmturner/goidentity/v6/identity.go new file mode 100644 index 00000000000..f55d3b4e0a5 --- /dev/null +++ b/vendor/github.com/jcmturner/goidentity/v6/identity.go @@ -0,0 +1,52 @@ +package goidentity + +import ( + "context" + "net/http" + "time" +) + +const ( + CTXKey = "jcmturner/goidentity" +) + +type Identity interface { + UserName() string + SetUserName(s string) + Domain() string + SetDomain(s string) + DisplayName() string + SetDisplayName(s string) + Human() bool + SetHuman(b bool) + AuthTime() time.Time + SetAuthTime(t time.Time) + AuthzAttributes() []string + AddAuthzAttribute(a string) + RemoveAuthzAttribute(a string) + Authenticated() bool + SetAuthenticated(b bool) + Authorized(a string) bool + SessionID() string + Expired() bool + Attributes() map[string]interface{} + SetAttribute(k string, v interface{}) + SetAttributes(map[string]interface{}) + RemoveAttribute(k string) + Marshal() ([]byte, error) + Unmarshal([]byte) error +} + +func AddToHTTPRequestContext(id Identity, r *http.Request) *http.Request { + ctx := r.Context() + ctx = context.WithValue(ctx, CTXKey, id) + return r.WithContext(ctx) +} + +func FromHTTPRequestContext(r *http.Request) Identity { + ctx := r.Context() + if id, ok := ctx.Value(CTXKey).(Identity); ok { + return id + } + return nil +} diff --git a/vendor/github.com/jcmturner/goidentity/v6/user.go b/vendor/github.com/jcmturner/goidentity/v6/user.go new file mode 100644 index 00000000000..88a73889bf1 --- /dev/null +++ b/vendor/github.com/jcmturner/goidentity/v6/user.go @@ -0,0 +1,172 @@ +package goidentity + +import ( + "bytes" + "encoding/gob" + "github.com/hashicorp/go-uuid" + "time" +) + +type User struct { + authenticated bool + domain string + userName string + displayName string + email string + human bool + groupMembership map[string]bool + authTime time.Time + sessionID string + expiry time.Time + attributes map[string]interface{} +} + +func NewUser(username string) User { + uuid, err := uuid.GenerateUUID() + if err != nil { + uuid = "00unique-sess-ions-uuid-unavailable0" + } + return User{ + userName: username, + groupMembership: make(map[string]bool), + sessionID: uuid, + } +} + +func (u *User) UserName() string { + return u.userName +} + +func (u *User) SetUserName(s string) { + u.userName = s +} + +func (u *User) Domain() string { + return u.domain +} + +func (u *User) SetDomain(s string) { + u.domain = s +} + +func (u *User) DisplayName() string { + if u.displayName == "" { + return u.userName + } + return u.displayName +} + +func (u *User) SetDisplayName(s string) { + u.displayName = s +} + +func (u *User) Human() bool { + return u.human +} + +func (u *User) SetHuman(b bool) { + u.human = b +} + +func (u *User) AuthTime() time.Time { + return u.authTime +} + +func (u *User) SetAuthTime(t time.Time) { + u.authTime = t +} + +func (u *User) AuthzAttributes() []string { + s := make([]string, len(u.groupMembership)) + i := 0 + for a := range u.groupMembership { + s[i] = a + i++ + } + return s +} + +func (u *User) Authenticated() bool { 
+ return u.authenticated +} + +func (u *User) SetAuthenticated(b bool) { + u.authenticated = b +} + +func (u *User) AddAuthzAttribute(a string) { + u.groupMembership[a] = true +} + +func (u *User) RemoveAuthzAttribute(a string) { + if _, ok := u.groupMembership[a]; !ok { + return + } + delete(u.groupMembership, a) +} + +func (u *User) EnableAuthzAttribute(a string) { + if enabled, ok := u.groupMembership[a]; ok && !enabled { + u.groupMembership[a] = true + } +} + +func (u *User) DisableAuthzAttribute(a string) { + if enabled, ok := u.groupMembership[a]; ok && enabled { + u.groupMembership[a] = false + } +} + +func (u *User) Authorized(a string) bool { + if enabled, ok := u.groupMembership[a]; ok && enabled { + return true + } + return false +} + +func (u *User) SessionID() string { + return u.sessionID +} + +func (u *User) SetExpiry(t time.Time) { + u.expiry = t +} + +func (u *User) Expired() bool { + if !u.expiry.IsZero() && time.Now().UTC().After(u.expiry) { + return true + } + return false +} + +func (u *User) Attributes() map[string]interface{} { + return u.attributes +} + +func (u *User) SetAttribute(k string, v interface{}) { + u.attributes[k] = v +} + +func (u *User) SetAttributes(a map[string]interface{}) { + u.attributes = a +} + +func (u *User) RemoveAttribute(k string) { + delete(u.attributes, k) +} + +func (u *User) Marshal() ([]byte, error) { + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + err := enc.Encode(u) + if err != nil { + return []byte{}, err + } + return buf.Bytes(), nil +} + +func (u *User) Unmarshal(b []byte) error { + buf := bytes.NewBuffer(b) + dec := gob.NewDecoder(buf) + return dec.Decode(u) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/LICENSE b/vendor/github.com/jcmturner/gokrb5/v8/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/jcmturner/gokrb5/v8/asn1tools/tools.go b/vendor/github.com/jcmturner/gokrb5/v8/asn1tools/tools.go new file mode 100644 index 00000000000..f27740b9bd7 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/asn1tools/tools.go @@ -0,0 +1,86 @@ +// Package asn1tools provides tools for managing ASN1 marshaled data. +package asn1tools + +import ( + "github.com/jcmturner/gofork/encoding/asn1" +) + +// MarshalLengthBytes returns the ASN1 encoded bytes for the length 'l' +// +// There are two forms: short (for lengths between 0 and 127), and long definite (for lengths between 0 and 2^1008 -1). +// +// Short form: One octet. Bit 8 has value "0" and bits 7-1 give the length. +// +// Long form: Two to 127 octets. Bit 8 of first octet has value "1" and bits 7-1 give the number of additional length octets. Second and following octets give the length, base 256, most significant digit first. 
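For reference, a minimal sketch of what the two length forms described above look like in practice, using the MarshalLengthBytes and GetLengthFromASN helpers added in this vendored file (the byte values follow directly from the short/long-form rules in the comment; the import path is the vendored one from this patch):

```
package main

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/asn1tools"
)

func main() {
	// Short form: lengths 0-127 fit into a single octet.
	fmt.Printf("% X\n", asn1tools.MarshalLengthBytes(5)) // 05

	// Long form: 300 = 0x012C needs two length octets, so the first octet
	// is 0x80|2 = 0x82, followed by the length base 256, most significant first.
	fmt.Printf("% X\n", asn1tools.MarshalLengthBytes(300)) // 82 01 2C

	// GetLengthFromASN reads the length back out of a full ASN.1 element
	// (tag octet, length octets, contents).
	fmt.Println(asn1tools.GetLengthFromASN([]byte{0x30, 0x82, 0x01, 0x2C})) // 300
}
```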
+func MarshalLengthBytes(l int) []byte { + if l <= 127 { + return []byte{byte(l)} + } + var b []byte + p := 1 + for i := 1; i < 127; { + b = append([]byte{byte((l % (p * 256)) / p)}, b...) + p = p * 256 + l = l - l%p + if l <= 0 { + break + } + } + return append([]byte{byte(128 + len(b))}, b...) +} + +// GetLengthFromASN returns the length of a slice of ASN1 encoded bytes from the ASN1 length header it contains. +func GetLengthFromASN(b []byte) int { + if int(b[1]) <= 127 { + return int(b[1]) + } + // The bytes that indicate the length + lb := b[2 : 2+int(b[1])-128] + base := 1 + l := 0 + for i := len(lb) - 1; i >= 0; i-- { + l += int(lb[i]) * base + base = base * 256 + } + return l +} + +// GetNumberBytesInLengthHeader returns the number of bytes in the ASn1 header that indicate the length. +func GetNumberBytesInLengthHeader(b []byte) int { + if int(b[1]) <= 127 { + return 1 + } + // The bytes that indicate the length + return 1 + int(b[1]) - 128 +} + +// AddASNAppTag adds an ASN1 encoding application tag value to the raw bytes provided. +func AddASNAppTag(b []byte, tag int) []byte { + r := asn1.RawValue{ + Class: asn1.ClassApplication, + IsCompound: true, + Tag: tag, + Bytes: b, + } + ab, _ := asn1.Marshal(r) + return ab +} + +/* +// The Marshal method of golang's asn1 package does not enable you to define wrapping the output in an application tag. +// This method adds that wrapping tag. +func AddASNAppTag(b []byte, tag int) []byte { + // The ASN1 wrapping consists of 2 bytes: + // 1st byte -> Identifier Octet - Application Tag + // 2nd byte -> The length (this will be the size indicated in the input bytes + 2 for the additional bytes we add here. + // Application Tag: + //| Bit: | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | + //| Value: | 0 | 1 | 1 | From the RFC spec 4120 | + //| Explanation | Defined by the ASN1 encoding rules for an application tag | A value of 1 indicates a constructed type | The ASN Application tag value | + // Therefore the value of the byte is an integer = ( Application tag value + 96 ) + //b = append(MarshalLengthBytes(int(b[1])+2), b...) + b = append(MarshalLengthBytes(len(b)), b...) + b = append([]byte{byte(96 + tag)}, b...) + return b +} +*/ diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/ASExchange.go b/vendor/github.com/jcmturner/gokrb5/v8/client/ASExchange.go new file mode 100644 index 00000000000..5becccc4dde --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/ASExchange.go @@ -0,0 +1,182 @@ +package client + +import ( + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/crypto/etype" + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/patype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// ASExchange performs an AS exchange for the client to retrieve a TGT. 
+func (cl *Client) ASExchange(realm string, ASReq messages.ASReq, referral int) (messages.ASRep, error) { + if ok, err := cl.IsConfigured(); !ok { + return messages.ASRep{}, krberror.Errorf(err, krberror.ConfigError, "AS Exchange cannot be performed") + } + + // Set PAData if required + err := setPAData(cl, nil, &ASReq) + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: issue with setting PAData on AS_REQ") + } + + b, err := ASReq.Marshal() + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ") + } + var ASRep messages.ASRep + + rb, err := cl.sendToKDC(b, realm) + if err != nil { + if e, ok := err.(messages.KRBError); ok { + switch e.ErrorCode { + case errorcode.KDC_ERR_PREAUTH_REQUIRED, errorcode.KDC_ERR_PREAUTH_FAILED: + // From now on assume this client will need to do this pre-auth and set the PAData + cl.settings.assumePreAuthentication = true + err = setPAData(cl, &e, &ASReq) + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: failed setting AS_REQ PAData for pre-authentication required") + } + b, err := ASReq.Marshal() + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ with PAData") + } + rb, err = cl.sendToKDC(b, realm) + if err != nil { + if _, ok := err.(messages.KRBError); ok { + return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC") + } + return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC") + } + case errorcode.KDC_ERR_WRONG_REALM: + // Client referral https://tools.ietf.org/html/rfc6806.html#section-7 + if referral > 5 { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "maximum number of client referrals exceeded") + } + referral++ + return cl.ASExchange(e.CRealm, ASReq, referral) + default: + return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC") + } + } else { + return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC") + } + } + err = ASRep.Unmarshal(rb) + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed to process the AS_REP") + } + if ok, err := ASRep.Verify(cl.Config, cl.Credentials, ASReq); !ok { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: AS_REP is not valid or client password/keytab incorrect") + } + return ASRep, nil +} + +// setPAData adds pre-authentication data to the AS_REQ. +func setPAData(cl *Client, krberr *messages.KRBError, ASReq *messages.ASReq) error { + if !cl.settings.DisablePAFXFAST() { + pa := types.PAData{PADataType: patype.PA_REQ_ENC_PA_REP} + ASReq.PAData = append(ASReq.PAData, pa) + } + if cl.settings.AssumePreAuthentication() { + // Identify the etype to use to encrypt the PA Data + var et etype.EType + var err error + var key types.EncryptionKey + var kvno int + if krberr == nil { + // This is not in response to an error from the KDC. 
It is preemptive or renewal + // There is no KRB Error that tells us the etype to use + etn := cl.settings.preAuthEType // Use the etype that may have previously been negotiated + if etn == 0 { + etn = int32(cl.Config.LibDefaults.PreferredPreauthTypes[0]) // Resort to config + } + et, err = crypto.GetEtype(etn) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption") + } + key, kvno, err = cl.Key(et, 0, nil) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting key from credentials") + } + } else { + // Get the etype to use from the PA data in the KRBError e-data + et, err = preAuthEType(krberr) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption") + } + cl.settings.preAuthEType = et.GetETypeID() // Set the etype that has been defined for potential future use + key, kvno, err = cl.Key(et, 0, krberr) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting key from credentials") + } + } + // Generate the PA data + paTSb, err := types.GetPAEncTSEncAsnMarshalled() + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error creating PAEncTSEnc for Pre-Authentication") + } + paEncTS, err := crypto.GetEncryptedData(paTSb, key, keyusage.AS_REQ_PA_ENC_TIMESTAMP, kvno) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error encrypting pre-authentication timestamp") + } + pb, err := paEncTS.Marshal() + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error marshaling the PAEncTSEnc encrypted data") + } + pa := types.PAData{ + PADataType: patype.PA_ENC_TIMESTAMP, + PADataValue: pb, + } + // Look for and delete any exiting patype.PA_ENC_TIMESTAMP + for i, pa := range ASReq.PAData { + if pa.PADataType == patype.PA_ENC_TIMESTAMP { + ASReq.PAData[i] = ASReq.PAData[len(ASReq.PAData)-1] + ASReq.PAData = ASReq.PAData[:len(ASReq.PAData)-1] + } + } + ASReq.PAData = append(ASReq.PAData, pa) + } + return nil +} + +// preAuthEType establishes what encryption type to use for pre-authentication from the KRBError returned from the KDC. +func preAuthEType(krberr *messages.KRBError) (etype etype.EType, err error) { + //RFC 4120 5.2.7.5 covers the preference order of ETYPE-INFO2 and ETYPE-INFO. 
+ var etypeID int32 + var pas types.PADataSequence + e := pas.Unmarshal(krberr.EData) + if e != nil { + err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling KRBError data") + return + } +Loop: + for _, pa := range pas { + switch pa.PADataType { + case patype.PA_ETYPE_INFO2: + info, e := pa.GetETypeInfo2() + if e != nil { + err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling ETYPE-INFO2 data") + return + } + etypeID = info[0].EType + break Loop + case patype.PA_ETYPE_INFO: + info, e := pa.GetETypeInfo() + if e != nil { + err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling ETYPE-INFO data") + return + } + etypeID = info[0].EType + } + } + etype, e = crypto.GetEtype(etypeID) + if e != nil { + err = krberror.Errorf(e, krberror.EncryptingError, "error creating etype") + return + } + return etype, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go new file mode 100644 index 00000000000..e4571ce8669 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go @@ -0,0 +1,103 @@ +package client + +import ( + "github.com/jcmturner/gokrb5/v8/iana/flags" + "github.com/jcmturner/gokrb5/v8/iana/nametype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// TGSREQGenerateAndExchange generates the TGS_REQ and performs a TGS exchange to retrieve a ticket to the specified SPN. +func (cl *Client) TGSREQGenerateAndExchange(spn types.PrincipalName, kdcRealm string, tgt messages.Ticket, sessionKey types.EncryptionKey, renewal bool) (tgsReq messages.TGSReq, tgsRep messages.TGSRep, err error) { + tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, spn, renewal) + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "TGS Exchange Error: failed to generate a new TGS_REQ") + } + return cl.TGSExchange(tgsReq, kdcRealm, tgsRep.Ticket, sessionKey, 0) +} + +// TGSExchange exchanges the provided TGS_REQ with the KDC to retrieve a TGS_REP. +// Referrals are automatically handled. +// The client's cache is updated with the ticket received. 
+func (cl *Client) TGSExchange(tgsReq messages.TGSReq, kdcRealm string, tgt messages.Ticket, sessionKey types.EncryptionKey, referral int) (messages.TGSReq, messages.TGSRep, error) { + var tgsRep messages.TGSRep + b, err := tgsReq.Marshal() + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to marshal TGS_REQ") + } + r, err := cl.sendToKDC(b, kdcRealm) + if err != nil { + if _, ok := err.(messages.KRBError); ok { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.KDCError, "TGS Exchange Error: kerberos error response from KDC when requesting for %s", tgsReq.ReqBody.SName.PrincipalNameString()) + } + return tgsReq, tgsRep, krberror.Errorf(err, krberror.NetworkingError, "TGS Exchange Error: issue sending TGS_REQ to KDC") + } + err = tgsRep.Unmarshal(r) + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP") + } + err = tgsRep.DecryptEncPart(sessionKey) + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP") + } + if ok, err := tgsRep.Verify(cl.Config, tgsReq); !ok { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: TGS_REP is not valid") + } + + if tgsRep.Ticket.SName.NameString[0] == "krbtgt" && !tgsRep.Ticket.SName.Equal(tgsReq.ReqBody.SName) { + if referral > 5 { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "TGS Exchange Error: maximum number of referrals exceeded") + } + // Server referral https://tools.ietf.org/html/rfc6806.html#section-8 + // The TGS Rep contains a TGT for another domain as the service resides in that domain. + cl.addSession(tgsRep.Ticket, tgsRep.DecryptedEncPart) + realm := tgsRep.Ticket.SName.NameString[len(tgsRep.Ticket.SName.NameString)-1] + referral++ + if types.IsFlagSet(&tgsReq.ReqBody.KDCOptions, flags.EncTktInSkey) && len(tgsReq.ReqBody.AdditionalTickets) > 0 { + tgsReq, err = messages.NewUser2UserTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, tgsReq.ReqBody.SName, tgsReq.Renewal, tgsReq.ReqBody.AdditionalTickets[0]) + if err != nil { + return tgsReq, tgsRep, err + } + } + tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), realm, cl.Config, tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, tgsReq.ReqBody.SName, tgsReq.Renewal) + if err != nil { + return tgsReq, tgsRep, err + } + return cl.TGSExchange(tgsReq, realm, tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, referral) + } + cl.cache.addEntry( + tgsRep.Ticket, + tgsRep.DecryptedEncPart.AuthTime, + tgsRep.DecryptedEncPart.StartTime, + tgsRep.DecryptedEncPart.EndTime, + tgsRep.DecryptedEncPart.RenewTill, + tgsRep.DecryptedEncPart.Key, + ) + cl.Log("ticket added to cache for %s (EndTime: %v)", tgsRep.Ticket.SName.PrincipalNameString(), tgsRep.DecryptedEncPart.EndTime) + return tgsReq, tgsRep, err +} + +// GetServiceTicket makes a request to get a service ticket for the SPN specified +// SPN format: / Eg. 
HTTP/www.example.com +// The ticket will be added to the client's ticket cache +func (cl *Client) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) { + var tkt messages.Ticket + var skey types.EncryptionKey + if tkt, skey, ok := cl.GetCachedTicket(spn); ok { + // Already a valid ticket in the cache + return tkt, skey, nil + } + princ := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn) + realm := cl.Config.ResolveRealm(princ.NameString[len(princ.NameString)-1]) + + tgt, skey, err := cl.sessionTGT(realm) + if err != nil { + return tkt, skey, err + } + _, tgsRep, err := cl.TGSREQGenerateAndExchange(princ, realm, tgt, skey, false) + if err != nil { + return tkt, skey, err + } + return tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/cache.go b/vendor/github.com/jcmturner/gokrb5/v8/client/cache.go new file mode 100644 index 00000000000..552e73e41e1 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/cache.go @@ -0,0 +1,134 @@ +package client + +import ( + "encoding/json" + "errors" + "sort" + "sync" + "time" + + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// Cache for service tickets held by the client. +type Cache struct { + Entries map[string]CacheEntry + mux sync.RWMutex +} + +// CacheEntry holds details for a cache entry. +type CacheEntry struct { + SPN string + Ticket messages.Ticket `json:"-"` + AuthTime time.Time + StartTime time.Time + EndTime time.Time + RenewTill time.Time + SessionKey types.EncryptionKey `json:"-"` +} + +// NewCache creates a new client ticket cache instance. +func NewCache() *Cache { + return &Cache{ + Entries: map[string]CacheEntry{}, + } +} + +// getEntry returns a cache entry that matches the SPN. +func (c *Cache) getEntry(spn string) (CacheEntry, bool) { + c.mux.RLock() + defer c.mux.RUnlock() + e, ok := (*c).Entries[spn] + return e, ok +} + +// JSON returns information about the cached service tickets in a JSON format. +func (c *Cache) JSON() (string, error) { + c.mux.RLock() + defer c.mux.RUnlock() + var es []CacheEntry + keys := make([]string, 0, len(c.Entries)) + for k := range c.Entries { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + es = append(es, c.Entries[k]) + } + b, err := json.MarshalIndent(&es, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} + +// addEntry adds a ticket to the cache. +func (c *Cache) addEntry(tkt messages.Ticket, authTime, startTime, endTime, renewTill time.Time, sessionKey types.EncryptionKey) CacheEntry { + spn := tkt.SName.PrincipalNameString() + c.mux.Lock() + defer c.mux.Unlock() + (*c).Entries[spn] = CacheEntry{ + SPN: spn, + Ticket: tkt, + AuthTime: authTime, + StartTime: startTime, + EndTime: endTime, + RenewTill: renewTill, + SessionKey: sessionKey, + } + return c.Entries[spn] +} + +// clear deletes all the cache entries +func (c *Cache) clear() { + c.mux.Lock() + defer c.mux.Unlock() + for k := range c.Entries { + delete(c.Entries, k) + } +} + +// RemoveEntry removes the cache entry for the defined SPN. +func (c *Cache) RemoveEntry(spn string) { + c.mux.Lock() + defer c.mux.Unlock() + delete(c.Entries, spn) +} + +// GetCachedTicket returns a ticket from the cache for the SPN. +// Only a ticket that is currently valid will be returned. 
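To show how calling code typically combines the cache with the TGS exchange, here is a minimal sketch built only on the GetCachedTicket and GetServiceTicket methods from this vendored file; the helper name is illustrative, and note that GetServiceTicket already consults the cache itself, so the explicit check only matters when a fresh exchange should be avoided:

```
package client_example

import (
	"github.com/jcmturner/gokrb5/v8/client"
	"github.com/jcmturner/gokrb5/v8/messages"
	"github.com/jcmturner/gokrb5/v8/types"
)

// ticketFor prefers a still-valid (or renewable) cached ticket and only
// falls back to a fresh TGS exchange when no usable cache entry exists.
func ticketFor(cl *client.Client, spn string) (messages.Ticket, types.EncryptionKey, error) {
	if tkt, key, ok := cl.GetCachedTicket(spn); ok {
		return tkt, key, nil
	}
	// GetServiceTicket performs the TGS exchange and adds the result to the cache.
	return cl.GetServiceTicket(spn)
}
```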
+func (cl *Client) GetCachedTicket(spn string) (messages.Ticket, types.EncryptionKey, bool) { + if e, ok := cl.cache.getEntry(spn); ok { + //If within time window of ticket return it + if time.Now().UTC().After(e.StartTime) && time.Now().UTC().Before(e.EndTime) { + cl.Log("ticket received from cache for %s", spn) + return e.Ticket, e.SessionKey, true + } else if time.Now().UTC().Before(e.RenewTill) { + e, err := cl.renewTicket(e) + if err != nil { + return e.Ticket, e.SessionKey, false + } + return e.Ticket, e.SessionKey, true + } + } + var tkt messages.Ticket + var key types.EncryptionKey + return tkt, key, false +} + +// renewTicket renews a cache entry ticket. +// To renew from outside the client package use GetCachedTicket +func (cl *Client) renewTicket(e CacheEntry) (CacheEntry, error) { + spn := e.Ticket.SName + _, _, err := cl.TGSREQGenerateAndExchange(spn, e.Ticket.Realm, e.Ticket, e.SessionKey, true) + if err != nil { + return e, err + } + e, ok := cl.cache.getEntry(e.Ticket.SName.PrincipalNameString()) + if !ok { + return e, errors.New("ticket was not added to cache") + } + cl.Log("ticket renewed for %s (EndTime: %v)", spn.PrincipalNameString(), e.EndTime) + return e, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/client.go b/vendor/github.com/jcmturner/gokrb5/v8/client/client.go new file mode 100644 index 00000000000..074e3f1245f --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/client.go @@ -0,0 +1,329 @@ +// Package client provides a client library and methods for Kerberos 5 authentication. +package client + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/credentials" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/crypto/etype" + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/iana/nametype" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// Client side configuration and state. +type Client struct { + Credentials *credentials.Credentials + Config *config.Config + settings *Settings + sessions *sessions + cache *Cache +} + +// NewWithPassword creates a new client from a password credential. +// Set the realm to empty string to use the default realm from config. +func NewWithPassword(username, realm, password string, krb5conf *config.Config, settings ...func(*Settings)) *Client { + creds := credentials.New(username, realm) + return &Client{ + Credentials: creds.WithPassword(password), + Config: krb5conf, + settings: NewSettings(settings...), + sessions: &sessions{ + Entries: make(map[string]*session), + }, + cache: NewCache(), + } +} + +// NewWithKeytab creates a new client from a keytab credential. +func NewWithKeytab(username, realm string, kt *keytab.Keytab, krb5conf *config.Config, settings ...func(*Settings)) *Client { + creds := credentials.New(username, realm) + return &Client{ + Credentials: creds.WithKeytab(kt), + Config: krb5conf, + settings: NewSettings(settings...), + sessions: &sessions{ + Entries: make(map[string]*session), + }, + cache: NewCache(), + } +} + +// NewFromCCache create a client from a populated client cache. +// +// WARNING: A client created from CCache does not automatically renew TGTs and a failure will occur after the TGT expires. 
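A minimal sketch of building a client from an existing credential cache, as the constructor above expects. The credentials.LoadCCache and config.Load helpers and the file paths are assumptions about the surrounding gokrb5 v8 API rather than part of this patch; the WARNING above applies, i.e. the resulting client will not renew its TGT automatically:

```
package main

import (
	"log"

	"github.com/jcmturner/gokrb5/v8/client"
	"github.com/jcmturner/gokrb5/v8/config"
	"github.com/jcmturner/gokrb5/v8/credentials"
)

func main() {
	// Assumed helpers: credentials.LoadCCache and config.Load; illustrative paths.
	ccache, err := credentials.LoadCCache("/tmp/krb5cc_1000")
	if err != nil {
		log.Fatal(err)
	}
	cfg, err := config.Load("/etc/krb5.conf")
	if err != nil {
		log.Fatal(err)
	}
	// The resulting client does not renew the TGT automatically (see WARNING above).
	cl, err := client.NewFromCCache(ccache, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cl.Destroy()
}
```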
+func NewFromCCache(c *credentials.CCache, krb5conf *config.Config, settings ...func(*Settings)) (*Client, error) { + cl := &Client{ + Credentials: c.GetClientCredentials(), + Config: krb5conf, + settings: NewSettings(settings...), + sessions: &sessions{ + Entries: make(map[string]*session), + }, + cache: NewCache(), + } + spn := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", c.DefaultPrincipal.Realm}, + } + cred, ok := c.GetEntry(spn) + if !ok { + return cl, errors.New("TGT not found in CCache") + } + var tgt messages.Ticket + err := tgt.Unmarshal(cred.Ticket) + if err != nil { + return cl, fmt.Errorf("TGT bytes in cache are not valid: %v", err) + } + cl.sessions.Entries[c.DefaultPrincipal.Realm] = &session{ + realm: c.DefaultPrincipal.Realm, + authTime: cred.AuthTime, + endTime: cred.EndTime, + renewTill: cred.RenewTill, + tgt: tgt, + sessionKey: cred.Key, + } + for _, cred := range c.GetEntries() { + var tkt messages.Ticket + err = tkt.Unmarshal(cred.Ticket) + if err != nil { + return cl, fmt.Errorf("cache entry ticket bytes are not valid: %v", err) + } + cl.cache.addEntry( + tkt, + cred.AuthTime, + cred.StartTime, + cred.EndTime, + cred.RenewTill, + cred.Key, + ) + } + return cl, nil +} + +// Key returns the client's encryption key for the specified encryption type and its kvno (kvno of zero will find latest). +// The key can be retrieved either from the keytab or generated from the client's password. +// If the client has both a keytab and a password defined the keytab is favoured as the source for the key +// A KRBError can be passed in the event the KDC returns one of type KDC_ERR_PREAUTH_REQUIRED and is required to derive +// the key for pre-authentication from the client's password. If a KRBError is not available, pass nil to this argument. +func (cl *Client) Key(etype etype.EType, kvno int, krberr *messages.KRBError) (types.EncryptionKey, int, error) { + if cl.Credentials.HasKeytab() && etype != nil { + return cl.Credentials.Keytab().GetEncryptionKey(cl.Credentials.CName(), cl.Credentials.Domain(), kvno, etype.GetETypeID()) + } else if cl.Credentials.HasPassword() { + if krberr != nil && krberr.ErrorCode == errorcode.KDC_ERR_PREAUTH_REQUIRED { + var pas types.PADataSequence + err := pas.Unmarshal(krberr.EData) + if err != nil { + return types.EncryptionKey{}, 0, fmt.Errorf("could not get PAData from KRBError to generate key from password: %v", err) + } + key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password(), krberr.CName, krberr.CRealm, etype.GetETypeID(), pas) + return key, 0, err + } + key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password(), cl.Credentials.CName(), cl.Credentials.Domain(), etype.GetETypeID(), types.PADataSequence{}) + return key, 0, err + } + return types.EncryptionKey{}, 0, errors.New("credential has neither keytab or password to generate key") +} + +// IsConfigured indicates if the client has the values required set. 
+func (cl *Client) IsConfigured() (bool, error) { + if cl.Credentials.UserName() == "" { + return false, errors.New("client does not have a username") + } + if cl.Credentials.Domain() == "" { + return false, errors.New("client does not have a define realm") + } + // Client needs to have either a password, keytab or a session already (later when loading from CCache) + if !cl.Credentials.HasPassword() && !cl.Credentials.HasKeytab() { + authTime, _, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil || authTime.IsZero() { + return false, errors.New("client has neither a keytab nor a password set and no session") + } + } + if !cl.Config.LibDefaults.DNSLookupKDC { + for _, r := range cl.Config.Realms { + if r.Realm == cl.Credentials.Domain() { + if len(r.KDC) > 0 { + return true, nil + } + return false, errors.New("client krb5 config does not have any defined KDCs for the default realm") + } + } + } + return true, nil +} + +// Login the client with the KDC via an AS exchange. +func (cl *Client) Login() error { + if ok, err := cl.IsConfigured(); !ok { + return err + } + if !cl.Credentials.HasPassword() && !cl.Credentials.HasKeytab() { + _, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "no user credentials available and error getting any existing session") + } + if time.Now().UTC().After(endTime) { + return krberror.New(krberror.KRBMsgError, "cannot login, no user credentials available and no valid existing session") + } + // no credentials but there is a session with tgt already + return nil + } + ASReq, err := messages.NewASReqForTGT(cl.Credentials.Domain(), cl.Config, cl.Credentials.CName()) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AS_REQ") + } + ASRep, err := cl.ASExchange(cl.Credentials.Domain(), ASReq, 0) + if err != nil { + return err + } + cl.addSession(ASRep.Ticket, ASRep.DecryptedEncPart) + return nil +} + +// AffirmLogin will only perform an AS exchange with the KDC if the client does not already have a TGT. +func (cl *Client) AffirmLogin() error { + _, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil || time.Now().UTC().After(endTime) { + err := cl.Login() + if err != nil { + return fmt.Errorf("could not get valid TGT for client's realm: %v", err) + } + } + return nil +} + +// realmLogin obtains or renews a TGT and establishes a session for the realm specified. +func (cl *Client) realmLogin(realm string) error { + if realm == cl.Credentials.Domain() { + return cl.Login() + } + _, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil || time.Now().UTC().After(endTime) { + err := cl.Login() + if err != nil { + return fmt.Errorf("could not get valid TGT for client's realm: %v", err) + } + } + tgt, skey, err := cl.sessionTGT(cl.Credentials.Domain()) + if err != nil { + return err + } + + spn := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", realm}, + } + + _, tgsRep, err := cl.TGSREQGenerateAndExchange(spn, cl.Credentials.Domain(), tgt, skey, false) + if err != nil { + return err + } + cl.addSession(tgsRep.Ticket, tgsRep.DecryptedEncPart) + + return nil +} + +// Destroy stops the auto-renewal of all sessions and removes the sessions and cache entries from the client. 
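Because AffirmLogin only performs an AS exchange when no valid TGT session exists, it can be called cheaply before each piece of work; a sketch using a hypothetical helper name:

package kerbutil

import "github.com/jcmturner/gokrb5/v8/client"

// ensureTGT is a hypothetical helper: it validates the client configuration
// and is a no-op while an existing TGT session remains valid.
func ensureTGT(cl *client.Client) error {
	if ok, err := cl.IsConfigured(); !ok {
		return err
	}
	return cl.AffirmLogin()
}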
+func (cl *Client) Destroy() { + creds := credentials.New("", "") + cl.sessions.destroy() + cl.cache.clear() + cl.Credentials = creds + cl.Log("client destroyed") +} + +// Diagnostics runs a set of checks that the client is properly configured and writes details to the io.Writer provided. +func (cl *Client) Diagnostics(w io.Writer) error { + cl.Print(w) + var errs []string + if cl.Credentials.HasKeytab() { + var loginRealmEncTypes []int32 + for _, e := range cl.Credentials.Keytab().Entries { + if e.Principal.Realm == cl.Credentials.Realm() { + loginRealmEncTypes = append(loginRealmEncTypes, e.Key.KeyType) + } + } + for _, et := range cl.Config.LibDefaults.DefaultTktEnctypeIDs { + var etInKt bool + for _, val := range loginRealmEncTypes { + if val == et { + etInKt = true + break + } + } + if !etInKt { + errs = append(errs, fmt.Sprintf("default_tkt_enctypes specifies %d but this enctype is not available in the client's keytab", et)) + } + } + for _, et := range cl.Config.LibDefaults.PreferredPreauthTypes { + var etInKt bool + for _, val := range loginRealmEncTypes { + if int(val) == et { + etInKt = true + break + } + } + if !etInKt { + errs = append(errs, fmt.Sprintf("preferred_preauth_types specifies %d but this enctype is not available in the client's keytab", et)) + } + } + } + udpCnt, udpKDC, err := cl.Config.GetKDCs(cl.Credentials.Realm(), false) + if err != nil { + errs = append(errs, fmt.Sprintf("error when resolving KDCs for UDP communication: %v", err)) + } + if udpCnt < 1 { + errs = append(errs, "no KDCs resolved for communication via UDP.") + } else { + b, _ := json.MarshalIndent(&udpKDC, "", " ") + fmt.Fprintf(w, "UDP KDCs: %s\n", string(b)) + } + tcpCnt, tcpKDC, err := cl.Config.GetKDCs(cl.Credentials.Realm(), false) + if err != nil { + errs = append(errs, fmt.Sprintf("error when resolving KDCs for TCP communication: %v", err)) + } + if tcpCnt < 1 { + errs = append(errs, "no KDCs resolved for communication via TCP.") + } else { + b, _ := json.MarshalIndent(&tcpKDC, "", " ") + fmt.Fprintf(w, "TCP KDCs: %s\n", string(b)) + } + + if errs == nil || len(errs) < 1 { + return nil + } + err = fmt.Errorf(strings.Join(errs, "\n")) + return err +} + +// Print writes the details of the client to the io.Writer provided. +func (cl *Client) Print(w io.Writer) { + c, _ := cl.Credentials.JSON() + fmt.Fprintf(w, "Credentials:\n%s\n", c) + + s, _ := cl.sessions.JSON() + fmt.Fprintf(w, "TGT Sessions:\n%s\n", s) + + c, _ = cl.cache.JSON() + fmt.Fprintf(w, "Service ticket cache:\n%s\n", c) + + s, _ = cl.settings.JSON() + fmt.Fprintf(w, "Settings:\n%s\n", s) + + j, _ := cl.Config.JSON() + fmt.Fprintf(w, "Krb5 config:\n%s\n", j) + + k, _ := cl.Credentials.Keytab().JSON() + fmt.Fprintf(w, "Keytab:\n%s\n", k) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/network.go b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go new file mode 100644 index 00000000000..634f015c214 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go @@ -0,0 +1,218 @@ +package client + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "strings" + "time" + + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/messages" +) + +// SendToKDC performs network actions to send data to the KDC. 
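Diagnostics above writes the client state and configuration checks to any io.Writer; a sketch sending the report to stdout (config path and credentials are placeholders):

package main

import (
	"log"
	"os"

	"github.com/jcmturner/gokrb5/v8/client"
	"github.com/jcmturner/gokrb5/v8/config"
)

func main() {
	cfg, err := config.Load("/etc/krb5.conf") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	cl := client.NewWithPassword("alice", "EXAMPLE.COM", "password", cfg)
	// Diagnostics prints credentials, sessions and config, then checks that
	// KDCs resolve and that keytab enctypes line up with the configuration.
	if err := cl.Diagnostics(os.Stdout); err != nil {
		log.Printf("diagnostics reported problems:\n%v", err)
	}
}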
+func (cl *Client) sendToKDC(b []byte, realm string) ([]byte, error) { + var rb []byte + if cl.Config.LibDefaults.UDPPreferenceLimit == 1 { + //1 means we should always use TCP + rb, errtcp := cl.sendKDCTCP(realm, b) + if errtcp != nil { + if e, ok := errtcp.(messages.KRBError); ok { + return rb, e + } + return rb, fmt.Errorf("communication error with KDC via TCP: %v", errtcp) + } + return rb, nil + } + if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit { + //Try UDP first, TCP second + rb, errudp := cl.sendKDCUDP(realm, b) + if errudp != nil { + if e, ok := errudp.(messages.KRBError); ok && e.ErrorCode != errorcode.KRB_ERR_RESPONSE_TOO_BIG { + // Got a KRBError from KDC + // If this is not a KRB_ERR_RESPONSE_TOO_BIG we will return immediately otherwise will try TCP. + return rb, e + } + // Try TCP + r, errtcp := cl.sendKDCTCP(realm, b) + if errtcp != nil { + if e, ok := errtcp.(messages.KRBError); ok { + // Got a KRBError + return r, e + } + return r, fmt.Errorf("failed to communicate with KDC. Attempts made with UDP (%v) and then TCP (%v)", errudp, errtcp) + } + rb = r + } + return rb, nil + } + //Try TCP first, UDP second + rb, errtcp := cl.sendKDCTCP(realm, b) + if errtcp != nil { + if e, ok := errtcp.(messages.KRBError); ok { + // Got a KRBError from KDC so returning and not trying UDP. + return rb, e + } + rb, errudp := cl.sendKDCUDP(realm, b) + if errudp != nil { + if e, ok := errudp.(messages.KRBError); ok { + // Got a KRBError + return rb, e + } + return rb, fmt.Errorf("failed to communicate with KDC. Attempts made with TCP (%v) and then UDP (%v)", errtcp, errudp) + } + } + return rb, nil +} + +// sendKDCUDP sends bytes to the KDC via UDP. +func (cl *Client) sendKDCUDP(realm string, b []byte) ([]byte, error) { + var r []byte + _, kdcs, err := cl.Config.GetKDCs(realm, false) + if err != nil { + return r, err + } + r, err = dialSendUDP(kdcs, b) + if err != nil { + return r, err + } + return checkForKRBError(r) +} + +// dialSendUDP establishes a UDP connection to a KDC. +func dialSendUDP(kdcs map[int]string, b []byte) ([]byte, error) { + var errs []string + for i := 1; i <= len(kdcs); i++ { + udpAddr, err := net.ResolveUDPAddr("udp", kdcs[i]) + if err != nil { + errs = append(errs, fmt.Sprintf("error resolving KDC address: %v", err)) + continue + } + + conn, err := net.DialTimeout("udp", udpAddr.String(), 5*time.Second) + if err != nil { + errs = append(errs, fmt.Sprintf("error setting dial timeout on connection to %s: %v", kdcs[i], err)) + continue + } + if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil { + errs = append(errs, fmt.Sprintf("error setting deadline on connection to %s: %v", kdcs[i], err)) + continue + } + // conn is guaranteed to be a UDPConn + rb, err := sendUDP(conn.(*net.UDPConn), b) + if err != nil { + errs = append(errs, fmt.Sprintf("error sneding to %s: %v", kdcs[i], err)) + continue + } + return rb, nil + } + return nil, fmt.Errorf("error sending to a KDC: %s", strings.Join(errs, "; ")) +} + +// sendUDP sends bytes to connection over UDP. 
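sendToKDC honours udp_preference_limit, with a value of 1 forcing TCP only; a sketch of setting that through a configuration string (realm and KDC names are illustrative):

package main

import (
	"log"

	"github.com/jcmturner/gokrb5/v8/config"
)

func main() {
	// udp_preference_limit = 1 makes sendToKDC skip UDP entirely.
	const krb5conf = `
[libdefaults]
  default_realm = EXAMPLE.COM
  udp_preference_limit = 1

[realms]
  EXAMPLE.COM = {
    kdc = kdc1.example.com:88
  }
`
	cfg, err := config.NewFromString(krb5conf)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("UDP preference limit: %d", cfg.LibDefaults.UDPPreferenceLimit) // prints 1
}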
+func sendUDP(conn *net.UDPConn, b []byte) ([]byte, error) { + var r []byte + defer conn.Close() + _, err := conn.Write(b) + if err != nil { + return r, fmt.Errorf("error sending to (%s): %v", conn.RemoteAddr().String(), err) + } + udpbuf := make([]byte, 4096) + n, _, err := conn.ReadFrom(udpbuf) + r = udpbuf[:n] + if err != nil { + return r, fmt.Errorf("sending over UDP failed to %s: %v", conn.RemoteAddr().String(), err) + } + if len(r) < 1 { + return r, fmt.Errorf("no response data from %s", conn.RemoteAddr().String()) + } + return r, nil +} + +// sendKDCTCP sends bytes to the KDC via TCP. +func (cl *Client) sendKDCTCP(realm string, b []byte) ([]byte, error) { + var r []byte + _, kdcs, err := cl.Config.GetKDCs(realm, true) + if err != nil { + return r, err + } + r, err = dialSendTCP(kdcs, b) + if err != nil { + return r, err + } + return checkForKRBError(r) +} + +// dialKDCTCP establishes a TCP connection to a KDC. +func dialSendTCP(kdcs map[int]string, b []byte) ([]byte, error) { + var errs []string + for i := 1; i <= len(kdcs); i++ { + tcpAddr, err := net.ResolveTCPAddr("tcp", kdcs[i]) + if err != nil { + errs = append(errs, fmt.Sprintf("error resolving KDC address: %v", err)) + continue + } + + conn, err := net.DialTimeout("tcp", tcpAddr.String(), 5*time.Second) + if err != nil { + errs = append(errs, fmt.Sprintf("error setting dial timeout on connection to %s: %v", kdcs[i], err)) + continue + } + if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil { + errs = append(errs, fmt.Sprintf("error setting deadline on connection to %s: %v", kdcs[i], err)) + continue + } + // conn is guaranteed to be a TCPConn + rb, err := sendTCP(conn.(*net.TCPConn), b) + if err != nil { + errs = append(errs, fmt.Sprintf("error sneding to %s: %v", kdcs[i], err)) + continue + } + return rb, nil + } + return nil, errors.New("error in getting a TCP connection to any of the KDCs") +} + +// sendTCP sends bytes to connection over TCP. +func sendTCP(conn *net.TCPConn, b []byte) ([]byte, error) { + defer conn.Close() + var r []byte + // RFC 4120 7.2.2 specifies the first 4 bytes indicate the length of the message in big endian order. + hb := make([]byte, 4, 4) + binary.BigEndian.PutUint32(hb, uint32(len(b))) + b = append(hb, b...) + + _, err := conn.Write(b) + if err != nil { + return r, fmt.Errorf("error sending to KDC (%s): %v", conn.RemoteAddr().String(), err) + } + + sh := make([]byte, 4, 4) + _, err = conn.Read(sh) + if err != nil { + return r, fmt.Errorf("error reading response size header: %v", err) + } + s := binary.BigEndian.Uint32(sh) + + rb := make([]byte, s, s) + _, err = io.ReadFull(conn, rb) + if err != nil { + return r, fmt.Errorf("error reading response: %v", err) + } + if len(rb) < 1 { + return r, fmt.Errorf("no response data from KDC %s", conn.RemoteAddr().String()) + } + return rb, nil +} + +// checkForKRBError checks if the response bytes from the KDC are a KRBError. +func checkForKRBError(b []byte) ([]byte, error) { + var KRBErr messages.KRBError + if err := KRBErr.Unmarshal(b); err == nil { + return b, KRBErr + } + return b, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/passwd.go b/vendor/github.com/jcmturner/gokrb5/v8/client/passwd.go new file mode 100644 index 00000000000..fe20559ca12 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/passwd.go @@ -0,0 +1,75 @@ +package client + +import ( + "fmt" + + "github.com/jcmturner/gokrb5/v8/kadmin" + "github.com/jcmturner/gokrb5/v8/messages" +) + +// Kpasswd server response codes. 
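sendTCP above prepends the 4-byte big-endian length header required by RFC 4120 section 7.2.2; a standalone sketch of that framing, with a hypothetical helper name:

package kerbutil

import "encoding/binary"

// frameKRBMessage is a hypothetical helper mirroring the framing used by
// sendTCP: a 4-byte big-endian length header followed by the message bytes.
func frameKRBMessage(msg []byte) []byte {
	hdr := make([]byte, 4)
	binary.BigEndian.PutUint32(hdr, uint32(len(msg)))
	return append(hdr, msg...)
}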
+const ( + KRB5_KPASSWD_SUCCESS = 0 + KRB5_KPASSWD_MALFORMED = 1 + KRB5_KPASSWD_HARDERROR = 2 + KRB5_KPASSWD_AUTHERROR = 3 + KRB5_KPASSWD_SOFTERROR = 4 + KRB5_KPASSWD_ACCESSDENIED = 5 + KRB5_KPASSWD_BAD_VERSION = 6 + KRB5_KPASSWD_INITIAL_FLAG_NEEDED = 7 +) + +// ChangePasswd changes the password of the client to the value provided. +func (cl *Client) ChangePasswd(newPasswd string) (bool, error) { + ASReq, err := messages.NewASReqForChgPasswd(cl.Credentials.Domain(), cl.Config, cl.Credentials.CName()) + if err != nil { + return false, err + } + ASRep, err := cl.ASExchange(cl.Credentials.Domain(), ASReq, 0) + if err != nil { + return false, err + } + + msg, key, err := kadmin.ChangePasswdMsg(cl.Credentials.CName(), cl.Credentials.Domain(), newPasswd, ASRep.Ticket, ASRep.DecryptedEncPart.Key) + if err != nil { + return false, err + } + r, err := cl.sendToKPasswd(msg) + if err != nil { + return false, err + } + err = r.Decrypt(key) + if err != nil { + return false, err + } + if r.ResultCode != KRB5_KPASSWD_SUCCESS { + return false, fmt.Errorf("error response from kadmin: code: %d; result: %s; krberror: %v", r.ResultCode, r.Result, r.KRBError) + } + cl.Credentials.WithPassword(newPasswd) + return true, nil +} + +func (cl *Client) sendToKPasswd(msg kadmin.Request) (r kadmin.Reply, err error) { + _, kps, err := cl.Config.GetKpasswdServers(cl.Credentials.Domain(), true) + if err != nil { + return + } + b, err := msg.Marshal() + if err != nil { + return + } + var rb []byte + if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit { + rb, err = dialSendUDP(kps, b) + if err != nil { + return + } + } else { + rb, err = dialSendTCP(kps, b) + if err != nil { + return + } + } + err = r.Unmarshal(rb) + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/session.go b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go new file mode 100644 index 00000000000..f7654d0d07e --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go @@ -0,0 +1,295 @@ +package client + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/jcmturner/gokrb5/v8/iana/nametype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// sessions hold TGTs and are keyed on the realm name +type sessions struct { + Entries map[string]*session + mux sync.RWMutex +} + +// destroy erases all sessions +func (s *sessions) destroy() { + s.mux.Lock() + defer s.mux.Unlock() + for k, e := range s.Entries { + e.destroy() + delete(s.Entries, k) + } +} + +// update replaces a session with the one provided or adds it as a new one +func (s *sessions) update(sess *session) { + s.mux.Lock() + defer s.mux.Unlock() + // if a session already exists for this, cancel its auto renew. + if i, ok := s.Entries[sess.realm]; ok { + if i != sess { + // Session in the sessions cache is not the same as one provided. + // Cancel the one in the cache and add this one. 
+ i.mux.Lock() + defer i.mux.Unlock() + i.cancel <- true + s.Entries[sess.realm] = sess + return + } + } + // No session for this realm was found so just add it + s.Entries[sess.realm] = sess +} + +// get returns the session for the realm specified +func (s *sessions) get(realm string) (*session, bool) { + s.mux.RLock() + defer s.mux.RUnlock() + sess, ok := s.Entries[realm] + return sess, ok +} + +// session holds the TGT details for a realm +type session struct { + realm string + authTime time.Time + endTime time.Time + renewTill time.Time + tgt messages.Ticket + sessionKey types.EncryptionKey + sessionKeyExpiration time.Time + cancel chan bool + mux sync.RWMutex +} + +// jsonSession is used to enable marshaling some information of a session in a JSON format +type jsonSession struct { + Realm string + AuthTime time.Time + EndTime time.Time + RenewTill time.Time + SessionKeyExpiration time.Time +} + +// AddSession adds a session for a realm with a TGT to the client's session cache. +// A goroutine is started to automatically renew the TGT before expiry. +func (cl *Client) addSession(tgt messages.Ticket, dep messages.EncKDCRepPart) { + if strings.ToLower(tgt.SName.NameString[0]) != "krbtgt" { + // Not a TGT + return + } + realm := tgt.SName.NameString[len(tgt.SName.NameString)-1] + s := &session{ + realm: realm, + authTime: dep.AuthTime, + endTime: dep.EndTime, + renewTill: dep.RenewTill, + tgt: tgt, + sessionKey: dep.Key, + sessionKeyExpiration: dep.KeyExpiration, + } + cl.sessions.update(s) + cl.enableAutoSessionRenewal(s) + cl.Log("TGT session added for %s (EndTime: %v)", realm, dep.EndTime) +} + +// update overwrites the session details with those from the TGT and decrypted encPart +func (s *session) update(tgt messages.Ticket, dep messages.EncKDCRepPart) { + s.mux.Lock() + defer s.mux.Unlock() + s.authTime = dep.AuthTime + s.endTime = dep.EndTime + s.renewTill = dep.RenewTill + s.tgt = tgt + s.sessionKey = dep.Key + s.sessionKeyExpiration = dep.KeyExpiration +} + +// destroy will cancel any auto renewal of the session and set the expiration times to the current time +func (s *session) destroy() { + s.mux.Lock() + defer s.mux.Unlock() + if s.cancel != nil { + s.cancel <- true + } + s.endTime = time.Now().UTC() + s.renewTill = s.endTime + s.sessionKeyExpiration = s.endTime +} + +// valid informs if the TGT is still within the valid time window +func (s *session) valid() bool { + s.mux.RLock() + defer s.mux.RUnlock() + t := time.Now().UTC() + if t.Before(s.endTime) && s.authTime.Before(t) { + return true + } + return false +} + +// tgtDetails is a thread safe way to get the session's realm, TGT and session key values +func (s *session) tgtDetails() (string, messages.Ticket, types.EncryptionKey) { + s.mux.RLock() + defer s.mux.RUnlock() + return s.realm, s.tgt, s.sessionKey +} + +// timeDetails is a thread safe way to get the session's validity time values +func (s *session) timeDetails() (string, time.Time, time.Time, time.Time, time.Time) { + s.mux.RLock() + defer s.mux.RUnlock() + return s.realm, s.authTime, s.endTime, s.renewTill, s.sessionKeyExpiration +} + +// JSON return information about the held sessions in a JSON format. 
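The ChangePasswd method added in passwd.go above builds its own AS request (NewASReqForChgPasswd), so no prior Login call is needed; a sketch with placeholder credentials:

package main

import (
	"log"

	"github.com/jcmturner/gokrb5/v8/client"
	"github.com/jcmturner/gokrb5/v8/config"
)

func main() {
	cfg, err := config.Load("/etc/krb5.conf") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	cl := client.NewWithPassword("alice", "EXAMPLE.COM", "old-password", cfg)
	// On success the client's stored password is updated as well.
	ok, err := cl.ChangePasswd("new-password")
	if err != nil || !ok {
		log.Fatalf("password change failed: %v", err)
	}
	log.Println("password changed")
}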
+func (s *sessions) JSON() (string, error) { + s.mux.RLock() + defer s.mux.RUnlock() + var js []jsonSession + keys := make([]string, 0, len(s.Entries)) + for k := range s.Entries { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + r, at, et, rt, kt := s.Entries[k].timeDetails() + j := jsonSession{ + Realm: r, + AuthTime: at, + EndTime: et, + RenewTill: rt, + SessionKeyExpiration: kt, + } + js = append(js, j) + } + b, err := json.MarshalIndent(js, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} + +// enableAutoSessionRenewal turns on the automatic renewal for the client's TGT session. +func (cl *Client) enableAutoSessionRenewal(s *session) { + var timer *time.Timer + s.mux.Lock() + s.cancel = make(chan bool, 1) + s.mux.Unlock() + go func(s *session) { + for { + s.mux.RLock() + w := (s.endTime.Sub(time.Now().UTC()) * 5) / 6 + s.mux.RUnlock() + if w < 0 { + return + } + timer = time.NewTimer(w) + select { + case <-timer.C: + renewal, err := cl.refreshSession(s) + if err != nil { + cl.Log("error refreshing session: %v", err) + } + if !renewal && err == nil { + // end this goroutine as there will have been a new login and new auto renewal goroutine created. + return + } + case <-s.cancel: + // cancel has been called. Stop the timer and exit. + timer.Stop() + return + } + } + }(s) +} + +// renewTGT renews the client's TGT session. +func (cl *Client) renewTGT(s *session) error { + realm, tgt, skey := s.tgtDetails() + spn := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", realm}, + } + _, tgsRep, err := cl.TGSREQGenerateAndExchange(spn, cl.Credentials.Domain(), tgt, skey, true) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error renewing TGT for %s", realm) + } + s.update(tgsRep.Ticket, tgsRep.DecryptedEncPart) + cl.sessions.update(s) + cl.Log("TGT session renewed for %s (EndTime: %v)", realm, tgsRep.DecryptedEncPart.EndTime) + return nil +} + +// refreshSession updates either through renewal or creating a new login. +// The boolean indicates if the update was a renewal. +func (cl *Client) refreshSession(s *session) (bool, error) { + s.mux.RLock() + realm := s.realm + renewTill := s.renewTill + s.mux.RUnlock() + cl.Log("refreshing TGT session for %s", realm) + if time.Now().UTC().Before(renewTill) { + err := cl.renewTGT(s) + return true, err + } + err := cl.realmLogin(realm) + return false, err +} + +// ensureValidSession makes sure there is a valid session for the realm +func (cl *Client) ensureValidSession(realm string) error { + s, ok := cl.sessions.get(realm) + if ok { + s.mux.RLock() + d := s.endTime.Sub(s.authTime) / 6 + if s.endTime.Sub(time.Now().UTC()) > d { + s.mux.RUnlock() + return nil + } + s.mux.RUnlock() + _, err := cl.refreshSession(s) + return err + } + return cl.realmLogin(realm) +} + +// sessionTGTDetails is a thread safe way to get the TGT and session key values for a realm +func (cl *Client) sessionTGT(realm string) (tgt messages.Ticket, sessionKey types.EncryptionKey, err error) { + err = cl.ensureValidSession(realm) + if err != nil { + return + } + s, ok := cl.sessions.get(realm) + if !ok { + err = fmt.Errorf("could not find TGT session for %s", realm) + return + } + _, tgt, sessionKey = s.tgtDetails() + return +} + +// sessionTimes provides the timing information with regards to a session for the realm specified. 
+func (cl *Client) sessionTimes(realm string) (authTime, endTime, renewTime, sessionExp time.Time, err error) { + s, ok := cl.sessions.get(realm) + if !ok { + err = fmt.Errorf("could not find TGT session for %s", realm) + return + } + _, authTime, endTime, renewTime, sessionExp = s.timeDetails() + return +} + +// spnRealm resolves the realm name of a service principal name +func (cl *Client) spnRealm(spn types.PrincipalName) string { + return cl.Config.ResolveRealm(spn.NameString[len(spn.NameString)-1]) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/settings.go b/vendor/github.com/jcmturner/gokrb5/v8/client/settings.go new file mode 100644 index 00000000000..bcd3945431e --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/settings.go @@ -0,0 +1,93 @@ +package client + +import ( + "encoding/json" + "fmt" + "log" +) + +// Settings holds optional client settings. +type Settings struct { + disablePAFXFast bool + assumePreAuthentication bool + preAuthEType int32 + logger *log.Logger +} + +// jsonSettings is used when marshaling the Settings details to JSON format. +type jsonSettings struct { + DisablePAFXFast bool + AssumePreAuthentication bool +} + +// NewSettings creates a new client settings struct. +func NewSettings(settings ...func(*Settings)) *Settings { + s := new(Settings) + for _, set := range settings { + set(s) + } + return s +} + +// DisablePAFXFAST used to configure the client to not use PA_FX_FAST. +// +// s := NewSettings(DisablePAFXFAST(true)) +func DisablePAFXFAST(b bool) func(*Settings) { + return func(s *Settings) { + s.disablePAFXFast = b + } +} + +// DisablePAFXFAST indicates is the client should disable the use of PA_FX_FAST. +func (s *Settings) DisablePAFXFAST() bool { + return s.disablePAFXFast +} + +// AssumePreAuthentication used to configure the client to assume pre-authentication is required. +// +// s := NewSettings(AssumePreAuthentication(true)) +func AssumePreAuthentication(b bool) func(*Settings) { + return func(s *Settings) { + s.assumePreAuthentication = b + } +} + +// AssumePreAuthentication indicates if the client should proactively assume using pre-authentication. +func (s *Settings) AssumePreAuthentication() bool { + return s.assumePreAuthentication +} + +// Logger used to configure client with a logger. +// +// s := NewSettings(kt, Logger(l)) +func Logger(l *log.Logger) func(*Settings) { + return func(s *Settings) { + s.logger = l + } +} + +// Logger returns the client logger instance. +func (s *Settings) Logger() *log.Logger { + return s.logger +} + +// Log will write to the service's logger if it is configured. +func (cl *Client) Log(format string, v ...interface{}) { + if cl.settings.Logger() != nil { + cl.settings.Logger().Output(2, fmt.Sprintf(format, v...)) + } +} + +// JSON returns a JSON representation of the settings. +func (s *Settings) JSON() (string, error) { + js := jsonSettings{ + DisablePAFXFast: s.disablePAFXFast, + AssumePreAuthentication: s.assumePreAuthentication, + } + b, err := json.MarshalIndent(js, "", " ") + if err != nil { + return "", err + } + return string(b), nil + +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/error.go b/vendor/github.com/jcmturner/gokrb5/v8/config/error.go new file mode 100644 index 00000000000..1fbda51f387 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/config/error.go @@ -0,0 +1,30 @@ +package config + +import "fmt" + +// UnsupportedDirective error. 
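The functional options in settings.go compose at construction time; a sketch wiring a logger together with the pre-authentication options (all values illustrative):

package main

import (
	"log"
	"os"

	"github.com/jcmturner/gokrb5/v8/client"
	"github.com/jcmturner/gokrb5/v8/config"
)

func main() {
	cfg, err := config.Load("/etc/krb5.conf") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	l := log.New(os.Stderr, "gokrb5: ", log.LstdFlags)
	cl := client.NewWithPassword("alice", "EXAMPLE.COM", "password", cfg,
		client.DisablePAFXFAST(true),         // do not use PA_FX_FAST
		client.AssumePreAuthentication(true), // assume pre-authentication is required
		client.Logger(l),                     // surface client.Log output
	)
	if err := cl.Login(); err != nil {
		log.Fatal(err)
	}
	defer cl.Destroy()
}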
+type UnsupportedDirective struct { + text string +} + +// Error implements the error interface for unsupported directives. +func (e UnsupportedDirective) Error() string { + return e.text +} + +// Invalid config error. +type Invalid struct { + text string +} + +// Error implements the error interface for invalid config error. +func (e Invalid) Error() string { + return e.text +} + +// InvalidErrorf creates a new Invalid error. +func InvalidErrorf(format string, a ...interface{}) Invalid { + return Invalid{ + text: fmt.Sprintf("invalid krb5 config "+format, a...), + } +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/hosts.go b/vendor/github.com/jcmturner/gokrb5/v8/config/hosts.go new file mode 100644 index 00000000000..3f22c70c41c --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/config/hosts.go @@ -0,0 +1,141 @@ +package config + +import ( + "fmt" + "math/rand" + "net" + "strconv" + "strings" + + "github.com/jcmturner/dnsutils/v2" +) + +// GetKDCs returns the count of KDCs available and a map of KDC host names keyed on preference order. +func (c *Config) GetKDCs(realm string, tcp bool) (int, map[int]string, error) { + if realm == "" { + realm = c.LibDefaults.DefaultRealm + } + kdcs := make(map[int]string) + var count int + + // Get the KDCs from the krb5.conf. + var ks []string + for _, r := range c.Realms { + if r.Realm != realm { + continue + } + ks = r.KDC + } + count = len(ks) + + if count > 0 { + // Order the kdcs randomly for preference. + kdcs = randServOrder(ks) + return count, kdcs, nil + } + + if !c.LibDefaults.DNSLookupKDC { + return count, kdcs, fmt.Errorf("no KDCs defined in configuration for realm %s", realm) + } + + // Use DNS to resolve kerberos SRV records. + proto := "udp" + if tcp { + proto = "tcp" + } + index, addrs, err := dnsutils.OrderedSRV("kerberos", proto, realm) + if err != nil { + return count, kdcs, err + } + if len(addrs) < 1 { + return count, kdcs, fmt.Errorf("no KDC SRV records found for realm %s", realm) + } + count = index + for k, v := range addrs { + kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port)) + } + return count, kdcs, nil +} + +// GetKpasswdServers returns the count of kpasswd servers available and a map of kpasswd host names keyed on preference order. +// https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html#realms - see kpasswd_server section +func (c *Config) GetKpasswdServers(realm string, tcp bool) (int, map[int]string, error) { + kdcs := make(map[int]string) + var count int + + // Use DNS to resolve kerberos SRV records if configured to do so in krb5.conf. + if c.LibDefaults.DNSLookupKDC { + proto := "udp" + if tcp { + proto = "tcp" + } + c, addrs, err := dnsutils.OrderedSRV("kpasswd", proto, realm) + if err != nil { + return count, kdcs, err + } + if c < 1 { + c, addrs, err = dnsutils.OrderedSRV("kerberos-adm", proto, realm) + if err != nil { + return count, kdcs, err + } + } + if len(addrs) < 1 { + return count, kdcs, fmt.Errorf("no kpasswd or kadmin SRV records found for realm %s", realm) + } + count = c + for k, v := range addrs { + kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port)) + } + } else { + // Get the KDCs from the krb5.conf an order them randomly for preference. 
+ var ks []string + var ka []string + for _, r := range c.Realms { + if r.Realm == realm { + ks = r.KPasswdServer + ka = r.AdminServer + break + } + } + if len(ks) < 1 { + for _, k := range ka { + h, _, err := net.SplitHostPort(k) + if err != nil { + continue + } + ks = append(ks, h+":464") + } + } + count = len(ks) + if count < 1 { + return count, kdcs, fmt.Errorf("no kpasswd or kadmin defined in configuration for realm %s", realm) + } + kdcs = randServOrder(ks) + } + return count, kdcs, nil +} + +func randServOrder(ks []string) map[int]string { + kdcs := make(map[int]string) + count := len(ks) + i := 1 + if count > 1 { + l := len(ks) + for l > 0 { + ri := rand.Intn(l) + kdcs[i] = ks[ri] + if l > 1 { + // Remove the entry from the source slice by swapping with the last entry and truncating + ks[len(ks)-1], ks[ri] = ks[ri], ks[len(ks)-1] + ks = ks[:len(ks)-1] + l = len(ks) + } else { + l = 0 + } + i++ + } + } else { + kdcs[i] = ks[0] + } + return kdcs +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go new file mode 100644 index 00000000000..a7638433d5f --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go @@ -0,0 +1,728 @@ +// Package config implements KRB5 client and service configuration as described at https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html +package config + +import ( + "bufio" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "os" + "os/user" + "regexp" + "strconv" + "strings" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// Config represents the KRB5 configuration. +type Config struct { + LibDefaults LibDefaults + Realms []Realm + DomainRealm DomainRealm + //CaPaths + //AppDefaults + //Plugins +} + +// WeakETypeList is a list of encryption types that have been deemed weak. +const WeakETypeList = "des-cbc-crc des-cbc-md4 des-cbc-md5 des-cbc-raw des3-cbc-raw des-hmac-sha1 arcfour-hmac-exp rc4-hmac-exp arcfour-hmac-md5-exp des" + +// New creates a new config struct instance. +func New() *Config { + d := make(DomainRealm) + return &Config{ + LibDefaults: newLibDefaults(), + DomainRealm: d, + } +} + +// LibDefaults represents the [libdefaults] section of the configuration. +type LibDefaults struct { + AllowWeakCrypto bool //default false + // ap_req_checksum_type int //unlikely to support this + Canonicalize bool //default false + CCacheType int //default is 4. 
unlikely to implement older + Clockskew time.Duration //max allowed skew in seconds, default 300 + //Default_ccache_name string // default /tmp/krb5cc_%{uid} //Not implementing as will hold in memory + DefaultClientKeytabName string //default /usr/local/var/krb5/user/%{euid}/client.keytab + DefaultKeytabName string //default /etc/krb5.keytab + DefaultRealm string + DefaultTGSEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DefaultTktEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DefaultTGSEnctypeIDs []int32 //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DefaultTktEnctypeIDs []int32 //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DNSCanonicalizeHostname bool //default true + DNSLookupKDC bool //default false + DNSLookupRealm bool + ExtraAddresses []net.IP //Not implementing yet + Forwardable bool //default false + IgnoreAcceptorHostname bool //default false + K5LoginAuthoritative bool //default false + K5LoginDirectory string //default user's home directory. Must be owned by the user or root + KDCDefaultOptions asn1.BitString //default 0x00000010 (KDC_OPT_RENEWABLE_OK) + KDCTimeSync int //default 1 + //kdc_req_checksum_type int //unlikely to implement as for very old KDCs + NoAddresses bool //default true + PermittedEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + PermittedEnctypeIDs []int32 + //plugin_base_dir string //not supporting plugins + PreferredPreauthTypes []int //default “17, 16, 15, 14”, which forces libkrb5 to attempt to use PKINIT if it is supported + Proxiable bool //default false + RDNS bool //default true + RealmTryDomains int //default -1 + RenewLifetime time.Duration //default 0 + SafeChecksumType int //default 8 + TicketLifetime time.Duration //default 1 day + UDPPreferenceLimit int // 1 means to always use tcp. MIT krb5 has a default value of 1465, and it prevents user setting more than 32700. + VerifyAPReqNofail bool //default false +} + +// Create a new LibDefaults struct. 
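Returning to GetKDCs in hosts.go above: the returned map is keyed on preference order starting at 1, which is why dialSendTCP and dialSendUDP iterate from index 1; a sketch (realm and config path are placeholders):

package main

import (
	"log"

	"github.com/jcmturner/gokrb5/v8/config"
)

func main() {
	cfg, err := config.Load("/etc/krb5.conf") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	// tcp=true asks for TCP SRV records when dns_lookup_kdc is enabled.
	count, kdcs, err := cfg.GetKDCs("EXAMPLE.COM", true)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d KDCs resolved", count)
	// The map is keyed on preference order starting at 1, not 0.
	for i := 1; i <= len(kdcs); i++ {
		log.Printf("KDC %d: %s", i, kdcs[i])
	}
}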
+func newLibDefaults() LibDefaults { + uid := "0" + var hdir string + usr, _ := user.Current() + if usr != nil { + uid = usr.Uid + hdir = usr.HomeDir + } + opts := asn1.BitString{} + opts.Bytes, _ = hex.DecodeString("00000010") + opts.BitLength = len(opts.Bytes) * 8 + return LibDefaults{ + CCacheType: 4, + Clockskew: time.Duration(300) * time.Second, + DefaultClientKeytabName: fmt.Sprintf("/usr/local/var/krb5/user/%s/client.keytab", uid), + DefaultKeytabName: "/etc/krb5.keytab", + DefaultTGSEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, + DefaultTktEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, + DNSCanonicalizeHostname: true, + K5LoginDirectory: hdir, + KDCDefaultOptions: opts, + KDCTimeSync: 1, + NoAddresses: true, + PermittedEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, + RDNS: true, + RealmTryDomains: -1, + SafeChecksumType: 8, + TicketLifetime: time.Duration(24) * time.Hour, + UDPPreferenceLimit: 1465, + PreferredPreauthTypes: []int{17, 16, 15, 14}, + } +} + +// Parse the lines of the [libdefaults] section of the configuration into the LibDefaults struct. +func (l *LibDefaults) parseLines(lines []string) error { + for _, line := range lines { + //Remove comments after the values + if idx := strings.IndexAny(line, "#;"); idx != -1 { + line = line[:idx] + } + line = strings.TrimSpace(line) + if line == "" { + continue + } + if !strings.Contains(line, "=") { + return InvalidErrorf("libdefaults section line (%s)", line) + } + + p := strings.Split(line, "=") + key := strings.TrimSpace(strings.ToLower(p[0])) + switch key { + case "allow_weak_crypto": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.AllowWeakCrypto = v + case "canonicalize": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Canonicalize = v + case "ccache_type": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseUint(p[1], 10, 32) + if err != nil || v < 0 || v > 4 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.CCacheType = int(v) + case "clockskew": + d, err := parseDuration(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Clockskew = d + case "default_client_keytab_name": + l.DefaultClientKeytabName = strings.TrimSpace(p[1]) + case "default_keytab_name": + l.DefaultKeytabName = strings.TrimSpace(p[1]) + case "default_realm": + l.DefaultRealm = strings.TrimSpace(p[1]) + case "default_tgs_enctypes": + l.DefaultTGSEnctypes = strings.Fields(p[1]) + case "default_tkt_enctypes": + l.DefaultTktEnctypes = strings.Fields(p[1]) + case "dns_canonicalize_hostname": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.DNSCanonicalizeHostname = v + case "dns_lookup_kdc": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.DNSLookupKDC = v + case "dns_lookup_realm": + v, err := 
parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.DNSLookupRealm = v + case "extra_addresses": + ipStr := strings.TrimSpace(p[1]) + for _, ip := range strings.Split(ipStr, ",") { + if eip := net.ParseIP(ip); eip != nil { + l.ExtraAddresses = append(l.ExtraAddresses, eip) + } + } + case "forwardable": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Forwardable = v + case "ignore_acceptor_hostname": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.IgnoreAcceptorHostname = v + case "k5login_authoritative": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.K5LoginAuthoritative = v + case "k5login_directory": + l.K5LoginDirectory = strings.TrimSpace(p[1]) + case "kdc_default_options": + v := strings.TrimSpace(p[1]) + v = strings.Replace(v, "0x", "", -1) + b, err := hex.DecodeString(v) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.KDCDefaultOptions.Bytes = b + l.KDCDefaultOptions.BitLength = len(b) * 8 + case "kdc_timesync": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseInt(p[1], 10, 32) + if err != nil || v < 0 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.KDCTimeSync = int(v) + case "noaddresses": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.NoAddresses = v + case "permitted_enctypes": + l.PermittedEnctypes = strings.Fields(p[1]) + case "preferred_preauth_types": + p[1] = strings.TrimSpace(p[1]) + t := strings.Split(p[1], ",") + var v []int + for _, s := range t { + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + v = append(v, int(i)) + } + l.PreferredPreauthTypes = v + case "proxiable": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Proxiable = v + case "rdns": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.RDNS = v + case "realm_try_domains": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseInt(p[1], 10, 32) + if err != nil || v < -1 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.RealmTryDomains = int(v) + case "renew_lifetime": + d, err := parseDuration(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.RenewLifetime = d + case "safe_checksum_type": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseInt(p[1], 10, 32) + if err != nil || v < 0 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.SafeChecksumType = int(v) + case "ticket_lifetime": + d, err := parseDuration(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.TicketLifetime = d + case "udp_preference_limit": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseUint(p[1], 10, 32) + if err != nil || v > 32700 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.UDPPreferenceLimit = int(v) + case "verify_ap_req_nofail": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + 
} + l.VerifyAPReqNofail = v + } + } + l.DefaultTGSEnctypeIDs = parseETypes(l.DefaultTGSEnctypes, l.AllowWeakCrypto) + l.DefaultTktEnctypeIDs = parseETypes(l.DefaultTktEnctypes, l.AllowWeakCrypto) + l.PermittedEnctypeIDs = parseETypes(l.PermittedEnctypes, l.AllowWeakCrypto) + return nil +} + +// Realm represents an entry in the [realms] section of the configuration. +type Realm struct { + Realm string + AdminServer []string + //auth_to_local //Not implementing for now + //auth_to_local_names //Not implementing for now + DefaultDomain string + KDC []string + KPasswdServer []string //default admin_server:464 + MasterKDC []string +} + +// Parse the lines of a [realms] entry into the Realm struct. +func (r *Realm) parseLines(name string, lines []string) (err error) { + r.Realm = name + var adminServerFinal bool + var KDCFinal bool + var kpasswdServerFinal bool + var masterKDCFinal bool + var ignore bool + var c int // counts the depth of blocks within brackets { } + for _, line := range lines { + if ignore && c > 0 && !strings.Contains(line, "{") && !strings.Contains(line, "}") { + continue + } + //Remove comments after the values + if idx := strings.IndexAny(line, "#;"); idx != -1 { + line = line[:idx] + } + line = strings.TrimSpace(line) + if line == "" { + continue + } + if !strings.Contains(line, "=") && !strings.Contains(line, "}") { + return InvalidErrorf("realms section line (%s)", line) + } + if strings.Contains(line, "v4_") { + ignore = true + err = UnsupportedDirective{"v4 configurations are not supported"} + } + if strings.Contains(line, "{") { + c++ + if ignore { + continue + } + } + if strings.Contains(line, "}") { + c-- + if c < 0 { + return InvalidErrorf("unpaired curly brackets") + } + if ignore { + if c < 1 { + c = 0 + ignore = false + } + continue + } + } + + p := strings.Split(line, "=") + key := strings.TrimSpace(strings.ToLower(p[0])) + v := strings.TrimSpace(p[1]) + switch key { + case "admin_server": + appendUntilFinal(&r.AdminServer, v, &adminServerFinal) + case "default_domain": + r.DefaultDomain = v + case "kdc": + if !strings.Contains(v, ":") { + // No port number specified default to 88 + if strings.HasSuffix(v, `*`) { + v = strings.TrimSpace(strings.TrimSuffix(v, `*`)) + ":88*" + } else { + v = strings.TrimSpace(v) + ":88" + } + } + appendUntilFinal(&r.KDC, v, &KDCFinal) + case "kpasswd_server": + appendUntilFinal(&r.KPasswdServer, v, &kpasswdServerFinal) + case "master_kdc": + appendUntilFinal(&r.MasterKDC, v, &masterKDCFinal) + } + } + //default for Kpasswd_server = admin_server:464 + if len(r.KPasswdServer) < 1 { + for _, a := range r.AdminServer { + s := strings.Split(a, ":") + r.KPasswdServer = append(r.KPasswdServer, s[0]+":464") + } + } + return +} + +// Parse the lines of the [realms] section of the configuration into an slice of Realm structs. 
+func parseRealms(lines []string) (realms []Realm, err error) { + var name string + var start int + var c int + for i, l := range lines { + //Remove comments after the values + if idx := strings.IndexAny(l, "#;"); idx != -1 { + l = l[:idx] + } + l = strings.TrimSpace(l) + if l == "" { + continue + } + //if strings.Contains(l, "v4_") { + // return nil, errors.New("v4 configurations are not supported in Realms section") + //} + if strings.Contains(l, "{") { + c++ + if !strings.Contains(l, "=") { + return nil, fmt.Errorf("realm configuration line invalid: %s", l) + } + if c == 1 { + start = i + p := strings.Split(l, "=") + name = strings.TrimSpace(p[0]) + } + } + if strings.Contains(l, "}") { + if c < 1 { + // but not started a block!!! + return nil, errors.New("invalid Realms section in configuration") + } + c-- + if c == 0 { + var r Realm + e := r.parseLines(name, lines[start+1:i]) + if e != nil { + if _, ok := e.(UnsupportedDirective); !ok { + err = e + return + } + err = e + } + realms = append(realms, r) + } + } + } + return +} + +// DomainRealm maps the domains to realms representing the [domain_realm] section of the configuration. +type DomainRealm map[string]string + +// Parse the lines of the [domain_realm] section of the configuration and add to the mapping. +func (d *DomainRealm) parseLines(lines []string) error { + for _, line := range lines { + //Remove comments after the values + if idx := strings.IndexAny(line, "#;"); idx != -1 { + line = line[:idx] + } + if strings.TrimSpace(line) == "" { + continue + } + if !strings.Contains(line, "=") { + return InvalidErrorf("realm line (%s)", line) + } + p := strings.Split(line, "=") + domain := strings.TrimSpace(strings.ToLower(p[0])) + realm := strings.TrimSpace(p[1]) + d.addMapping(domain, realm) + } + return nil +} + +// Add a domain to realm mapping. +func (d *DomainRealm) addMapping(domain, realm string) { + (*d)[domain] = realm +} + +// Delete a domain to realm mapping. +func (d *DomainRealm) deleteMapping(domain, realm string) { + delete(*d, domain) +} + +// ResolveRealm resolves the kerberos realm for the specified domain name from the domain to realm mapping. +// The most specific mapping is returned. +func (c *Config) ResolveRealm(domainName string) string { + domainName = strings.TrimSuffix(domainName, ".") + + // Try to match the entire hostname first + if r, ok := c.DomainRealm[domainName]; ok { + return r + } + + // Try to match all DNS domain parts + periods := strings.Count(domainName, ".") + 1 + for i := 2; i <= periods; i++ { + z := strings.SplitN(domainName, ".", i) + if r, ok := c.DomainRealm["."+z[len(z)-1]]; ok { + return r + } + } + return c.LibDefaults.DefaultRealm +} + +// Load the KRB5 configuration from the specified file path. +func Load(cfgPath string) (*Config, error) { + fh, err := os.Open(cfgPath) + if err != nil { + return nil, errors.New("configuration file could not be opened: " + cfgPath + " " + err.Error()) + } + defer fh.Close() + scanner := bufio.NewScanner(fh) + return NewFromScanner(scanner) +} + +// NewFromString creates a new Config struct from a string. +func NewFromString(s string) (*Config, error) { + reader := strings.NewReader(s) + return NewFromReader(reader) +} + +// NewFromReader creates a new Config struct from an io.Reader. +func NewFromReader(r io.Reader) (*Config, error) { + scanner := bufio.NewScanner(r) + return NewFromScanner(scanner) +} + +// NewFromScanner creates a new Config struct from a bufio.Scanner. 
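A sketch of loading a configuration from a string and resolving realms through the [domain_realm] mapping; hostnames and the realm are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/jcmturner/gokrb5/v8/config"
)

func main() {
	const krb5conf = `
[libdefaults]
  default_realm = EXAMPLE.COM

[realms]
  EXAMPLE.COM = {
    kdc = kdc1.example.com:88
  }

[domain_realm]
  .example.com = EXAMPLE.COM
  example.com = EXAMPLE.COM
`
	cfg, err := config.NewFromString(krb5conf)
	if err != nil {
		log.Fatal(err)
	}
	// The most specific mapping wins; unknown hosts fall back to default_realm.
	fmt.Println(cfg.ResolveRealm("host1.example.com")) // EXAMPLE.COM (via .example.com)
	fmt.Println(cfg.ResolveRealm("other.org"))         // EXAMPLE.COM (default_realm)
}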
+func NewFromScanner(scanner *bufio.Scanner) (*Config, error) { + c := New() + var e error + sections := make(map[int]string) + var sectionLineNum []int + var lines []string + for scanner.Scan() { + // Skip comments and blank lines + if matched, _ := regexp.MatchString(`^\s*(#|;|\n)`, scanner.Text()); matched { + continue + } + if matched, _ := regexp.MatchString(`^\s*\[libdefaults\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "libdefaults" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + if matched, _ := regexp.MatchString(`^\s*\[realms\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "realms" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + if matched, _ := regexp.MatchString(`^\s*\[domain_realm\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "domain_realm" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + if matched, _ := regexp.MatchString(`^\s*\[.*\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "unknown_section" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + lines = append(lines, scanner.Text()) + } + for i, start := range sectionLineNum { + var end int + if i+1 >= len(sectionLineNum) { + end = len(lines) + } else { + end = sectionLineNum[i+1] + } + switch section := sections[start]; section { + case "libdefaults": + err := c.LibDefaults.parseLines(lines[start:end]) + if err != nil { + if _, ok := err.(UnsupportedDirective); !ok { + return nil, fmt.Errorf("error processing libdefaults section: %v", err) + } + e = err + } + case "realms": + realms, err := parseRealms(lines[start:end]) + if err != nil { + if _, ok := err.(UnsupportedDirective); !ok { + return nil, fmt.Errorf("error processing realms section: %v", err) + } + e = err + } + c.Realms = realms + case "domain_realm": + err := c.DomainRealm.parseLines(lines[start:end]) + if err != nil { + if _, ok := err.(UnsupportedDirective); !ok { + return nil, fmt.Errorf("error processing domaain_realm section: %v", err) + } + e = err + } + } + } + return c, e +} + +// Parse a space delimited list of ETypes into a list of EType numbers optionally filtering out weak ETypes. +func parseETypes(s []string, w bool) []int32 { + var eti []int32 + for _, et := range s { + if !w { + var weak bool + for _, wet := range strings.Fields(WeakETypeList) { + if et == wet { + weak = true + break + } + } + if weak { + continue + } + } + i := etypeID.EtypeSupported(et) + if i != 0 { + eti = append(eti, i) + } + } + return eti +} + +// Parse a time duration string in the configuration to a golang time.Duration. 
+func parseDuration(s string) (time.Duration, error) { + s = strings.Replace(strings.TrimSpace(s), " ", "", -1) + + // handle Nd[NmNs] + if strings.Contains(s, "d") { + ds := strings.SplitN(s, "d", 2) + dn, err := strconv.ParseUint(ds[0], 10, 32) + if err != nil { + return time.Duration(0), errors.New("invalid time duration") + } + d := time.Duration(dn*24) * time.Hour + if ds[1] != "" { + dp, err := time.ParseDuration(ds[1]) + if err != nil { + return time.Duration(0), errors.New("invalid time duration") + } + d = d + dp + } + return d, nil + } + + // handle Nm[Ns] + d, err := time.ParseDuration(s) + if err == nil { + return d, nil + } + + // handle N + v, err := strconv.ParseUint(s, 10, 32) + if err == nil && v > 0 { + return time.Duration(v) * time.Second, nil + } + + // handle h:m[:s] + if strings.Contains(s, ":") { + t := strings.Split(s, ":") + if 2 > len(t) || len(t) > 3 { + return time.Duration(0), errors.New("invalid time duration value") + } + var i []int + for _, n := range t { + j, err := strconv.ParseInt(n, 10, 16) + if err != nil { + return time.Duration(0), errors.New("invalid time duration value") + } + i = append(i, int(j)) + } + d := time.Duration(i[0])*time.Hour + time.Duration(i[1])*time.Minute + if len(i) == 3 { + d = d + time.Duration(i[2])*time.Second + } + return d, nil + } + return time.Duration(0), errors.New("invalid time duration value") +} + +// Parse possible boolean values to golang bool. +func parseBoolean(s string) (bool, error) { + s = strings.TrimSpace(s) + v, err := strconv.ParseBool(s) + if err == nil { + return v, nil + } + switch strings.ToLower(s) { + case "yes": + return true, nil + case "y": + return true, nil + case "no": + return false, nil + case "n": + return false, nil + } + return false, errors.New("invalid boolean value") +} + +// Parse array of strings but stop if an asterisk is placed at the end of a line. +func appendUntilFinal(s *[]string, value string, final *bool) { + if *final { + return + } + if last := len(value) - 1; last >= 0 && value[last] == '*' { + *final = true + value = value[:len(value)-1] + } + *s = append(*s, value) +} + +// JSON return details of the config in a JSON format. +func (c *Config) JSON() (string, error) { + b, err := json.MarshalIndent(c, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go new file mode 100644 index 00000000000..c3b35c77a12 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go @@ -0,0 +1,333 @@ +package credentials + +import ( + "bytes" + "encoding/binary" + "errors" + "io/ioutil" + "strings" + "time" + "unsafe" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/types" +) + +const ( + headerFieldTagKDCOffset = 1 +) + +// CCache is the file credentials cache as define here: https://web.mit.edu/kerberos/krb5-latest/doc/formats/ccache_file_format.html +type CCache struct { + Version uint8 + Header header + DefaultPrincipal principal + Credentials []*Credential + Path string +} + +type header struct { + length uint16 + fields []headerField +} + +type headerField struct { + tag uint16 + length uint16 + value []byte +} + +// Credential cache entry principal struct. +type principal struct { + Realm string + PrincipalName types.PrincipalName +} + +// Credential holds a Kerberos client's ccache credential information. 
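parseDuration above accepts several krb5.conf duration spellings (day-suffixed values, h:m[:s], plain seconds, and Go-style unit strings); a sketch exercising them through the exported ticket_lifetime setting:

package main

import (
	"fmt"
	"log"

	"github.com/jcmturner/gokrb5/v8/config"
)

func main() {
	// All three spellings below mean ten hours; only one would normally be set.
	for _, v := range []string{"10h", "36000", "10:00"} {
		cfg, err := config.NewFromString("[libdefaults]\n  ticket_lifetime = " + v + "\n")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-6s -> %v\n", v, cfg.LibDefaults.TicketLifetime)
	}
}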
+type Credential struct { + Client principal + Server principal + Key types.EncryptionKey + AuthTime time.Time + StartTime time.Time + EndTime time.Time + RenewTill time.Time + IsSKey bool + TicketFlags asn1.BitString + Addresses []types.HostAddress + AuthData []types.AuthorizationDataEntry + Ticket []byte + SecondTicket []byte +} + +// LoadCCache loads a credential cache file into a CCache type. +func LoadCCache(cpath string) (*CCache, error) { + c := new(CCache) + b, err := ioutil.ReadFile(cpath) + if err != nil { + return c, err + } + err = c.Unmarshal(b) + return c, err +} + +// Unmarshal a byte slice of credential cache data into CCache type. +func (c *CCache) Unmarshal(b []byte) error { + p := 0 + //The first byte of the file always has the value 5 + if int8(b[p]) != 5 { + return errors.New("Invalid credential cache data. First byte does not equal 5") + } + p++ + //Get credential cache version + //The second byte contains the version number (1 to 4) + c.Version = b[p] + if c.Version < 1 || c.Version > 4 { + return errors.New("Invalid credential cache data. Keytab version is not within 1 to 4") + } + p++ + //Version 1 or 2 of the file format uses native byte order for integer representations. Versions 3 & 4 always uses big-endian byte order + var endian binary.ByteOrder + endian = binary.BigEndian + if (c.Version == 1 || c.Version == 2) && isNativeEndianLittle() { + endian = binary.LittleEndian + } + if c.Version == 4 { + err := parseHeader(b, &p, c, &endian) + if err != nil { + return err + } + } + c.DefaultPrincipal = parsePrincipal(b, &p, c, &endian) + for p < len(b) { + cred, err := parseCredential(b, &p, c, &endian) + if err != nil { + return err + } + c.Credentials = append(c.Credentials, cred) + } + return nil +} + +func parseHeader(b []byte, p *int, c *CCache, e *binary.ByteOrder) error { + if c.Version != 4 { + return errors.New("Credentials cache version is not 4 so there is no header to parse.") + } + h := header{} + h.length = uint16(readInt16(b, p, e)) + for *p <= int(h.length) { + f := headerField{} + f.tag = uint16(readInt16(b, p, e)) + f.length = uint16(readInt16(b, p, e)) + f.value = b[*p : *p+int(f.length)] + *p += int(f.length) + if !f.valid() { + return errors.New("Invalid credential cache header found") + } + h.fields = append(h.fields, f) + } + c.Header = h + return nil +} + +// Parse the Keytab bytes of a principal into a Keytab entry's principal. +func parsePrincipal(b []byte, p *int, c *CCache, e *binary.ByteOrder) (princ principal) { + if c.Version != 1 { + //Name Type is omitted in version 1 + princ.PrincipalName.NameType = readInt32(b, p, e) + } + nc := int(readInt32(b, p, e)) + if c.Version == 1 { + //In version 1 the number of components includes the realm. 
Minus 1 to make consistent with version 2 + nc-- + } + lenRealm := readInt32(b, p, e) + princ.Realm = string(readBytes(b, p, int(lenRealm), e)) + for i := 0; i < nc; i++ { + l := readInt32(b, p, e) + princ.PrincipalName.NameString = append(princ.PrincipalName.NameString, string(readBytes(b, p, int(l), e))) + } + return princ +} + +func parseCredential(b []byte, p *int, c *CCache, e *binary.ByteOrder) (cred *Credential, err error) { + cred = new(Credential) + cred.Client = parsePrincipal(b, p, c, e) + cred.Server = parsePrincipal(b, p, c, e) + key := types.EncryptionKey{} + key.KeyType = int32(readInt16(b, p, e)) + if c.Version == 3 { + //repeated twice in version 3 + key.KeyType = int32(readInt16(b, p, e)) + } + key.KeyValue = readData(b, p, e) + cred.Key = key + cred.AuthTime = readTimestamp(b, p, e) + cred.StartTime = readTimestamp(b, p, e) + cred.EndTime = readTimestamp(b, p, e) + cred.RenewTill = readTimestamp(b, p, e) + if ik := readInt8(b, p, e); ik == 0 { + cred.IsSKey = false + } else { + cred.IsSKey = true + } + cred.TicketFlags = types.NewKrbFlags() + cred.TicketFlags.Bytes = readBytes(b, p, 4, e) + l := int(readInt32(b, p, e)) + cred.Addresses = make([]types.HostAddress, l, l) + for i := range cred.Addresses { + cred.Addresses[i] = readAddress(b, p, e) + } + l = int(readInt32(b, p, e)) + cred.AuthData = make([]types.AuthorizationDataEntry, l, l) + for i := range cred.AuthData { + cred.AuthData[i] = readAuthDataEntry(b, p, e) + } + cred.Ticket = readData(b, p, e) + cred.SecondTicket = readData(b, p, e) + return +} + +// GetClientPrincipalName returns a PrincipalName type for the client the credentials cache is for. +func (c *CCache) GetClientPrincipalName() types.PrincipalName { + return c.DefaultPrincipal.PrincipalName +} + +// GetClientRealm returns the reals of the client the credentials cache is for. +func (c *CCache) GetClientRealm() string { + return c.DefaultPrincipal.Realm +} + +// GetClientCredentials returns a Credentials object representing the client of the credentials cache. +func (c *CCache) GetClientCredentials() *Credentials { + return &Credentials{ + username: c.DefaultPrincipal.PrincipalName.PrincipalNameString(), + realm: c.GetClientRealm(), + cname: c.DefaultPrincipal.PrincipalName, + } +} + +// Contains tests if the cache contains a credential for the provided server PrincipalName +func (c *CCache) Contains(p types.PrincipalName) bool { + for _, cred := range c.Credentials { + if cred.Server.PrincipalName.Equal(p) { + return true + } + } + return false +} + +// GetEntry returns a specific credential for the PrincipalName provided. +func (c *CCache) GetEntry(p types.PrincipalName) (*Credential, bool) { + cred := new(Credential) + var found bool + for i := range c.Credentials { + if c.Credentials[i].Server.PrincipalName.Equal(p) { + cred = c.Credentials[i] + found = true + break + } + } + if !found { + return cred, false + } + return cred, true +} + +// GetEntries filters out configuration entries an returns a slice of credentials. 
+func (c *CCache) GetEntries() []*Credential { + creds := make([]*Credential, 0) + for _, cred := range c.Credentials { + // Filter out configuration entries + if strings.HasPrefix(cred.Server.Realm, "X-CACHECONF") { + continue + } + creds = append(creds, cred) + } + return creds +} + +func (h *headerField) valid() bool { + // See https://web.mit.edu/kerberos/krb5-latest/doc/formats/ccache_file_format.html - Header format + switch h.tag { + case headerFieldTagKDCOffset: + if h.length != 8 || len(h.value) != 8 { + return false + } + return true + } + return false +} + +func readData(b []byte, p *int, e *binary.ByteOrder) []byte { + l := readInt32(b, p, e) + return readBytes(b, p, int(l), e) +} + +func readAddress(b []byte, p *int, e *binary.ByteOrder) types.HostAddress { + a := types.HostAddress{} + a.AddrType = int32(readInt16(b, p, e)) + a.Address = readData(b, p, e) + return a +} + +func readAuthDataEntry(b []byte, p *int, e *binary.ByteOrder) types.AuthorizationDataEntry { + a := types.AuthorizationDataEntry{} + a.ADType = int32(readInt16(b, p, e)) + a.ADData = readData(b, p, e) + return a +} + +// Read bytes representing a timestamp. +func readTimestamp(b []byte, p *int, e *binary.ByteOrder) time.Time { + return time.Unix(int64(readInt32(b, p, e)), 0) +} + +// Read bytes representing an eight bit integer. +func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8) { + buf := bytes.NewBuffer(b[*p : *p+1]) + binary.Read(buf, *e, &i) + *p++ + return +} + +// Read bytes representing a sixteen bit integer. +func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16) { + buf := bytes.NewBuffer(b[*p : *p+2]) + binary.Read(buf, *e, &i) + *p += 2 + return +} + +// Read bytes representing a thirty two bit integer. +func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32) { + buf := bytes.NewBuffer(b[*p : *p+4]) + binary.Read(buf, *e, &i) + *p += 4 + return +} + +func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) []byte { + buf := bytes.NewBuffer(b[*p : *p+s]) + r := make([]byte, s) + binary.Read(buf, *e, &r) + *p += s + return r +} + +func isNativeEndianLittle() bool { + var x = 0x012345678 + var p = unsafe.Pointer(&x) + var bp = (*[4]byte)(p) + + var endian bool + if 0x01 == bp[0] { + endian = false + } else if (0x78 & 0xff) == (bp[0] & 0xff) { + endian = true + } else { + // Default to big endian + endian = false + } + return endian +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/credentials/credentials.go b/vendor/github.com/jcmturner/gokrb5/v8/credentials/credentials.go new file mode 100644 index 00000000000..bddbc7e3e3a --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/credentials/credentials.go @@ -0,0 +1,405 @@ +// Package credentials provides credentials management for Kerberos 5 authentication. +package credentials + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "time" + + "github.com/hashicorp/go-uuid" + "github.com/jcmturner/gokrb5/v8/iana/nametype" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/types" +) + +const ( + // AttributeKeyADCredentials assigned number for AD credentials. + AttributeKeyADCredentials = "gokrb5AttributeKeyADCredentials" +) + +// Credentials struct for a user. +// Contains either a keytab, password or both. +// Keytabs are used over passwords if both are defined. 
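// A minimal sketch of reading a credential cache with the LoadCCache /
// GetEntries API above; it is illustrative only and not part of the vendored
// upstream source. The ccache path is a hypothetical kinit output location.
package main

import (
	"fmt"
	"log"

	"github.com/jcmturner/gokrb5/v8/credentials"
)

func main() {
	// Hypothetical path to a ccache written by kinit.
	cc, err := credentials.LoadCCache("/tmp/krb5cc_1000")
	if err != nil {
		log.Fatalf("could not load ccache: %v", err)
	}
	fmt.Printf("client: %s@%s\n",
		cc.GetClientPrincipalName().PrincipalNameString(), cc.GetClientRealm())
	// GetEntries skips the X-CACHECONF configuration entries.
	for _, cred := range cc.GetEntries() {
		fmt.Printf("ticket for %s (expires %v)\n",
			cred.Server.PrincipalName.PrincipalNameString(), cred.EndTime)
	}
}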
+type Credentials struct { + username string + displayName string + realm string + cname types.PrincipalName + keytab *keytab.Keytab + password string + attributes map[string]interface{} + validUntil time.Time + authenticated bool + human bool + authTime time.Time + groupMembership map[string]bool + sessionID string +} + +// marshalCredentials is used to enable marshaling and unmarshaling of credentials +// without having exported fields on the Credentials struct +type marshalCredentials struct { + Username string + DisplayName string + Realm string + CName types.PrincipalName `json:"-"` + Keytab bool + Password bool + Attributes map[string]interface{} `json:"-"` + ValidUntil time.Time + Authenticated bool + Human bool + AuthTime time.Time + GroupMembership map[string]bool `json:"-"` + SessionID string +} + +// ADCredentials contains information obtained from the PAC. +type ADCredentials struct { + EffectiveName string + FullName string + UserID int + PrimaryGroupID int + LogOnTime time.Time + LogOffTime time.Time + PasswordLastSet time.Time + GroupMembershipSIDs []string + LogonDomainName string + LogonDomainID string + LogonServer string +} + +// New creates a new Credentials instance. +func New(username string, realm string) *Credentials { + uid, err := uuid.GenerateUUID() + if err != nil { + uid = "00unique-sess-ions-uuid-unavailable0" + } + return &Credentials{ + username: username, + displayName: username, + realm: realm, + cname: types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, username), + keytab: keytab.New(), + attributes: make(map[string]interface{}), + groupMembership: make(map[string]bool), + sessionID: uid, + human: true, + } +} + +// NewFromPrincipalName creates a new Credentials instance with the user details provides as a PrincipalName type. +func NewFromPrincipalName(cname types.PrincipalName, realm string) *Credentials { + c := New(cname.PrincipalNameString(), realm) + c.cname = cname + return c +} + +// WithKeytab sets the Keytab in the Credentials struct. +func (c *Credentials) WithKeytab(kt *keytab.Keytab) *Credentials { + c.keytab = kt + c.password = "" + return c +} + +// Keytab returns the credential's Keytab. +func (c *Credentials) Keytab() *keytab.Keytab { + return c.keytab +} + +// HasKeytab queries if the Credentials has a keytab defined. +func (c *Credentials) HasKeytab() bool { + if c.keytab != nil && len(c.keytab.Entries) > 0 { + return true + } + return false +} + +// WithPassword sets the password in the Credentials struct. +func (c *Credentials) WithPassword(password string) *Credentials { + c.password = password + c.keytab = keytab.New() // clear any keytab + return c +} + +// Password returns the credential's password. +func (c *Credentials) Password() string { + return c.password +} + +// HasPassword queries if the Credentials has a password defined. 
+func (c *Credentials) HasPassword() bool { + if c.password != "" { + return true + } + return false +} + +// SetValidUntil sets the expiry time of the credentials +func (c *Credentials) SetValidUntil(t time.Time) { + c.validUntil = t +} + +// SetADCredentials adds ADCredentials attributes to the credentials +func (c *Credentials) SetADCredentials(a ADCredentials) { + c.SetAttribute(AttributeKeyADCredentials, a) + if a.FullName != "" { + c.SetDisplayName(a.FullName) + } + if a.EffectiveName != "" { + c.SetUserName(a.EffectiveName) + } + for i := range a.GroupMembershipSIDs { + c.AddAuthzAttribute(a.GroupMembershipSIDs[i]) + } +} + +// GetADCredentials returns ADCredentials attributes sorted in the credential +func (c *Credentials) GetADCredentials() ADCredentials { + if a, ok := c.attributes[AttributeKeyADCredentials].(ADCredentials); ok { + return a + } + return ADCredentials{} +} + +// Methods to implement goidentity.Identity interface + +// UserName returns the credential's username. +func (c *Credentials) UserName() string { + return c.username +} + +// SetUserName sets the username value on the credential. +func (c *Credentials) SetUserName(s string) { + c.username = s +} + +// CName returns the credential's client principal name. +func (c *Credentials) CName() types.PrincipalName { + return c.cname +} + +// SetCName sets the client principal name on the credential. +func (c *Credentials) SetCName(pn types.PrincipalName) { + c.cname = pn +} + +// Domain returns the credential's domain. +func (c *Credentials) Domain() string { + return c.realm +} + +// SetDomain sets the domain value on the credential. +func (c *Credentials) SetDomain(s string) { + c.realm = s +} + +// Realm returns the credential's realm. Same as the domain. +func (c *Credentials) Realm() string { + return c.Domain() +} + +// SetRealm sets the realm value on the credential. Same as the domain +func (c *Credentials) SetRealm(s string) { + c.SetDomain(s) +} + +// DisplayName returns the credential's display name. +func (c *Credentials) DisplayName() string { + return c.displayName +} + +// SetDisplayName sets the display name value on the credential. +func (c *Credentials) SetDisplayName(s string) { + c.displayName = s +} + +// Human returns if the credential represents a human or not. +func (c *Credentials) Human() bool { + return c.human +} + +// SetHuman sets the credential as human. +func (c *Credentials) SetHuman(b bool) { + c.human = b +} + +// AuthTime returns the time the credential was authenticated. +func (c *Credentials) AuthTime() time.Time { + return c.authTime +} + +// SetAuthTime sets the time the credential was authenticated. +func (c *Credentials) SetAuthTime(t time.Time) { + c.authTime = t +} + +// AuthzAttributes returns the credentials authorizing attributes. +func (c *Credentials) AuthzAttributes() []string { + s := make([]string, len(c.groupMembership)) + i := 0 + for a := range c.groupMembership { + s[i] = a + i++ + } + return s +} + +// Authenticated indicates if the credential has been successfully authenticated or not. +func (c *Credentials) Authenticated() bool { + return c.authenticated +} + +// SetAuthenticated sets the credential as having been successfully authenticated. +func (c *Credentials) SetAuthenticated(b bool) { + c.authenticated = b +} + +// AddAuthzAttribute adds an authorization attribute to the credential. +func (c *Credentials) AddAuthzAttribute(a string) { + c.groupMembership[a] = true +} + +// RemoveAuthzAttribute removes an authorization attribute from the credential. 
+func (c *Credentials) RemoveAuthzAttribute(a string) { + if _, ok := c.groupMembership[a]; !ok { + return + } + delete(c.groupMembership, a) +} + +// EnableAuthzAttribute toggles an authorization attribute to an enabled state on the credential. +func (c *Credentials) EnableAuthzAttribute(a string) { + if enabled, ok := c.groupMembership[a]; ok && !enabled { + c.groupMembership[a] = true + } +} + +// DisableAuthzAttribute toggles an authorization attribute to a disabled state on the credential. +func (c *Credentials) DisableAuthzAttribute(a string) { + if enabled, ok := c.groupMembership[a]; ok && enabled { + c.groupMembership[a] = false + } +} + +// Authorized indicates if the credential has the specified authorizing attribute. +func (c *Credentials) Authorized(a string) bool { + if enabled, ok := c.groupMembership[a]; ok && enabled { + return true + } + return false +} + +// SessionID returns the credential's session ID. +func (c *Credentials) SessionID() string { + return c.sessionID +} + +// Expired indicates if the credential has expired. +func (c *Credentials) Expired() bool { + if !c.validUntil.IsZero() && time.Now().UTC().After(c.validUntil) { + return true + } + return false +} + +// ValidUntil returns the credential's valid until date +func (c *Credentials) ValidUntil() time.Time { + return c.validUntil +} + +// Attributes returns the Credentials' attributes map. +func (c *Credentials) Attributes() map[string]interface{} { + return c.attributes +} + +// SetAttribute sets the value of an attribute. +func (c *Credentials) SetAttribute(k string, v interface{}) { + c.attributes[k] = v +} + +// SetAttributes replaces the attributes map with the one provided. +func (c *Credentials) SetAttributes(a map[string]interface{}) { + c.attributes = a +} + +// RemoveAttribute deletes an attribute from the attribute map that has the key provided. +func (c *Credentials) RemoveAttribute(k string) { + delete(c.attributes, k) +} + +// Marshal the Credentials into a byte slice +func (c *Credentials) Marshal() ([]byte, error) { + gob.Register(map[string]interface{}{}) + gob.Register(ADCredentials{}) + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + mc := marshalCredentials{ + Username: c.username, + DisplayName: c.displayName, + Realm: c.realm, + CName: c.cname, + Keytab: c.HasKeytab(), + Password: c.HasPassword(), + Attributes: c.attributes, + ValidUntil: c.validUntil, + Authenticated: c.authenticated, + Human: c.human, + AuthTime: c.authTime, + GroupMembership: c.groupMembership, + SessionID: c.sessionID, + } + err := enc.Encode(&mc) + if err != nil { + return []byte{}, err + } + return buf.Bytes(), nil +} + +// Unmarshal a byte slice into Credentials +func (c *Credentials) Unmarshal(b []byte) error { + gob.Register(map[string]interface{}{}) + gob.Register(ADCredentials{}) + mc := new(marshalCredentials) + buf := bytes.NewBuffer(b) + dec := gob.NewDecoder(buf) + err := dec.Decode(mc) + if err != nil { + return err + } + c.username = mc.Username + c.displayName = mc.DisplayName + c.realm = mc.Realm + c.cname = mc.CName + c.attributes = mc.Attributes + c.validUntil = mc.ValidUntil + c.authenticated = mc.Authenticated + c.human = mc.Human + c.authTime = mc.AuthTime + c.groupMembership = mc.GroupMembership + c.sessionID = mc.SessionID + return nil +} + +// JSON return details of the Credentials in a JSON format. 
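// A minimal sketch of the builder-style Credentials API above: construct a
// credential, attach a password, tag it with authorization attributes and
// round-trip it through Marshal/Unmarshal. The username, realm, password and
// group SID below are illustrative placeholders.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/jcmturner/gokrb5/v8/credentials"
)

func main() {
	cred := credentials.New("alice", "EXAMPLE.COM").WithPassword("password")
	cred.AddAuthzAttribute("S-1-5-21-1111111111-2222222222-3333333333-1105")
	cred.SetValidUntil(time.Now().UTC().Add(8 * time.Hour))

	fmt.Println("has keytab:", cred.HasKeytab()) // false: WithPassword clears the keytab
	fmt.Println("authorized:", cred.Authorized("S-1-5-21-1111111111-2222222222-3333333333-1105"))
	fmt.Println("expired:", cred.Expired())

	// gob round trip; the keytab and password themselves are not serialised,
	// only booleans recording whether they were set.
	b, err := cred.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	restored := credentials.New("", "")
	if err := restored.Unmarshal(b); err != nil {
		log.Fatal(err)
	}
	fmt.Println("restored:", restored.UserName(), restored.Domain())
}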
+func (c *Credentials) JSON() (string, error) { + mc := marshalCredentials{ + Username: c.username, + DisplayName: c.displayName, + Realm: c.realm, + CName: c.cname, + Keytab: c.HasKeytab(), + Password: c.HasPassword(), + ValidUntil: c.validUntil, + Authenticated: c.authenticated, + Human: c.human, + AuthTime: c.authTime, + SessionID: c.sessionID, + } + b, err := json.MarshalIndent(mc, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha1-96.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha1-96.go new file mode 100644 index 00000000000..dd8babd5df4 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha1-96.go @@ -0,0 +1,129 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha1" + "hash" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/rfc3961" + "github.com/jcmturner/gokrb5/v8/crypto/rfc3962" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// RFC 3962 + +// Aes128CtsHmacSha96 implements Kerberos encryption type aes128-cts-hmac-sha1-96 +type Aes128CtsHmacSha96 struct { +} + +// GetETypeID returns the EType ID number. +func (e Aes128CtsHmacSha96) GetETypeID() int32 { + return etypeID.AES128_CTS_HMAC_SHA1_96 +} + +// GetHashID returns the checksum type ID number. +func (e Aes128CtsHmacSha96) GetHashID() int32 { + return chksumtype.HMAC_SHA1_96_AES128 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes128CtsHmacSha96) GetKeyByteSize() int { + return 128 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes128CtsHmacSha96) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Aes128CtsHmacSha96) GetHashFunc() func() hash.Hash { + return sha1.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e Aes128CtsHmacSha96) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Aes128CtsHmacSha96) GetDefaultStringToKeyParams() string { + return "00001000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes128CtsHmacSha96) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes128CtsHmacSha96) GetHMACBitLength() int { + return 96 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes128CtsHmacSha96) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes128CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + return rfc3962.StringToKey(secret, salt, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes128CtsHmacSha96) RandomToKey(b []byte) []byte { + return rfc3961.RandomToKey(b) +} + +// EncryptData encrypts the data provided. +func (e Aes128CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc3962.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. 
+func (e Aes128CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc3962.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Aes128CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) { + return rfc3962.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Aes128CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc3962.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes128CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveKey(protocolKey, usage, e) +} + +// DeriveRandom generates data needed for key generation. +func (e Aes128CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. +func (e Aes128CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Aes128CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. +func (e Aes128CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha256-128.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha256-128.go new file mode 100644 index 00000000000..b05af7d3d45 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha256-128.go @@ -0,0 +1,132 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha256" + "hash" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/rfc8009" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// RFC https://tools.ietf.org/html/rfc8009 + +// Aes128CtsHmacSha256128 implements Kerberos encryption type aes128-cts-hmac-sha256-128 +type Aes128CtsHmacSha256128 struct { +} + +// GetETypeID returns the EType ID number. +func (e Aes128CtsHmacSha256128) GetETypeID() int32 { + return etypeID.AES128_CTS_HMAC_SHA256_128 +} + +// GetHashID returns the checksum type ID number. +func (e Aes128CtsHmacSha256128) GetHashID() int32 { + return chksumtype.HMAC_SHA256_128_AES128 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes128CtsHmacSha256128) GetKeyByteSize() int { + return 128 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes128CtsHmacSha256128) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Aes128CtsHmacSha256128) GetHashFunc() func() hash.Hash { + return sha256.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. 
+func (e Aes128CtsHmacSha256128) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Aes128CtsHmacSha256128) GetDefaultStringToKeyParams() string { + return "00008000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes128CtsHmacSha256128) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes128CtsHmacSha256128) GetHMACBitLength() int { + return 128 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes128CtsHmacSha256128) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes128CtsHmacSha256128) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + saltp := rfc8009.GetSaltP(salt, "aes128-cts-hmac-sha256-128") + return rfc8009.StringToKey(secret, saltp, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes128CtsHmacSha256128) RandomToKey(b []byte) []byte { + return rfc8009.RandomToKey(b) +} + +// EncryptData encrypts the data provided. +func (e Aes128CtsHmacSha256128) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc8009.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Aes128CtsHmacSha256128) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc8009.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Aes128CtsHmacSha256128) DecryptData(key, data []byte) ([]byte, error) { + return rfc8009.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Aes128CtsHmacSha256128) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc8009.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes128CtsHmacSha256128) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveKey(protocolKey, usage, e), nil +} + +// DeriveRandom generates data needed for key generation. +func (e Aes128CtsHmacSha256128) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the ciphertext message. +// As the hash is calculated over the iv concatenated with the AES cipher output not the plaintext the pt value to this +// interface method is not use. Pass any []byte. +func (e Aes128CtsHmacSha256128) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + // We don't need ib just there for the interface + return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Aes128CtsHmacSha256128) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. 
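// A minimal sketch exercising the aes128-cts-hmac-sha256-128 etype defined
// above: derive a key from a password and salt, encrypt a message and decrypt
// it again. The password, salt and usage number 1 are illustrative values.
package main

import (
	"fmt"
	"log"

	"github.com/jcmturner/gokrb5/v8/crypto"
)

func main() {
	var et crypto.Aes128CtsHmacSha256128

	key, err := et.StringToKey("password", "EXAMPLE.COMalice", et.GetDefaultStringToKeyParams())
	if err != nil {
		log.Fatalf("string-to-key failed: %v", err)
	}

	_, ct, err := et.EncryptMessage(key, []byte("ping"), 1)
	if err != nil {
		log.Fatalf("encrypt failed: %v", err)
	}
	pt, err := et.DecryptMessage(key, ct, 1)
	if err != nil {
		log.Fatalf("decrypt failed: %v", err)
	}
	fmt.Printf("%s\n", pt) // ping
}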
+func (e Aes128CtsHmacSha256128) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha1-96.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha1-96.go new file mode 100644 index 00000000000..45e439a434e --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha1-96.go @@ -0,0 +1,129 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha1" + "hash" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/rfc3961" + "github.com/jcmturner/gokrb5/v8/crypto/rfc3962" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// RFC 3962 + +// Aes256CtsHmacSha96 implements Kerberos encryption type aes256-cts-hmac-sha1-96 +type Aes256CtsHmacSha96 struct { +} + +// GetETypeID returns the EType ID number. +func (e Aes256CtsHmacSha96) GetETypeID() int32 { + return etypeID.AES256_CTS_HMAC_SHA1_96 +} + +// GetHashID returns the checksum type ID number. +func (e Aes256CtsHmacSha96) GetHashID() int32 { + return chksumtype.HMAC_SHA1_96_AES256 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes256CtsHmacSha96) GetKeyByteSize() int { + return 256 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes256CtsHmacSha96) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Aes256CtsHmacSha96) GetHashFunc() func() hash.Hash { + return sha1.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e Aes256CtsHmacSha96) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Aes256CtsHmacSha96) GetDefaultStringToKeyParams() string { + return "00001000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes256CtsHmacSha96) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes256CtsHmacSha96) GetHMACBitLength() int { + return 96 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes256CtsHmacSha96) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes256CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + return rfc3962.StringToKey(secret, salt, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes256CtsHmacSha96) RandomToKey(b []byte) []byte { + return rfc3961.RandomToKey(b) +} + +// EncryptData encrypts the data provided. +func (e Aes256CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc3962.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Aes256CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc3962.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. 
+func (e Aes256CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) { + return rfc3962.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Aes256CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc3962.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes256CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveKey(protocolKey, usage, e) +} + +// DeriveRandom generates data needed for key generation. +func (e Aes256CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. +func (e Aes256CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Aes256CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. +func (e Aes256CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha384-192.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha384-192.go new file mode 100644 index 00000000000..6a54475930e --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha384-192.go @@ -0,0 +1,132 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha512" + "hash" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/rfc8009" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// RFC https://tools.ietf.org/html/rfc8009 + +// Aes256CtsHmacSha384192 implements Kerberos encryption type aes256-cts-hmac-sha384-192 +type Aes256CtsHmacSha384192 struct { +} + +// GetETypeID returns the EType ID number. +func (e Aes256CtsHmacSha384192) GetETypeID() int32 { + return etypeID.AES256_CTS_HMAC_SHA384_192 +} + +// GetHashID returns the checksum type ID number. +func (e Aes256CtsHmacSha384192) GetHashID() int32 { + return chksumtype.HMAC_SHA384_192_AES256 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes256CtsHmacSha384192) GetKeyByteSize() int { + return 192 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes256CtsHmacSha384192) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Aes256CtsHmacSha384192) GetHashFunc() func() hash.Hash { + return sha512.New384 +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e Aes256CtsHmacSha384192) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. 
+func (e Aes256CtsHmacSha384192) GetDefaultStringToKeyParams() string { + return "00008000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes256CtsHmacSha384192) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes256CtsHmacSha384192) GetHMACBitLength() int { + return 192 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes256CtsHmacSha384192) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes256CtsHmacSha384192) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + saltp := rfc8009.GetSaltP(salt, "aes256-cts-hmac-sha384-192") + return rfc8009.StringToKey(secret, saltp, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes256CtsHmacSha384192) RandomToKey(b []byte) []byte { + return rfc8009.RandomToKey(b) +} + +// EncryptData encrypts the data provided. +func (e Aes256CtsHmacSha384192) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc8009.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Aes256CtsHmacSha384192) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc8009.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Aes256CtsHmacSha384192) DecryptData(key, data []byte) ([]byte, error) { + return rfc8009.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Aes256CtsHmacSha384192) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc8009.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes256CtsHmacSha384192) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveKey(protocolKey, usage, e), nil +} + +// DeriveRandom generates data needed for key generation. +func (e Aes256CtsHmacSha384192) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the ciphertext message. +// As the hash is calculated over the iv concatenated with the AES cipher output not the plaintext the pt value to this +// interface method is not use. Pass any []byte. +func (e Aes256CtsHmacSha384192) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + // We don't need ib just there for the interface + return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Aes256CtsHmacSha384192) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. 
+func (e Aes256CtsHmacSha384192) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/common/common.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/common/common.go new file mode 100644 index 00000000000..dab55be7583 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/common/common.go @@ -0,0 +1,132 @@ +// Package common provides encryption methods common across encryption types +package common + +import ( + "bytes" + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +// ZeroPad pads bytes with zeros to nearest multiple of message size m. +func ZeroPad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("Invalid message block size when padding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("Data not valid to pad: Zero size") + } + if l := len(b) % m; l != 0 { + n := m - l + z := make([]byte, n) + b = append(b, z...) + } + return b, nil +} + +// PKCS7Pad pads bytes according to RFC 2315 to nearest multiple of message size m. +func PKCS7Pad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("Invalid message block size when padding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("Data not valid to pad: Zero size") + } + n := m - (len(b) % m) + pb := make([]byte, len(b)+n) + copy(pb, b) + copy(pb[len(b):], bytes.Repeat([]byte{byte(n)}, n)) + return pb, nil +} + +// PKCS7Unpad removes RFC 2315 padding from byes where message size is m. +func PKCS7Unpad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("invalid message block size when unpadding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("padded data not valid: Zero size") + } + if len(b)%m != 0 { + return nil, errors.New("padded data not valid: Not multiple of message block size") + } + c := b[len(b)-1] + n := int(c) + if n == 0 || n > len(b) { + return nil, errors.New("padded data not valid: Data may not have been padded") + } + for i := 0; i < n; i++ { + if b[len(b)-n+i] != c { + return nil, errors.New("padded data not valid") + } + } + return b[:len(b)-n], nil +} + +// GetHash generates the keyed hash value according to the etype's hash function. +func GetHash(pt, key []byte, usage []byte, etype etype.EType) ([]byte, error) { + k, err := etype.DeriveKey(key, usage) + if err != nil { + return nil, fmt.Errorf("unable to derive key for checksum: %v", err) + } + mac := hmac.New(etype.GetHashFunc(), k) + p := make([]byte, len(pt)) + copy(p, pt) + mac.Write(p) + return mac.Sum(nil)[:etype.GetHMACBitLength()/8], nil +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func GetChecksumHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) { + return GetHash(b, key, GetUsageKc(usage), etype) +} + +// GetIntegrityHash returns a keyed integrity hash of the bytes provided. +func GetIntegrityHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) { + return GetHash(b, key, GetUsageKi(usage), etype) +} + +// VerifyChecksum compares the checksum of the msg bytes is the same as the checksum provided. +func VerifyChecksum(key, chksum, msg []byte, usage uint32, etype etype.EType) bool { + //The encrypted message is a concatenation of the encrypted output and the hash HMAC. 
+ expectedMAC, _ := GetChecksumHash(msg, key, usage, etype) + return hmac.Equal(chksum, expectedMAC) +} + +// GetUsageKc returns the checksum key usage value for the usage number un. +// +// See RFC 3961 5.3 key-derivation function definition. +func GetUsageKc(un uint32) []byte { + return getUsage(un, 0x99) +} + +// GetUsageKe returns the encryption key usage value for the usage number un +// +// See RFC 3961 5.3 key-derivation function definition. +func GetUsageKe(un uint32) []byte { + return getUsage(un, 0xAA) +} + +// GetUsageKi returns the integrity key usage value for the usage number un +// +// See RFC 3961 5.3 key-derivation function definition. +func GetUsageKi(un uint32) []byte { + return getUsage(un, 0x55) +} + +func getUsage(un uint32, o byte) []byte { + var buf bytes.Buffer + binary.Write(&buf, binary.BigEndian, un) + return append(buf.Bytes(), o) +} + +// IterationsToS2Kparams converts the number of iterations as an integer to a string representation. +func IterationsToS2Kparams(i uint32) string { + b := make([]byte, 4, 4) + binary.BigEndian.PutUint32(b, i) + return hex.EncodeToString(b) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/crypto.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/crypto.go new file mode 100644 index 00000000000..5c96ddfdf6f --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/crypto.go @@ -0,0 +1,175 @@ +// Package crypto implements cryptographic functions for Kerberos 5 implementation. +package crypto + +import ( + "encoding/hex" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto/etype" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" + "github.com/jcmturner/gokrb5/v8/iana/patype" + "github.com/jcmturner/gokrb5/v8/types" +) + +// GetEtype returns an instances of the required etype struct for the etype ID. +func GetEtype(id int32) (etype.EType, error) { + switch id { + case etypeID.AES128_CTS_HMAC_SHA1_96: + var et Aes128CtsHmacSha96 + return et, nil + case etypeID.AES256_CTS_HMAC_SHA1_96: + var et Aes256CtsHmacSha96 + return et, nil + case etypeID.AES128_CTS_HMAC_SHA256_128: + var et Aes128CtsHmacSha256128 + return et, nil + case etypeID.AES256_CTS_HMAC_SHA384_192: + var et Aes256CtsHmacSha384192 + return et, nil + case etypeID.DES3_CBC_SHA1_KD: + var et Des3CbcSha1Kd + return et, nil + case etypeID.RC4_HMAC: + var et RC4HMAC + return et, nil + default: + return nil, fmt.Errorf("unknown or unsupported EType: %d", id) + } +} + +// GetChksumEtype returns an instances of the required etype struct for the checksum ID. +func GetChksumEtype(id int32) (etype.EType, error) { + switch id { + case chksumtype.HMAC_SHA1_96_AES128: + var et Aes128CtsHmacSha96 + return et, nil + case chksumtype.HMAC_SHA1_96_AES256: + var et Aes256CtsHmacSha96 + return et, nil + case chksumtype.HMAC_SHA256_128_AES128: + var et Aes128CtsHmacSha256128 + return et, nil + case chksumtype.HMAC_SHA384_192_AES256: + var et Aes256CtsHmacSha384192 + return et, nil + case chksumtype.HMAC_SHA1_DES3_KD: + var et Des3CbcSha1Kd + return et, nil + case chksumtype.KERB_CHECKSUM_HMAC_MD5: + var et RC4HMAC + return et, nil + //case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED: + // var et RC4HMAC + // return et, nil + default: + return nil, fmt.Errorf("unknown or unsupported checksum type: %d", id) + } +} + +// GetKeyFromPassword generates an encryption key from the principal's password. 
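// A small sketch of the crypto/common helpers above: the RFC 3961 key-usage
// constants (Kc/Ke/Ki) and the PKCS#7 padding round trip. The usage number 3
// and the 16-byte block size are arbitrary illustrative choices.
package main

import (
	"encoding/hex"
	"fmt"
	"log"

	"github.com/jcmturner/gokrb5/v8/crypto/common"
)

func main() {
	// The usage constants differ only in their final byte: 0x99, 0xAA, 0x55.
	fmt.Println("Kc:", hex.EncodeToString(common.GetUsageKc(3)))
	fmt.Println("Ke:", hex.EncodeToString(common.GetUsageKe(3)))
	fmt.Println("Ki:", hex.EncodeToString(common.GetUsageKi(3)))

	padded, err := common.PKCS7Pad([]byte("kerberos"), 16)
	if err != nil {
		log.Fatal(err)
	}
	unpadded, err := common.PKCS7Unpad(padded, 16)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d -> %d -> %q\n", len("kerberos"), len(padded), unpadded)

	// 32768 iterations encoded as the 4-byte hex string "00008000".
	fmt.Println(common.IterationsToS2Kparams(32768))
}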
+func GetKeyFromPassword(passwd string, cname types.PrincipalName, realm string, etypeID int32, pas types.PADataSequence) (types.EncryptionKey, etype.EType, error) { + var key types.EncryptionKey + et, err := GetEtype(etypeID) + if err != nil { + return key, et, fmt.Errorf("error getting encryption type: %v", err) + } + sk2p := et.GetDefaultStringToKeyParams() + var salt string + var paID int32 + for _, pa := range pas { + switch pa.PADataType { + case patype.PA_PW_SALT: + if paID > pa.PADataType { + continue + } + salt = string(pa.PADataValue) + case patype.PA_ETYPE_INFO: + if paID > pa.PADataType { + continue + } + var eti types.ETypeInfo + err := eti.Unmarshal(pa.PADataValue) + if err != nil { + return key, et, fmt.Errorf("error unmashaling PA Data to PA-ETYPE-INFO2: %v", err) + } + if etypeID != eti[0].EType { + et, err = GetEtype(eti[0].EType) + if err != nil { + return key, et, fmt.Errorf("error getting encryption type: %v", err) + } + } + salt = string(eti[0].Salt) + case patype.PA_ETYPE_INFO2: + if paID > pa.PADataType { + continue + } + var et2 types.ETypeInfo2 + err := et2.Unmarshal(pa.PADataValue) + if err != nil { + return key, et, fmt.Errorf("error unmashalling PA Data to PA-ETYPE-INFO2: %v", err) + } + if etypeID != et2[0].EType { + et, err = GetEtype(et2[0].EType) + if err != nil { + return key, et, fmt.Errorf("error getting encryption type: %v", err) + } + } + if len(et2[0].S2KParams) == 4 { + sk2p = hex.EncodeToString(et2[0].S2KParams) + } + salt = et2[0].Salt + } + } + if salt == "" { + salt = cname.GetSalt(realm) + } + k, err := et.StringToKey(passwd, salt, sk2p) + if err != nil { + return key, et, fmt.Errorf("error deriving key from string: %+v", err) + } + key = types.EncryptionKey{ + KeyType: etypeID, + KeyValue: k, + } + return key, et, nil +} + +// GetEncryptedData encrypts the data provided and returns and EncryptedData type. +// Pass a usage value of zero to use the key provided directly rather than deriving one. +func GetEncryptedData(plainBytes []byte, key types.EncryptionKey, usage uint32, kvno int) (types.EncryptedData, error) { + var ed types.EncryptedData + et, err := GetEtype(key.KeyType) + if err != nil { + return ed, fmt.Errorf("error getting etype: %v", err) + } + _, b, err := et.EncryptMessage(key.KeyValue, plainBytes, usage) + if err != nil { + return ed, err + } + + ed = types.EncryptedData{ + EType: key.KeyType, + Cipher: b, + KVNO: kvno, + } + return ed, nil +} + +// DecryptEncPart decrypts the EncryptedData. +func DecryptEncPart(ed types.EncryptedData, key types.EncryptionKey, usage uint32) ([]byte, error) { + return DecryptMessage(ed.Cipher, key, usage) +} + +// DecryptMessage decrypts the ciphertext and verifies the integrity. 
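// A minimal end-to-end sketch of the helpers above: derive a key from a
// password with GetKeyFromPassword (nil PA data, so the default
// realm+principal salt is used), wrap some bytes with GetEncryptedData and
// recover them with DecryptEncPart. The principal, realm, password, usage 1
// and KVNO 1 are illustrative values.
package main

import (
	"fmt"
	"log"

	"github.com/jcmturner/gokrb5/v8/crypto"
	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
	"github.com/jcmturner/gokrb5/v8/iana/nametype"
	"github.com/jcmturner/gokrb5/v8/types"
)

func main() {
	cname := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, "alice")

	key, _, err := crypto.GetKeyFromPassword("password", cname, "EXAMPLE.COM",
		etypeID.AES256_CTS_HMAC_SHA1_96, nil)
	if err != nil {
		log.Fatalf("key derivation failed: %v", err)
	}

	ed, err := crypto.GetEncryptedData([]byte("ticket bytes"), key, 1, 1)
	if err != nil {
		log.Fatalf("encryption failed: %v", err)
	}
	pt, err := crypto.DecryptEncPart(ed, key, 1)
	if err != nil {
		log.Fatalf("decryption failed: %v", err)
	}
	fmt.Printf("%s\n", pt) // ticket bytes
}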
+func DecryptMessage(ciphertext []byte, key types.EncryptionKey, usage uint32) ([]byte, error) { + et, err := GetEtype(key.KeyType) + if err != nil { + return []byte{}, fmt.Errorf("error decrypting: %v", err) + } + b, err := et.DecryptMessage(key.KeyValue, ciphertext, usage) + if err != nil { + return nil, fmt.Errorf("error decrypting: %v", err) + } + return b, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/des3-cbc-sha1-kd.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/des3-cbc-sha1-kd.go new file mode 100644 index 00000000000..6e650eb6c19 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/des3-cbc-sha1-kd.go @@ -0,0 +1,139 @@ +package crypto + +import ( + "crypto/des" + "crypto/hmac" + "crypto/sha1" + "errors" + "hash" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/rfc3961" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +//RFC: 3961 Section 6.3 + +// Des3CbcSha1Kd implements Kerberos encryption type des3-cbc-hmac-sha1-kd +type Des3CbcSha1Kd struct { +} + +// GetETypeID returns the EType ID number. +func (e Des3CbcSha1Kd) GetETypeID() int32 { + return etypeID.DES3_CBC_SHA1_KD +} + +// GetHashID returns the checksum type ID number. +func (e Des3CbcSha1Kd) GetHashID() int32 { + return chksumtype.HMAC_SHA1_DES3_KD +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Des3CbcSha1Kd) GetKeyByteSize() int { + return 24 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Des3CbcSha1Kd) GetKeySeedBitLength() int { + return 21 * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Des3CbcSha1Kd) GetHashFunc() func() hash.Hash { + return sha1.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e Des3CbcSha1Kd) GetMessageBlockByteSize() int { + //For traditional CBC mode with padding, it would be the underlying cipher's block size + return des.BlockSize +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Des3CbcSha1Kd) GetDefaultStringToKeyParams() string { + var s string + return s +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Des3CbcSha1Kd) GetConfounderByteSize() int { + return des.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Des3CbcSha1Kd) GetHMACBitLength() int { + return e.GetHashFunc()().Size() * 8 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Des3CbcSha1Kd) GetCypherBlockBitLength() int { + return des.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Des3CbcSha1Kd) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + if s2kparams != "" { + return []byte{}, errors.New("s2kparams must be an empty string") + } + return rfc3961.DES3StringToKey(secret, salt, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Des3CbcSha1Kd) RandomToKey(b []byte) []byte { + return rfc3961.DES3RandomToKey(b) +} + +// DeriveRandom generates data needed for key generation. +func (e Des3CbcSha1Kd) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + r, err := rfc3961.DeriveRandom(protocolKey, usage, e) + return r, err +} + +// DeriveKey derives a key from the protocol key based on the usage value. 
+func (e Des3CbcSha1Kd) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + r, err := e.DeriveRandom(protocolKey, usage) + if err != nil { + return nil, err + } + return e.RandomToKey(r), nil +} + +// EncryptData encrypts the data provided. +func (e Des3CbcSha1Kd) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc3961.DES3EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Des3CbcSha1Kd) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc3961.DES3EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Des3CbcSha1Kd) DecryptData(key, data []byte) ([]byte, error) { + return rfc3961.DES3DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Des3CbcSha1Kd) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc3961.DES3DecryptMessage(key, ciphertext, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. +func (e Des3CbcSha1Kd) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Des3CbcSha1Kd) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. +func (e Des3CbcSha1Kd) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go new file mode 100644 index 00000000000..ab1496d3f67 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go @@ -0,0 +1,29 @@ +// Package etype provides the Kerberos Encryption Type interface +package etype + +import "hash" + +// EType is the interface defining the Encryption Type. 
+type EType interface { + GetETypeID() int32 + GetHashID() int32 + GetKeyByteSize() int + GetKeySeedBitLength() int + GetDefaultStringToKeyParams() string + StringToKey(string, salt, s2kparams string) ([]byte, error) + RandomToKey(b []byte) []byte + GetHMACBitLength() int + GetMessageBlockByteSize() int + EncryptData(key, data []byte) ([]byte, []byte, error) + EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) + DecryptData(key, data []byte) ([]byte, error) + DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) + GetCypherBlockBitLength() int + GetConfounderByteSize() int + DeriveKey(protocolKey, usage []byte) ([]byte, error) + DeriveRandom(protocolKey, usage []byte) ([]byte, error) + VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool + GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) + VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool + GetHashFunc() func() hash.Hash +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rc4-hmac.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rc4-hmac.go new file mode 100644 index 00000000000..42f84b850b9 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rc4-hmac.go @@ -0,0 +1,133 @@ +package crypto + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "hash" + "io" + + "github.com/jcmturner/gokrb5/v8/crypto/rfc3961" + "github.com/jcmturner/gokrb5/v8/crypto/rfc4757" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" + "golang.org/x/crypto/md4" +) + +// RC4HMAC implements Kerberos encryption type rc4-hmac +type RC4HMAC struct { +} + +// GetETypeID returns the EType ID number. +func (e RC4HMAC) GetETypeID() int32 { + return etypeID.RC4_HMAC +} + +// GetHashID returns the checksum type ID number. +func (e RC4HMAC) GetHashID() int32 { + return chksumtype.KERB_CHECKSUM_HMAC_MD5 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e RC4HMAC) GetKeyByteSize() int { + return 16 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e RC4HMAC) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e RC4HMAC) GetHashFunc() func() hash.Hash { + return md5.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e RC4HMAC) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e RC4HMAC) GetDefaultStringToKeyParams() string { + return "" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e RC4HMAC) GetConfounderByteSize() int { + return 8 +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e RC4HMAC) GetHMACBitLength() int { + return md5.Size * 8 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e RC4HMAC) GetCypherBlockBitLength() int { + return 8 // doesn't really apply +} + +// StringToKey returns a key derived from the string provided. +func (e RC4HMAC) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + return rfc4757.StringToKey(secret) +} + +// RandomToKey returns a key from the bytes provided. +func (e RC4HMAC) RandomToKey(b []byte) []byte { + r := bytes.NewReader(b) + h := md4.New() + io.Copy(h, r) + return h.Sum(nil) +} + +// EncryptData encrypts the data provided. 
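// A minimal sketch of selecting implementations of the EType interface above
// through crypto.GetEtype and inspecting a few of their advertised
// parameters. The etype IDs chosen are simply ones registered in GetEtype.
package main

import (
	"fmt"
	"log"

	"github.com/jcmturner/gokrb5/v8/crypto"
	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
)

func main() {
	for _, id := range []int32{
		etypeID.AES128_CTS_HMAC_SHA1_96,
		etypeID.AES256_CTS_HMAC_SHA1_96,
		etypeID.RC4_HMAC,
	} {
		et, err := crypto.GetEtype(id)
		if err != nil {
			log.Fatalf("unsupported etype %d: %v", id, err)
		}
		fmt.Printf("etype %d: key %d bytes, HMAC %d bits, confounder %d bytes\n",
			et.GetETypeID(), et.GetKeyByteSize(), et.GetHMACBitLength(), et.GetConfounderByteSize())
	}
}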
+func (e RC4HMAC) EncryptData(key, data []byte) ([]byte, []byte, error) { + b, err := rfc4757.EncryptData(key, data, e) + return []byte{}, b, err +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e RC4HMAC) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + b, err := rfc4757.EncryptMessage(key, message, usage, false, e) + return []byte{}, b, err +} + +// DecryptData decrypts the data provided. +func (e RC4HMAC) DecryptData(key, data []byte) ([]byte, error) { + return rfc4757.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e RC4HMAC) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc4757.DecryptMessage(key, ciphertext, usage, false, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e RC4HMAC) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc4757.HMAC(protocolKey, usage), nil +} + +// DeriveRandom generates data needed for key generation. +func (e RC4HMAC) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. +func (e RC4HMAC) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc4757.VerifyIntegrity(protocolKey, pt, ct, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e RC4HMAC) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return rfc4757.Checksum(protocolKey, usage, data) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. +func (e RC4HMAC) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + checksum, err := rfc4757.Checksum(protocolKey, usage, data) + if err != nil { + return false + } + return hmac.Equal(checksum, chksum) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/encryption.go new file mode 100644 index 00000000000..1383258c7f1 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/encryption.go @@ -0,0 +1,119 @@ +// Package rfc3961 provides encryption and checksum methods as specified in RFC 3961 +package rfc3961 + +import ( + "crypto/cipher" + "crypto/des" + "crypto/hmac" + "crypto/rand" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +// DES3EncryptData encrypts the data provided using DES3 and methods specific to the etype provided. +func DES3EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { + if len(key) != e.GetKeyByteSize() { + return nil, nil, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + data, _ = common.ZeroPad(data, e.GetMessageBlockByteSize()) + + block, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, nil, fmt.Errorf("error creating cipher: %v", err) + } + + //RFC 3961: initial cipher state All bits zero + ivz := make([]byte, des.BlockSize) + + ct := make([]byte, len(data)) + mode := cipher.NewCBCEncrypter(block, ivz) + mode.CryptBlocks(ct, data) + return ct[len(ct)-e.GetMessageBlockByteSize():], ct, nil +} + +// DES3EncryptMessage encrypts the message provided using DES3 and methods specific to the etype provided. 
+// The encrypted data is concatenated with its integrity hash to create an encrypted message. +func DES3EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) { + //confounder + c := make([]byte, e.GetConfounderByteSize()) + _, err := rand.Read(c) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err) + } + plainBytes := append(c, message...) + plainBytes, _ = common.ZeroPad(plainBytes, e.GetMessageBlockByteSize()) + + // Derive key for encryption from usage + var k []byte + if usage != 0 { + k, err = e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err) + } + } + + iv, b, err := e.EncryptData(k, plainBytes) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + + // Generate and append integrity hash + ih, err := common.GetIntegrityHash(plainBytes, key, usage, e) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + b = append(b, ih...) + return iv, b, nil +} + +// DES3DecryptData decrypts the data provided using DES3 and methods specific to the etype provided. +func DES3DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + + if len(data) < des.BlockSize || len(data)%des.BlockSize != 0 { + return []byte{}, errors.New("ciphertext is not a multiple of the block size") + } + block, err := des.NewTripleDESCipher(key) + if err != nil { + return []byte{}, fmt.Errorf("error creating cipher: %v", err) + } + pt := make([]byte, len(data)) + ivz := make([]byte, des.BlockSize) + mode := cipher.NewCBCDecrypter(block, ivz) + mode.CryptBlocks(pt, data) + return pt, nil +} + +// DES3DecryptMessage decrypts the message provided using DES3 and methods specific to the etype provided. +// The integrity of the message is also verified. +func DES3DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) { + //Derive the key + k, err := e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return nil, fmt.Errorf("error deriving key: %v", err) + } + // Strip off the checksum from the end + b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8]) + if err != nil { + return nil, fmt.Errorf("error decrypting: %v", err) + } + //Verify checksum + if !e.VerifyIntegrity(key, ciphertext, b, usage) { + return nil, errors.New("error decrypting: integrity verification failed") + } + //Remove the confounder bytes + return b[e.GetConfounderByteSize():], nil +} + +// VerifyIntegrity verifies the integrity of cipertext bytes ct. 
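+// The trailing HMAC-length bytes of ct are compared, using the constant-time
+// hmac.Equal, against the integrity hash computed over the decrypted
+// plaintext pt.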
+func VerifyIntegrity(key, ct, pt []byte, usage uint32, etype etype.EType) bool { + h := make([]byte, etype.GetHMACBitLength()/8) + copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:]) + expectedMAC, _ := common.GetIntegrityHash(pt, key, usage, etype) + return hmac.Equal(h, expectedMAC) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/keyDerivation.go new file mode 100644 index 00000000000..ed9b169c5d9 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/keyDerivation.go @@ -0,0 +1,169 @@ +package rfc3961 + +import ( + "bytes" + + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +const ( + prfconstant = "prf" +) + +// DeriveRandom implements the RFC 3961 defined function: DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state)). +// +// key: base key or protocol key. Likely to be a key from a keytab file. +// +// usage: a constant. +// +// n: block size in bits (not bytes) - note if you use something like aes.BlockSize this is in bytes. +// +// k: key length / key seed length in bits. Eg. for AES256 this value is 256. +// +// e: the encryption etype function to use. +func DeriveRandom(key, usage []byte, e etype.EType) ([]byte, error) { + n := e.GetCypherBlockBitLength() + k := e.GetKeySeedBitLength() + //Ensure the usage constant is at least the size of the cypher block size. Pass it through the nfold algorithm that will "stretch" it if needs be. + nFoldUsage := Nfold(usage, n) + //k-truncate implemented by creating a byte array the size of k (k is in bits hence /8) + out := make([]byte, k/8) + // Keep feeding the output back into the encryption function until it is no longer short than k. + _, K, err := e.EncryptData(key, nFoldUsage) + if err != nil { + return out, err + } + for i := copy(out, K); i < len(out); { + _, K, _ = e.EncryptData(key, K) + i = i + copy(out[i:], K) + } + return out, nil +} + +// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods. +func DeriveKey(protocolKey, usage []byte, e etype.EType) ([]byte, error) { + r, err := e.DeriveRandom(protocolKey, usage) + if err != nil { + return nil, err + } + return e.RandomToKey(r), nil +} + +// RandomToKey returns a key from the bytes provided according to the definition in RFC 3961. +func RandomToKey(b []byte) []byte { + return b +} + +// DES3RandomToKey returns a key from the bytes provided according to the definition in RFC 3961 for DES3 etypes. +func DES3RandomToKey(b []byte) []byte { + r := fixWeakKey(stretch56Bits(b[:7])) + r2 := fixWeakKey(stretch56Bits(b[7:14])) + r = append(r, r2...) + r3 := fixWeakKey(stretch56Bits(b[14:21])) + r = append(r, r3...) + return r +} + +// DES3StringToKey returns a key derived from the string provided according to the definition in RFC 3961 for DES3 etypes. 
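+// The secret and salt are concatenated, n-folded to the etype's key seed bit
+// length to form a temporary key, and the final key is derived from that
+// temporary key with the constant "kerberos".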
+func DES3StringToKey(secret, salt string, e etype.EType) ([]byte, error) { + s := secret + salt + tkey := e.RandomToKey(Nfold([]byte(s), e.GetKeySeedBitLength())) + return e.DeriveKey(tkey, []byte("kerberos")) +} + +// PseudoRandom function as defined in RFC 3961 +func PseudoRandom(key, b []byte, e etype.EType) ([]byte, error) { + h := e.GetHashFunc()() + h.Write(b) + tmp := h.Sum(nil)[:e.GetMessageBlockByteSize()] + k, err := e.DeriveKey(key, []byte(prfconstant)) + if err != nil { + return []byte{}, err + } + _, prf, err := e.EncryptData(k, tmp) + if err != nil { + return []byte{}, err + } + return prf, nil +} + +func stretch56Bits(b []byte) []byte { + d := make([]byte, len(b), len(b)) + copy(d, b) + var lb byte + for i, v := range d { + bv, nb := calcEvenParity(v) + d[i] = nb + if bv != 0 { + lb = lb | (1 << uint(i+1)) + } else { + lb = lb &^ (1 << uint(i+1)) + } + } + _, lb = calcEvenParity(lb) + d = append(d, lb) + return d +} + +func calcEvenParity(b byte) (uint8, uint8) { + lowestbit := b & 0x01 + // c counter of 1s in the first 7 bits of the byte + var c int + // Iterate over the highest 7 bits (hence p starts at 1 not zero) and count the 1s. + for p := 1; p < 8; p++ { + v := b & (1 << uint(p)) + if v != 0 { + c++ + } + } + if c%2 == 0 { + //Even number of 1s so set parity to 1 + b = b | 1 + } else { + //Odd number of 1s so set parity to 0 + b = b &^ 1 + } + return lowestbit, b +} + +func fixWeakKey(b []byte) []byte { + if weak(b) { + b[7] ^= 0xF0 + } + return b +} + +func weak(b []byte) bool { + // weak keys from https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-67r1.pdf + weakKeys := [4][]byte{ + {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, + {0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE}, + {0xE0, 0xE0, 0xE0, 0xE0, 0xF1, 0xF1, 0xF1, 0xF1}, + {0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E}, + } + semiWeakKeys := [12][]byte{ + {0x01, 0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E}, + {0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E, 0x01}, + {0x01, 0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1}, + {0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1, 0x01}, + {0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE}, + {0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01}, + {0x1F, 0xE0, 0x1F, 0xE0, 0x0E, 0xF1, 0x0E, 0xF1}, + {0xE0, 0x1F, 0xE0, 0x1F, 0xF1, 0x0E, 0xF1, 0x0E}, + {0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E, 0xFE}, + {0xFE, 0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E}, + {0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE}, + {0xFE, 0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1}, + } + for _, k := range weakKeys { + if bytes.Equal(b, k) { + return true + } + } + for _, k := range semiWeakKeys { + if bytes.Equal(b, k) { + return true + } + } + return false +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/nfold.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/nfold.go new file mode 100644 index 00000000000..9536b1e3e35 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/nfold.go @@ -0,0 +1,107 @@ +package rfc3961 + +// Implementation of the n-fold algorithm as defined in RFC 3961. + +/* Credits +This golang implementation of nfold used the following project for help with implementation detail. +Although their source is in java it was helpful as a reference implementation of the RFC. +You can find the source code of their open source project along with license information below. 
+We acknowledge and are grateful to these developers for their contributions to open source + +Project: Apache Directory (http://http://directory.apache.org/) +https://svn.apache.org/repos/asf/directory/apacheds/tags/1.5.1/kerberos-shared/src/main/java/org/apache/directory/server/kerberos/shared/crypto/encryption/NFold.java +License: http://www.apache.org/licenses/LICENSE-2.0 +*/ + +// Nfold expands the key to ensure it is not smaller than one cipher block. +// Defined in RFC 3961. +// +// m input bytes that will be "stretched" to the least common multiple of n bits and the bit length of m. +func Nfold(m []byte, n int) []byte { + k := len(m) * 8 + + //Get the lowest common multiple of the two bit sizes + lcm := lcm(n, k) + relicate := lcm / k + var sumBytes []byte + + for i := 0; i < relicate; i++ { + rotation := 13 * i + sumBytes = append(sumBytes, rotateRight(m, rotation)...) + } + + nfold := make([]byte, n/8) + sum := make([]byte, n/8) + for i := 0; i < lcm/n; i++ { + for j := 0; j < n/8; j++ { + sum[j] = sumBytes[j+(i*len(sum))] + } + nfold = onesComplementAddition(nfold, sum) + } + return nfold +} + +func onesComplementAddition(n1, n2 []byte) []byte { + numBits := len(n1) * 8 + out := make([]byte, numBits/8) + carry := 0 + for i := numBits - 1; i > -1; i-- { + n1b := getBit(&n1, i) + n2b := getBit(&n2, i) + s := n1b + n2b + carry + + if s == 0 || s == 1 { + setBit(&out, i, s) + carry = 0 + } else if s == 2 { + carry = 1 + } else if s == 3 { + setBit(&out, i, 1) + carry = 1 + } + } + if carry == 1 { + carryArray := make([]byte, len(n1)) + carryArray[len(carryArray)-1] = 1 + out = onesComplementAddition(out, carryArray) + } + return out +} + +func rotateRight(b []byte, step int) []byte { + out := make([]byte, len(b)) + bitLen := len(b) * 8 + for i := 0; i < bitLen; i++ { + v := getBit(&b, i) + setBit(&out, (i+step)%bitLen, v) + } + return out +} + +func lcm(x, y int) int { + return (x * y) / gcd(x, y) +} + +func gcd(x, y int) int { + for y != 0 { + x, y = y, x%y + } + return x +} + +func getBit(b *[]byte, p int) int { + pByte := p / 8 + pBit := uint(p % 8) + vByte := (*b)[pByte] + vInt := int(vByte >> (8 - (pBit + 1)) & 0x0001) + return vInt +} + +func setBit(b *[]byte, p, v int) { + pByte := p / 8 + pBit := uint(p % 8) + oldByte := (*b)[pByte] + var newByte byte + newByte = byte(v<<(8-(pBit+1))) | oldByte + (*b)[pByte] = newByte +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/encryption.go new file mode 100644 index 00000000000..5ff89e85b03 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/encryption.go @@ -0,0 +1,89 @@ +// Package rfc3962 provides encryption and checksum methods as specified in RFC 3962 +package rfc3962 + +import ( + "crypto/rand" + "errors" + "fmt" + + "github.com/jcmturner/aescts/v2" + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 3962. +func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + ivz := make([]byte, e.GetCypherBlockBitLength()/8) + return aescts.Encrypt(key, ivz, data) +} + +// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 3962. 
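+// A random confounder is prepended to the message and the result is encrypted
+// with the etype's EncryptData (AES in CBC-CTS mode with a zero IV) under the
+// usage-derived key Ke.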
+// The encrypted data is concatenated with its integrity hash to create an encrypted message. +func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + //confounder + c := make([]byte, e.GetConfounderByteSize()) + _, err := rand.Read(c) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err) + } + plainBytes := append(c, message...) + + // Derive key for encryption from usage + var k []byte + if usage != 0 { + k, err = e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err) + } + } + + // Encrypt the data + iv, b, err := e.EncryptData(k, plainBytes) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + + // Generate and append integrity hash + ih, err := common.GetIntegrityHash(plainBytes, key, usage, e) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + b = append(b, ih...) + return iv, b, nil +} + +// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 3962. +func DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + ivz := make([]byte, e.GetCypherBlockBitLength()/8) + return aescts.Decrypt(key, ivz, data) +} + +// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 3962. +// The integrity of the message is also verified. +func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) { + //Derive the key + k, err := e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return nil, fmt.Errorf("error deriving key: %v", err) + } + // Strip off the checksum from the end + b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8]) + if err != nil { + return nil, err + } + //Verify checksum + if !e.VerifyIntegrity(key, ciphertext, b, usage) { + return nil, errors.New("integrity verification failed") + } + //Remove the confounder bytes + return b[e.GetConfounderByteSize():], nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/keyDerivation.go new file mode 100644 index 00000000000..fb402d97b75 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/keyDerivation.go @@ -0,0 +1,51 @@ +package rfc3962 + +import ( + "encoding/binary" + "encoding/hex" + "errors" + + "github.com/jcmturner/gofork/x/crypto/pbkdf2" + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +const ( + s2kParamsZero = 4294967296 +) + +// StringToKey returns a key derived from the string provided according to the definition in RFC 3961. 
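+// s2kparams is an 8-character hex string carrying the PBKDF2 iteration count
+// as a big-endian 32-bit value; it is parsed with S2KparamsToItertions before
+// the key is produced by StringToKeyIter.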
+func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) { + i, err := S2KparamsToItertions(s2kparams) + if err != nil { + return nil, err + } + return StringToKeyIter(secret, salt, i, e) +} + +// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0 +func StringToPBKDF2(secret, salt string, iterations int64, e etype.EType) []byte { + return pbkdf2.Key64([]byte(secret), []byte(salt), iterations, int64(e.GetKeyByteSize()), e.GetHashFunc()) +} + +// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 3961. +func StringToKeyIter(secret, salt string, iterations int64, e etype.EType) ([]byte, error) { + tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e)) + return e.DeriveKey(tkey, []byte("kerberos")) +} + +// S2KparamsToItertions converts the string representation of iterations to an integer +func S2KparamsToItertions(s2kparams string) (int64, error) { + //The s2kparams string should be hex string representing 4 bytes + //The 4 bytes represent a number in big endian order + //If the value is zero then the number of iterations should be 4,294,967,296 (2^32) + var i uint32 + if len(s2kparams) != 8 { + return int64(s2kParamsZero), errors.New("invalid s2kparams length") + } + b, err := hex.DecodeString(s2kparams) + if err != nil { + return int64(s2kParamsZero), errors.New("invalid s2kparams, cannot decode string to bytes") + } + i = binary.BigEndian.Uint32(b) + return int64(i), nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/checksum.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/checksum.go new file mode 100644 index 00000000000..45276e95322 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/checksum.go @@ -0,0 +1,40 @@ +package rfc4757 + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "io" +) + +// Checksum returns a hash of the data in accordance with RFC 4757 +func Checksum(key []byte, usage uint32, data []byte) ([]byte, error) { + // Create hashing key + s := append([]byte(`signaturekey`), byte(0x00)) //includes zero octet at end + mac := hmac.New(md5.New, key) + mac.Write(s) + Ksign := mac.Sum(nil) + + // Format data + tb := UsageToMSMsgType(usage) + p := append(tb, data...) + h := md5.New() + rb := bytes.NewReader(p) + _, err := io.Copy(h, rb) + if err != nil { + return []byte{}, err + } + tmp := h.Sum(nil) + + // Generate HMAC + mac = hmac.New(md5.New, Ksign) + mac.Write(tmp) + return mac.Sum(nil), nil +} + +// HMAC returns a keyed MD5 checksum of the data +func HMAC(key []byte, data []byte) []byte { + mac := hmac.New(md5.New, key) + mac.Write(data) + return mac.Sum(nil) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/encryption.go new file mode 100644 index 00000000000..fdebe736685 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/encryption.go @@ -0,0 +1,80 @@ +// Package rfc4757 provides encryption and checksum methods as specified in RFC 4757 +package rfc4757 + +import ( + "crypto/hmac" + "crypto/rand" + "crypto/rc4" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 4757. 
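+// RC4 is a symmetric stream cipher, so DecryptData below simply calls
+// EncryptData with the same key to reverse the operation.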
+func EncryptData(key, data []byte, e etype.EType) ([]byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + rc4Cipher, err := rc4.NewCipher(key) + if err != nil { + return []byte{}, fmt.Errorf("error creating RC4 cipher: %v", err) + } + ed := make([]byte, len(data)) + copy(ed, data) + rc4Cipher.XORKeyStream(ed, ed) + rc4Cipher.Reset() + return ed, nil +} + +// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 4757. +func DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + return EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 4757. +// The encrypted data is concatenated with its RC4 header containing integrity checksum and confounder to create an encrypted message. +func EncryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) { + confounder := make([]byte, e.GetConfounderByteSize()) // size = 8 + _, err := rand.Read(confounder) + if err != nil { + return []byte{}, fmt.Errorf("error generating confounder: %v", err) + } + k1 := key + k2 := HMAC(k1, UsageToMSMsgType(usage)) + toenc := append(confounder, data...) + chksum := HMAC(k2, toenc) + k3 := HMAC(k2, chksum) + + ed, err := EncryptData(k3, toenc, e) + if err != nil { + return []byte{}, fmt.Errorf("error encrypting data: %v", err) + } + + msg := append(chksum, ed...) + return msg, nil +} + +// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 4757. +// The integrity of the message is also verified. +func DecryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) { + checksum := data[:e.GetHMACBitLength()/8] + ct := data[e.GetHMACBitLength()/8:] + _, k2, k3 := deriveKeys(key, checksum, usage, export) + + pt, err := DecryptData(k3, ct, e) + if err != nil { + return []byte{}, fmt.Errorf("error decrypting data: %v", err) + } + + if !VerifyIntegrity(k2, pt, data, e) { + return []byte{}, errors.New("integrity checksum incorrect") + } + return pt[e.GetConfounderByteSize():], nil +} + +// VerifyIntegrity checks the integrity checksum of the data matches that calculated from the decrypted data. +func VerifyIntegrity(key, pt, data []byte, e etype.EType) bool { + chksum := HMAC(key, pt) + return hmac.Equal(chksum, data[:e.GetHMACBitLength()/8]) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/keyDerivation.go new file mode 100644 index 00000000000..d1f90c077d8 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/keyDerivation.go @@ -0,0 +1,40 @@ +package rfc4757 + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + + "golang.org/x/crypto/md4" +) + +// StringToKey returns a key derived from the string provided according to the definition in RFC 4757. 
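+// The key is the MD4 digest of the secret encoded as little-endian UTF-16
+// code units; no salt or iteration parameters are used for this etype.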
+func StringToKey(secret string) ([]byte, error) { + b := make([]byte, len(secret)*2, len(secret)*2) + for i, r := range secret { + u := fmt.Sprintf("%04x", r) + c, err := hex.DecodeString(u) + if err != nil { + return []byte{}, errors.New("character could not be encoded") + } + // Swap round the two bytes to make little endian as we put into byte slice + b[2*i] = c[1] + b[2*i+1] = c[0] + } + r := bytes.NewReader(b) + h := md4.New() + _, err := io.Copy(h, r) + if err != nil { + return []byte{}, err + } + return h.Sum(nil), nil +} + +func deriveKeys(key, checksum []byte, usage uint32, export bool) (k1, k2, k3 []byte) { + k1 = key + k2 = HMAC(k1, UsageToMSMsgType(usage)) + k3 = HMAC(k2, checksum) + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/msgtype.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/msgtype.go new file mode 100644 index 00000000000..068588d3baa --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/msgtype.go @@ -0,0 +1,20 @@ +package rfc4757 + +import "encoding/binary" + +// UsageToMSMsgType converts Kerberos key usage numbers to Microsoft message type encoded as a little-endian four byte slice. +func UsageToMSMsgType(usage uint32) []byte { + // Translate usage numbers to the Microsoft T numbers + switch usage { + case 3: + usage = 8 + case 9: + usage = 8 + case 23: + usage = 13 + } + // Now convert to bytes + tb := make([]byte, 4) // We force an int32 input so we can't go over 4 bytes + binary.PutUvarint(tb, uint64(usage)) + return tb +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/encryption.go new file mode 100644 index 00000000000..54cff7b4629 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/encryption.go @@ -0,0 +1,125 @@ +// Package rfc8009 provides encryption and checksum methods as specified in RFC 8009 +package rfc8009 + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/rand" + "errors" + "fmt" + + "github.com/jcmturner/aescts/v2" + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/etype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 8009. +func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + if len(key) != kl { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + ivz := make([]byte, aes.BlockSize) + return aescts.Encrypt(key, ivz, data) +} + +// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 8009. +// The encrypted data is concatenated with its integrity hash to create an encrypted message. +func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + if len(key) != kl { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key)) + } + if len(key) != e.GetKeyByteSize() { + } + //confounder + c := make([]byte, e.GetConfounderByteSize()) + _, err := rand.Read(c) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err) + } + plainBytes := append(c, message...) 
+ + // Derive key for encryption from usage + var k []byte + if usage != 0 { + k, err = e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err) + } + } + + // Encrypt the data + iv, b, err := e.EncryptData(k, plainBytes) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + + ivz := make([]byte, e.GetConfounderByteSize()) + ih, err := GetIntegityHash(ivz, b, key, usage, e) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + b = append(b, ih...) + return iv, b, nil +} + +// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 8009. +func DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + if len(key) != kl { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key)) + } + ivz := make([]byte, aes.BlockSize) + return aescts.Decrypt(key, ivz, data) +} + +// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 8009. +// The integrity of the message is also verified. +func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) { + //Derive the key + k, err := e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return nil, fmt.Errorf("error deriving key: %v", err) + } + // Strip off the checksum from the end + b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8]) + if err != nil { + return nil, err + } + //Verify checksum + if !e.VerifyIntegrity(key, ciphertext, b, usage) { + return nil, errors.New("integrity verification failed") + } + //Remove the confounder bytes + return b[e.GetConfounderByteSize():], nil +} + +// GetIntegityHash returns a keyed integrity hash of the bytes provided as defined in RFC 8009 +func GetIntegityHash(iv, c, key []byte, usage uint32, e etype.EType) ([]byte, error) { + // Generate and append integrity hash + // Rather than calculating the hash over the confounder and plaintext + // it is calculated over the iv concatenated with the AES cipher output. + ib := append(iv, c...) + return common.GetIntegrityHash(ib, key, usage, e) +} + +// VerifyIntegrity verifies the integrity of cipertext bytes ct. +func VerifyIntegrity(key, ct []byte, usage uint32, etype etype.EType) bool { + h := make([]byte, etype.GetHMACBitLength()/8) + copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:]) + ivz := make([]byte, etype.GetConfounderByteSize()) + ib := append(ivz, ct[:len(ct)-(etype.GetHMACBitLength()/8)]...) 
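+	// Per RFC 8009 the integrity hash is computed over the zero IV concatenated
+	// with the ciphertext (minus the trailing HMAC), not over the plaintext.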
+ expectedMAC, _ := common.GetIntegrityHash(ib, key, usage, etype) + return hmac.Equal(h, expectedMAC) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/keyDerivation.go new file mode 100644 index 00000000000..e94732226c6 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/keyDerivation.go @@ -0,0 +1,135 @@ +package rfc8009 + +import ( + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + + "github.com/jcmturner/gokrb5/v8/crypto/etype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" + "golang.org/x/crypto/pbkdf2" +) + +const ( + s2kParamsZero = 32768 +) + +// DeriveRandom for key derivation as defined in RFC 8009 +func DeriveRandom(protocolKey, usage []byte, e etype.EType) ([]byte, error) { + h := e.GetHashFunc()() + return KDF_HMAC_SHA2(protocolKey, []byte("prf"), usage, h.Size(), e), nil +} + +// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods. +// +// https://tools.ietf.org/html/rfc8009#section-5 +func DeriveKey(protocolKey, label []byte, e etype.EType) []byte { + var context []byte + var kl int + // Key length is longer for aes256-cts-hmac-sha384-192 is it is a Ke or from StringToKey (where label is "kerberos") + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + Swtch: + switch label[len(label)-1] { + case 0x73: + // 0x73 is "s" so label could be kerberos meaning StringToKey so now check if the label is "kerberos" + kerblabel := []byte("kerberos") + if len(label) != len(kerblabel) { + break + } + for i, b := range label { + if b != kerblabel[i] { + kl = e.GetKeySeedBitLength() + break Swtch + } + } + if kl == 0 { + // This is StringToKey + kl = 256 + } + case 0xAA: + // This is a Ke + kl = 256 + } + } + if kl == 0 { + kl = e.GetKeySeedBitLength() + } + return e.RandomToKey(KDF_HMAC_SHA2(protocolKey, label, context, kl, e)) +} + +// RandomToKey returns a key from the bytes provided according to the definition in RFC 8009. +func RandomToKey(b []byte) []byte { + return b +} + +// StringToKey returns a key derived from the string provided according to the definition in RFC 8009. +func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) { + i, err := S2KparamsToItertions(s2kparams) + if err != nil { + return nil, err + } + return StringToKeyIter(secret, salt, i, e) +} + +// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 8009. +func StringToKeyIter(secret, salt string, iterations int, e etype.EType) ([]byte, error) { + tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e)) + return e.DeriveKey(tkey, []byte("kerberos")) +} + +// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0 +func StringToPBKDF2(secret, salt string, iterations int, e etype.EType) []byte { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + return pbkdf2.Key([]byte(secret), []byte(salt), iterations, kl, e.GetHashFunc()) +} + +// KDF_HMAC_SHA2 key derivation: https://tools.ietf.org/html/rfc8009#section-3 +func KDF_HMAC_SHA2(protocolKey, label, context []byte, kl int, e etype.EType) []byte { + //k: Length in bits of the key to be outputted, expressed in big-endian binary representation in 4 bytes. 
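+	// The HMAC input is: 4-byte counter (fixed at 1) | label | 0x00 | context | k,
+	// and the output is truncated to kl/8 bytes.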
+ k := make([]byte, 4, 4) + binary.BigEndian.PutUint32(k, uint32(kl)) + + c := make([]byte, 4, 4) + binary.BigEndian.PutUint32(c, uint32(1)) + c = append(c, label...) + c = append(c, byte(0)) + if len(context) > 0 { + c = append(c, context...) + } + c = append(c, k...) + + mac := hmac.New(e.GetHashFunc(), protocolKey) + mac.Write(c) + return mac.Sum(nil)[:(kl / 8)] +} + +// GetSaltP returns the salt value based on the etype name: https://tools.ietf.org/html/rfc8009#section-4 +func GetSaltP(salt, ename string) string { + b := []byte(ename) + b = append(b, byte(0)) + b = append(b, []byte(salt)...) + return string(b) +} + +// S2KparamsToItertions converts the string representation of iterations to an integer for RFC 8009. +func S2KparamsToItertions(s2kparams string) (int, error) { + var i uint32 + if len(s2kparams) != 8 { + return s2kParamsZero, errors.New("Invalid s2kparams length") + } + b, err := hex.DecodeString(s2kparams) + if err != nil { + return s2kParamsZero, errors.New("Invalid s2kparams, cannot decode string to bytes") + } + i = binary.BigEndian.Uint32(b) + //buf := bytes.NewBuffer(b) + //err = binary.Read(buf, binary.BigEndian, &i) + if err != nil { + return s2kParamsZero, errors.New("Invalid s2kparams, cannot convert to big endian int32") + } + return int(i), nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/MICToken.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/MICToken.go new file mode 100644 index 00000000000..ab8daa2897b --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/MICToken.go @@ -0,0 +1,174 @@ +package gssapi + +import ( + "bytes" + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/types" +) + +// RFC 4121, section 4.2.6.1 + +const ( + // MICTokenFlagSentByAcceptor - this flag indicates the sender is the context acceptor. When not set, it indicates the sender is the context initiator + MICTokenFlagSentByAcceptor = 1 << iota + // MICTokenFlagSealed - this flag indicates confidentiality is provided for. It SHALL NOT be set in MIC tokens + MICTokenFlagSealed + // MICTokenFlagAcceptorSubkey - a subkey asserted by the context acceptor is used to protect the message + MICTokenFlagAcceptorSubkey +) + +const ( + micHdrLen = 16 // Length of the MIC Token's header +) + +// MICToken represents a GSS API MIC token, as defined in RFC 4121. +// It contains the header fields, the payload (this is not transmitted) and +// the checksum, and provides the logic for converting to/from bytes plus +// computing and verifying checksums +type MICToken struct { + // const GSS Token ID: 0x0404 + Flags byte // contains three flags: acceptor, sealed, acceptor subkey + // const Filler: 0xFF 0xFF 0xFF 0xFF 0xFF + SndSeqNum uint64 // sender's sequence number. big-endian + Payload []byte // your data! :) + Checksum []byte // checksum of { payload | header } +} + +// Return the 2 bytes identifying a GSS API MIC token +func getGSSMICTokenID() *[2]byte { + return &[2]byte{0x04, 0x04} +} + +// Return the filler bytes used in header +func fillerBytes() *[5]byte { + return &[5]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF} +} + +// Marshal the MICToken into a byte slice. +// The payload should have been set and the checksum computed, otherwise an error is returned. 
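+// The wire format is the 16-byte header (token ID 0x0404, flags, five 0xFF
+// filler bytes, big-endian sequence number) followed by the checksum; the
+// payload itself is not included in the token.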
+func (mt *MICToken) Marshal() ([]byte, error) { + if mt.Checksum == nil { + return nil, errors.New("checksum has not been set") + } + + bytes := make([]byte, micHdrLen+len(mt.Checksum)) + copy(bytes[0:micHdrLen], mt.getMICChecksumHeader()[:]) + copy(bytes[micHdrLen:], mt.Checksum) + + return bytes, nil +} + +// SetChecksum uses the passed encryption key and key usage to compute the checksum over the payload and +// the header, and sets the Checksum field of this MICToken. +// If the payload has not been set or the checksum has already been set, an error is returned. +func (mt *MICToken) SetChecksum(key types.EncryptionKey, keyUsage uint32) error { + if mt.Checksum != nil { + return errors.New("checksum has already been computed") + } + checksum, err := mt.checksum(key, keyUsage) + if err != nil { + return err + } + mt.Checksum = checksum + return nil +} + +// Compute and return the checksum of this token, computed using the passed key and key usage. +// Note: This will NOT update the struct's Checksum field. +func (mt *MICToken) checksum(key types.EncryptionKey, keyUsage uint32) ([]byte, error) { + if mt.Payload == nil { + return nil, errors.New("cannot compute checksum with uninitialized payload") + } + d := make([]byte, micHdrLen+len(mt.Payload)) + copy(d[0:], mt.Payload) + copy(d[len(mt.Payload):], mt.getMICChecksumHeader()) + + encType, err := crypto.GetEtype(key.KeyType) + if err != nil { + return nil, err + } + return encType.GetChecksumHash(key.KeyValue, d, keyUsage) +} + +// Build a header suitable for a checksum computation +func (mt *MICToken) getMICChecksumHeader() []byte { + header := make([]byte, micHdrLen) + copy(header[0:2], getGSSMICTokenID()[:]) + header[2] = mt.Flags + copy(header[3:8], fillerBytes()[:]) + binary.BigEndian.PutUint64(header[8:16], mt.SndSeqNum) + return header +} + +// Verify computes the token's checksum with the provided key and usage, +// and compares it to the checksum present in the token. +// In case of any failure, (false, err) is returned, with err an explanatory error. +func (mt *MICToken) Verify(key types.EncryptionKey, keyUsage uint32) (bool, error) { + computed, err := mt.checksum(key, keyUsage) + if err != nil { + return false, err + } + if !hmac.Equal(computed, mt.Checksum) { + return false, fmt.Errorf( + "checksum mismatch. Computed: %s, Contained in token: %s", + hex.EncodeToString(computed), hex.EncodeToString(mt.Checksum)) + } + return true, nil +} + +// Unmarshal bytes into the corresponding MICToken. +// If expectFromAcceptor is true we expect the token to have been emitted by the gss acceptor, +// and will check the according flag, returning an error if the token does not match the expectation. 
+func (mt *MICToken) Unmarshal(b []byte, expectFromAcceptor bool) error { + if len(b) < micHdrLen { + return errors.New("bytes shorter than header length") + } + if !bytes.Equal(getGSSMICTokenID()[:], b[0:2]) { + return fmt.Errorf("wrong Token ID, Expected %s, was %s", + hex.EncodeToString(getGSSMICTokenID()[:]), + hex.EncodeToString(b[0:2])) + } + flags := b[2] + isFromAcceptor := flags&MICTokenFlagSentByAcceptor != 0 + if isFromAcceptor && !expectFromAcceptor { + return errors.New("unexpected acceptor flag is set: not expecting a token from the acceptor") + } + if !isFromAcceptor && expectFromAcceptor { + return errors.New("unexpected acceptor flag is not set: expecting a token from the acceptor, not in the initiator") + } + if !bytes.Equal(b[3:8], fillerBytes()[:]) { + return fmt.Errorf("unexpected filler bytes: expecting %s, was %s", + hex.EncodeToString(fillerBytes()[:]), + hex.EncodeToString(b[3:8])) + } + + mt.Flags = flags + mt.SndSeqNum = binary.BigEndian.Uint64(b[8:16]) + mt.Checksum = b[micHdrLen:] + return nil +} + +// NewInitiatorMICToken builds a new initiator token (acceptor flag will be set to 0) and computes the authenticated checksum. +// Other flags are set to 0. +// Note that in certain circumstances you may need to provide a sequence number that has been defined earlier. +// This is currently not supported. +func NewInitiatorMICToken(payload []byte, key types.EncryptionKey) (*MICToken, error) { + token := MICToken{ + Flags: 0x00, + SndSeqNum: 0, + Payload: payload, + } + + if err := token.SetChecksum(key, keyusage.GSSAPI_INITIATOR_SIGN); err != nil { + return nil, err + } + + return &token, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/README.md b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/README.md new file mode 100644 index 00000000000..8fdcf70c329 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/README.md @@ -0,0 +1,20 @@ +# Notes on GSS-API Negotiation Mechanism +https://tools.ietf.org/html/rfc4178 + +Client sends an initial negotiation message to the server which specifies the list of mechanisms +the client can support in order of decreasing preference. +This message is generated with the ``NewNegTokenInitKrb5`` method. +The message generated by this function specifies only a kerberos v5 mechanism is supported. + +The RFC states that this message can optionally contain the initial mechanism token +for the preferred mechanism (KRB5 in this case) of the client. The ``NewNegTokenInitKrb5`` +includes this in the message. + +The server side responds to this message with a one of four messages: + +| Message Type/State | Description | +|--------------------|-------------| +| accept-completed | indicates that the initiator-selected mechanism was acceptable to the target, and that the security mechanism token embedded in the first negotiation message was sufficient to complete the authentication | +| accept-incomplete | At least one more message is needed from the client to establish security context. | +| reject | Negotiation is being terminated. 
| +| request-mic | (this state can only be present in the first reply message from the target) indicates that the MIC token exchange is REQUIRED if per-message integrity services are available | \ No newline at end of file diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/contextFlags.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/contextFlags.go new file mode 100644 index 00000000000..8c91859b89f --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/contextFlags.go @@ -0,0 +1,27 @@ +package gssapi + +import "github.com/jcmturner/gofork/encoding/asn1" + +// GSS-API context flags assigned numbers. +const ( + ContextFlagDeleg = 1 + ContextFlagMutual = 2 + ContextFlagReplay = 4 + ContextFlagSequence = 8 + ContextFlagConf = 16 + ContextFlagInteg = 32 + ContextFlagAnon = 64 +) + +// ContextFlags flags for GSSAPI +// DEPRECATED - do not use +type ContextFlags asn1.BitString + +// NewContextFlags creates a new ContextFlags instance +// DEPRECATED - do not use +func NewContextFlags() ContextFlags { + var c ContextFlags + c.BitLength = 32 + c.Bytes = make([]byte, 4) + return c +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/gssapi.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/gssapi.go new file mode 100644 index 00000000000..80822319022 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/gssapi.go @@ -0,0 +1,202 @@ +// Package gssapi implements Generic Security Services Application Program Interface required for SPNEGO kerberos authentication. +package gssapi + +import ( + "context" + "fmt" + + "github.com/jcmturner/gofork/encoding/asn1" +) + +// GSS-API OID names +const ( + // GSS-API OID names + OIDKRB5 OIDName = "KRB5" // MechType OID for Kerberos 5 + OIDMSLegacyKRB5 OIDName = "MSLegacyKRB5" // MechType OID for Kerberos 5 + OIDSPNEGO OIDName = "SPNEGO" + OIDGSSIAKerb OIDName = "GSSIAKerb" // Indicates the client cannot get a service ticket and asks the server to serve as an intermediate to the target KDC. http://k5wiki.kerberos.org/wiki/Projects/IAKERB#IAKERB_mech +) + +// GSS-API status values +const ( + StatusBadBindings = 1 << iota + StatusBadMech + StatusBadName + StatusBadNameType + StatusBadStatus + StatusBadSig + StatusBadMIC + StatusContextExpired + StatusCredentialsExpired + StatusDefectiveCredential + StatusDefectiveToken + StatusFailure + StatusNoContext + StatusNoCred + StatusBadQOP + StatusUnauthorized + StatusUnavailable + StatusDuplicateElement + StatusNameNotMN + StatusComplete + StatusContinueNeeded + StatusDuplicateToken + StatusOldToken + StatusUnseqToken + StatusGapToken +) + +// ContextToken is an interface for a GSS-API context token. 
+type ContextToken interface { + Marshal() ([]byte, error) + Unmarshal(b []byte) error + Verify() (bool, Status) + Context() context.Context +} + +/* +CREDENTIAL MANAGEMENT + +GSS_Acquire_cred acquire credentials for use +GSS_Release_cred release credentials after use +GSS_Inquire_cred display information about credentials +GSS_Add_cred construct credentials incrementally +GSS_Inquire_cred_by_mech display per-mechanism credential information + +CONTEXT-LEVEL CALLS + +GSS_Init_sec_context initiate outbound security context +GSS_Accept_sec_context accept inbound security context +GSS_Delete_sec_context flush context when no longer needed +GSS_Process_context_token process received control token on context +GSS_Context_time indicate validity time remaining on context +GSS_Inquire_context display information about context +GSS_Wrap_size_limit determine GSS_Wrap token size limit +GSS_Export_sec_context transfer context to other process +GSS_Import_sec_context import transferred context + +PER-MESSAGE CALLS + +GSS_GetMIC apply integrity check, receive as token separate from message +GSS_VerifyMIC validate integrity check token along with message +GSS_Wrap sign, optionally encrypt, encapsulate +GSS_Unwrap decapsulate, decrypt if needed, validate integrity check + +SUPPORT CALLS + +GSS_Display_status translate status codes to printable form +GSS_Indicate_mechs indicate mech_types supported on local system +GSS_Compare_name compare two names for equality +GSS_Display_name translate name to printable form +GSS_Import_name convert printable name to normalized form +GSS_Release_name free storage of normalized-form name +GSS_Release_buffer free storage of general GSS-allocated object +GSS_Release_OID_set free storage of OID set object +GSS_Create_empty_OID_set create empty OID set +GSS_Add_OID_set_member add member to OID set +GSS_Test_OID_set_member test if OID is member of OID set +GSS_Inquire_names_for_mech indicate name types supported by mechanism +GSS_Inquire_mechs_for_name indicates mechanisms supporting name type +GSS_Canonicalize_name translate name to per-mechanism form +GSS_Export_name externalize per-mechanism name +GSS_Duplicate_name duplicate name object +*/ + +// Mechanism is the GSS-API interface for authentication mechanisms. +type Mechanism interface { + OID() asn1.ObjectIdentifier + AcquireCred() error // acquire credentials for use (eg. AS exchange for KRB5) + InitSecContext() (ContextToken, error) // initiate outbound security context (eg TGS exchange builds AP_REQ to go into ContextToken to send to service) + AcceptSecContext(ct ContextToken) (bool, context.Context, Status) // service verifies the token server side to establish a context + MIC() MICToken // apply integrity check, receive as token separate from message + VerifyMIC(mt MICToken) (bool, error) // validate integrity check token along with message + Wrap(msg []byte) WrapToken // sign, optionally encrypt, encapsulate + Unwrap(wt WrapToken) []byte // decapsulate, decrypt if needed, validate integrity check +} + +// OIDName is the type for defined GSS-API OIDs. +type OIDName string + +// OID returns the OID for the provided OID name. 
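+// Unrecognised names return an empty asn1.ObjectIdentifier.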
+func (o OIDName) OID() asn1.ObjectIdentifier { + switch o { + case OIDSPNEGO: + return asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 2} + case OIDKRB5: + return asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} + case OIDMSLegacyKRB5: + return asn1.ObjectIdentifier{1, 2, 840, 48018, 1, 2, 2} + case OIDGSSIAKerb: + return asn1.ObjectIdentifier{1, 3, 6, 1, 5, 2, 5} + } + return asn1.ObjectIdentifier{} +} + +// Status is the GSS-API status and implements the error interface. +type Status struct { + Code int + Message string +} + +// Error returns the Status description. +func (s Status) Error() string { + var str string + switch s.Code { + case StatusBadBindings: + str = "channel binding mismatch" + case StatusBadMech: + str = "unsupported mechanism requested" + case StatusBadName: + str = "invalid name provided" + case StatusBadNameType: + str = "name of unsupported type provided" + case StatusBadStatus: + str = "invalid input status selector" + case StatusBadSig: + str = "token had invalid integrity check" + case StatusBadMIC: + str = "preferred alias for GSS_S_BAD_SIG" + case StatusContextExpired: + str = "specified security context expired" + case StatusCredentialsExpired: + str = "expired credentials detected" + case StatusDefectiveCredential: + str = "defective credential detected" + case StatusDefectiveToken: + str = "defective token detected" + case StatusFailure: + str = "failure, unspecified at GSS-API level" + case StatusNoContext: + str = "no valid security context specified" + case StatusNoCred: + str = "no valid credentials provided" + case StatusBadQOP: + str = "unsupported QOP valu" + case StatusUnauthorized: + str = "operation unauthorized" + case StatusUnavailable: + str = "operation unavailable" + case StatusDuplicateElement: + str = "duplicate credential element requested" + case StatusNameNotMN: + str = "name contains multi-mechanism elements" + case StatusComplete: + str = "normal completion" + case StatusContinueNeeded: + str = "continuation call to routine required" + case StatusDuplicateToken: + str = "duplicate per-message token detected" + case StatusOldToken: + str = "timed-out per-message token detected" + case StatusUnseqToken: + str = "reordered (early) per-message token detected" + case StatusGapToken: + str = "skipped predecessor token(s) detected" + default: + str = "unknown GSS-API error status" + } + if s.Message != "" { + return fmt.Sprintf("%s: %s", str, s.Message) + } + return str +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/wrapToken.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/wrapToken.go new file mode 100644 index 00000000000..ea7d0543e09 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/wrapToken.go @@ -0,0 +1,195 @@ +package gssapi + +import ( + "bytes" + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/types" +) + +// RFC 4121, section 4.2.6.2 + +const ( + // HdrLen is the length of the Wrap Token's header + HdrLen = 16 + // FillerByte is a filler in the WrapToken structure + FillerByte byte = 0xFF +) + +// WrapToken represents a GSS API Wrap token, as defined in RFC 4121. 
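+// The 16-byte header carries the token ID 0x0504, the flags byte, a 0xFF
+// filler byte, and the EC, RRC and sender sequence number fields in
+// big-endian order.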
+// It contains the header fields, the payload and the checksum, and provides +// the logic for converting to/from bytes plus computing and verifying checksums +type WrapToken struct { + // const GSS Token ID: 0x0504 + Flags byte // contains three flags: acceptor, sealed, acceptor subkey + // const Filler: 0xFF + EC uint16 // checksum length. big-endian + RRC uint16 // right rotation count. big-endian + SndSeqNum uint64 // sender's sequence number. big-endian + Payload []byte // your data! :) + CheckSum []byte // authenticated checksum of { payload | header } +} + +// Return the 2 bytes identifying a GSS API Wrap token +func getGssWrapTokenId() *[2]byte { + return &[2]byte{0x05, 0x04} +} + +// Marshal the WrapToken into a byte slice. +// The payload should have been set and the checksum computed, otherwise an error is returned. +func (wt *WrapToken) Marshal() ([]byte, error) { + if wt.CheckSum == nil { + return nil, errors.New("checksum has not been set") + } + if wt.Payload == nil { + return nil, errors.New("payload has not been set") + } + + pldOffset := HdrLen // Offset of the payload in the token + chkSOffset := HdrLen + len(wt.Payload) // Offset of the checksum in the token + + bytes := make([]byte, chkSOffset+int(wt.EC)) + copy(bytes[0:], getGssWrapTokenId()[:]) + bytes[2] = wt.Flags + bytes[3] = FillerByte + binary.BigEndian.PutUint16(bytes[4:6], wt.EC) + binary.BigEndian.PutUint16(bytes[6:8], wt.RRC) + binary.BigEndian.PutUint64(bytes[8:16], wt.SndSeqNum) + copy(bytes[pldOffset:], wt.Payload) + copy(bytes[chkSOffset:], wt.CheckSum) + return bytes, nil +} + +// SetCheckSum uses the passed encryption key and key usage to compute the checksum over the payload and +// the header, and sets the CheckSum field of this WrapToken. +// If the payload has not been set or the checksum has already been set, an error is returned. +func (wt *WrapToken) SetCheckSum(key types.EncryptionKey, keyUsage uint32) error { + if wt.Payload == nil { + return errors.New("payload has not been set") + } + if wt.CheckSum != nil { + return errors.New("checksum has already been computed") + } + chkSum, cErr := wt.computeCheckSum(key, keyUsage) + if cErr != nil { + return cErr + } + wt.CheckSum = chkSum + return nil +} + +// ComputeCheckSum computes and returns the checksum of this token, computed using the passed key and key usage. +// Note: This will NOT update the struct's Checksum field. +func (wt *WrapToken) computeCheckSum(key types.EncryptionKey, keyUsage uint32) ([]byte, error) { + if wt.Payload == nil { + return nil, errors.New("cannot compute checksum with uninitialized payload") + } + // Build a slice containing { payload | header } + checksumMe := make([]byte, HdrLen+len(wt.Payload)) + copy(checksumMe[0:], wt.Payload) + copy(checksumMe[len(wt.Payload):], getChecksumHeader(wt.Flags, wt.SndSeqNum)) + + encType, err := crypto.GetEtype(key.KeyType) + if err != nil { + return nil, err + } + return encType.GetChecksumHash(key.KeyValue, checksumMe, keyUsage) +} + +// Build a header suitable for a checksum computation +func getChecksumHeader(flags byte, senderSeqNum uint64) []byte { + header := make([]byte, 16) + copy(header[0:], []byte{0x05, 0x04, flags, 0xFF, 0x00, 0x00, 0x00, 0x00}) + binary.BigEndian.PutUint64(header[8:], senderSeqNum) + return header +} + +// Verify computes the token's checksum with the provided key and usage, +// and compares it to the checksum present in the token. +// In case of any failure, (false, Err) is returned, with Err an explanatory error. 
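+// Verification recomputes the checksum over { payload | header } with the
+// supplied key and compares it to CheckSum using the constant-time hmac.Equal.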
+func (wt *WrapToken) Verify(key types.EncryptionKey, keyUsage uint32) (bool, error) { + computed, cErr := wt.computeCheckSum(key, keyUsage) + if cErr != nil { + return false, cErr + } + if !hmac.Equal(computed, wt.CheckSum) { + return false, fmt.Errorf( + "checksum mismatch. Computed: %s, Contained in token: %s", + hex.EncodeToString(computed), hex.EncodeToString(wt.CheckSum)) + } + return true, nil +} + +// Unmarshal bytes into the corresponding WrapToken. +// If expectFromAcceptor is true, we expect the token to have been emitted by the gss acceptor, +// and will check the according flag, returning an error if the token does not match the expectation. +func (wt *WrapToken) Unmarshal(b []byte, expectFromAcceptor bool) error { + // Check if we can read a whole header + if len(b) < 16 { + return errors.New("bytes shorter than header length") + } + // Is the Token ID correct? + if !bytes.Equal(getGssWrapTokenId()[:], b[0:2]) { + return fmt.Errorf("wrong Token ID. Expected %s, was %s", + hex.EncodeToString(getGssWrapTokenId()[:]), + hex.EncodeToString(b[0:2])) + } + // Check the acceptor flag + flags := b[2] + isFromAcceptor := flags&0x01 == 1 + if isFromAcceptor && !expectFromAcceptor { + return errors.New("unexpected acceptor flag is set: not expecting a token from the acceptor") + } + if !isFromAcceptor && expectFromAcceptor { + return errors.New("expected acceptor flag is not set: expecting a token from the acceptor, not the initiator") + } + // Check the filler byte + if b[3] != FillerByte { + return fmt.Errorf("unexpected filler byte: expecting 0xFF, was %s ", hex.EncodeToString(b[3:4])) + } + checksumL := binary.BigEndian.Uint16(b[4:6]) + // Sanity check on the checksum length + if int(checksumL) > len(b)-HdrLen { + return fmt.Errorf("inconsistent checksum length: %d bytes to parse, checksum length is %d", len(b), checksumL) + } + + wt.Flags = flags + wt.EC = checksumL + wt.RRC = binary.BigEndian.Uint16(b[6:8]) + wt.SndSeqNum = binary.BigEndian.Uint64(b[8:16]) + wt.Payload = b[16 : len(b)-int(checksumL)] + wt.CheckSum = b[len(b)-int(checksumL):] + return nil +} + +// NewInitiatorWrapToken builds a new initiator token (acceptor flag will be set to 0) and computes the authenticated checksum. +// Other flags are set to 0, and the RRC and sequence number are initialized to 0. +// Note that in certain circumstances you may need to provide a sequence number that has been defined earlier. +// This is currently not supported. +func NewInitiatorWrapToken(payload []byte, key types.EncryptionKey) (*WrapToken, error) { + encType, err := crypto.GetEtype(key.KeyType) + if err != nil { + return nil, err + } + + token := WrapToken{ + Flags: 0x00, // all zeroed out (this is a token sent by the initiator) + // Checksum size: length of output of the HMAC function, in bytes. + EC: uint16(encType.GetHMACBitLength() / 8), + RRC: 0, + SndSeqNum: 0, + Payload: payload, + } + + if err := token.SetCheckSum(key, keyusage.GSSAPI_INITIATOR_SEAL); err != nil { + return nil, err + } + + return &token, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/addrtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/addrtype/constants.go new file mode 100644 index 00000000000..457b89d7ace --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/addrtype/constants.go @@ -0,0 +1,15 @@ +// Package addrtype provides Address type assigned numbers. +package addrtype + +// Address type IDs. 
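+// Values follow the address type assignments in RFC 4120, section 7.5.3.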
+const ( + IPv4 int32 = 2 + Directional int32 = 3 + ChaosNet int32 = 5 + XNS int32 = 6 + ISO int32 = 7 + DECNETPhaseIV int32 = 12 + AppleTalkDDP int32 = 16 + NetBios int32 = 20 + IPv6 int32 = 24 +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/adtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/adtype/constants.go new file mode 100644 index 00000000000..e805b746660 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/adtype/constants.go @@ -0,0 +1,23 @@ +// Package adtype provides Authenticator type assigned numbers. +package adtype + +// Authenticator type IDs. +const ( + ADIfRelevant int32 = 1 + ADIntendedForServer int32 = 2 + ADIntendedForApplicationClass int32 = 3 + ADKDCIssued int32 = 4 + ADAndOr int32 = 5 + ADMandatoryTicketExtensions int32 = 6 + ADInTicketExtensions int32 = 7 + ADMandatoryForKDC int32 = 8 + OSFDCE int32 = 64 + SESAME int32 = 65 + ADOSFDCEPKICertID int32 = 66 + ADAuthenticationStrength int32 = 70 + ADFXFastArmor int32 = 71 + ADFXFastUsed int32 = 72 + ADWin2KPAC int32 = 128 + ADEtypeNegotiation int32 = 129 + //Reserved values 9-63 +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/asnAppTag/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/asnAppTag/constants.go new file mode 100644 index 00000000000..d74cd60e93e --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/asnAppTag/constants.go @@ -0,0 +1,24 @@ +// Package asnAppTag provides ASN1 application tag numbers. +package asnAppTag + +// ASN1 application tag numbers. +const ( + Ticket = 1 + Authenticator = 2 + EncTicketPart = 3 + ASREQ = 10 + TGSREQ = 12 + ASREP = 11 + TGSREP = 13 + APREQ = 14 + APREP = 15 + KRBSafe = 20 + KRBPriv = 21 + KRBCred = 22 + EncASRepPart = 25 + EncTGSRepPart = 26 + EncAPRepPart = 27 + EncKrbPrivPart = 28 + EncKrbCredPart = 29 + KRBError = 30 +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/chksumtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/chksumtype/constants.go new file mode 100644 index 00000000000..93db952dda1 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/chksumtype/constants.go @@ -0,0 +1,32 @@ +// Package chksumtype provides Kerberos 5 checksum type assigned numbers. +package chksumtype + +// Checksum type IDs. +const ( + //RESERVED : 0 + CRC32 int32 = 1 + RSA_MD4 int32 = 2 + RSA_MD4_DES int32 = 3 + DES_MAC int32 = 4 + DES_MAC_K int32 = 5 + RSA_MD4_DES_K int32 = 6 + RSA_MD5 int32 = 7 + RSA_MD5_DES int32 = 8 + RSA_MD5_DES3 int32 = 9 + SHA1_ID10 int32 = 10 + //UNASSIGNED : 11 + HMAC_SHA1_DES3_KD int32 = 12 + HMAC_SHA1_DES3 int32 = 13 + SHA1_ID14 int32 = 14 + HMAC_SHA1_96_AES128 int32 = 15 + HMAC_SHA1_96_AES256 int32 = 16 + CMAC_CAMELLIA128 int32 = 17 + CMAC_CAMELLIA256 int32 = 18 + HMAC_SHA256_128_AES128 int32 = 19 + HMAC_SHA384_192_AES256 int32 = 20 + //UNASSIGNED : 21-32770 + GSSAPI int32 = 32771 + //UNASSIGNED : 32772-2147483647 + KERB_CHECKSUM_HMAC_MD5_UNSIGNED uint32 = 4294967158 // 0xFFFFFF76 documentation says this is -138 but in an unsigned int this is 4294967158 + KERB_CHECKSUM_HMAC_MD5 int32 = -138 +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/constants.go new file mode 100644 index 00000000000..0b8e916d5b6 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/constants.go @@ -0,0 +1,5 @@ +// Package iana provides Kerberos 5 assigned numbers. +package iana + +// PVNO is the Protocol Version Number. 
+const PVNO = 5 diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/errorcode/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/errorcode/constants.go new file mode 100644 index 00000000000..fd756bc5e36 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/errorcode/constants.go @@ -0,0 +1,155 @@ +// Package errorcode provides Kerberos 5 assigned error codes. +package errorcode + +import "fmt" + +// Kerberos error codes. +const ( + KDC_ERR_NONE int32 = 0 //No error + KDC_ERR_NAME_EXP int32 = 1 //Client's entry in database has expired + KDC_ERR_SERVICE_EXP int32 = 2 //Server's entry in database has expired + KDC_ERR_BAD_PVNO int32 = 3 //Requested protocol version number not supported + KDC_ERR_C_OLD_MAST_KVNO int32 = 4 //Client's key encrypted in old master key + KDC_ERR_S_OLD_MAST_KVNO int32 = 5 //Server's key encrypted in old master key + KDC_ERR_C_PRINCIPAL_UNKNOWN int32 = 6 //Client not found in Kerberos database + KDC_ERR_S_PRINCIPAL_UNKNOWN int32 = 7 //Server not found in Kerberos database + KDC_ERR_PRINCIPAL_NOT_UNIQUE int32 = 8 //Multiple principal entries in database + KDC_ERR_NULL_KEY int32 = 9 //The client or server has a null key + KDC_ERR_CANNOT_POSTDATE int32 = 10 //Ticket not eligible for postdating + KDC_ERR_NEVER_VALID int32 = 11 //Requested starttime is later than end time + KDC_ERR_POLICY int32 = 12 //KDC policy rejects request + KDC_ERR_BADOPTION int32 = 13 //KDC cannot accommodate requested option + KDC_ERR_ETYPE_NOSUPP int32 = 14 //KDC has no support for encryption type + KDC_ERR_SUMTYPE_NOSUPP int32 = 15 //KDC has no support for checksum type + KDC_ERR_PADATA_TYPE_NOSUPP int32 = 16 //KDC has no support for padata type + KDC_ERR_TRTYPE_NOSUPP int32 = 17 //KDC has no support for transited type + KDC_ERR_CLIENT_REVOKED int32 = 18 //Clients credentials have been revoked + KDC_ERR_SERVICE_REVOKED int32 = 19 //Credentials for server have been revoked + KDC_ERR_TGT_REVOKED int32 = 20 //TGT has been revoked + KDC_ERR_CLIENT_NOTYET int32 = 21 //Client not yet valid; try again later + KDC_ERR_SERVICE_NOTYET int32 = 22 //Server not yet valid; try again later + KDC_ERR_KEY_EXPIRED int32 = 23 //Password has expired; change password to reset + KDC_ERR_PREAUTH_FAILED int32 = 24 //Pre-authentication information was invalid + KDC_ERR_PREAUTH_REQUIRED int32 = 25 //Additional pre-authentication required + KDC_ERR_SERVER_NOMATCH int32 = 26 //Requested server and ticket don't match + KDC_ERR_MUST_USE_USER2USER int32 = 27 //Server principal valid for user2user only + KDC_ERR_PATH_NOT_ACCEPTED int32 = 28 //KDC Policy rejects transited path + KDC_ERR_SVC_UNAVAILABLE int32 = 29 //A service is not available + KRB_AP_ERR_BAD_INTEGRITY int32 = 31 //Integrity check on decrypted field failed + KRB_AP_ERR_TKT_EXPIRED int32 = 32 //Ticket expired + KRB_AP_ERR_TKT_NYV int32 = 33 //Ticket not yet valid + KRB_AP_ERR_REPEAT int32 = 34 //Request is a replay + KRB_AP_ERR_NOT_US int32 = 35 //The ticket isn't for us + KRB_AP_ERR_BADMATCH int32 = 36 //Ticket and authenticator don't match + KRB_AP_ERR_SKEW int32 = 37 //Clock skew too great + KRB_AP_ERR_BADADDR int32 = 38 //Incorrect net address + KRB_AP_ERR_BADVERSION int32 = 39 //Protocol version mismatch + KRB_AP_ERR_MSG_TYPE int32 = 40 //Invalid msg type + KRB_AP_ERR_MODIFIED int32 = 41 //Message stream modified + KRB_AP_ERR_BADORDER int32 = 42 //Message out of order + KRB_AP_ERR_BADKEYVER int32 = 44 //Specified version of key is not available + KRB_AP_ERR_NOKEY int32 = 45 //Service key not available + KRB_AP_ERR_MUT_FAIL int32 = 46 
//Mutual authentication failed + KRB_AP_ERR_BADDIRECTION int32 = 47 //Incorrect message direction + KRB_AP_ERR_METHOD int32 = 48 //Alternative authentication method required + KRB_AP_ERR_BADSEQ int32 = 49 //Incorrect sequence number in message + KRB_AP_ERR_INAPP_CKSUM int32 = 50 //Inappropriate type of checksum in message + KRB_AP_PATH_NOT_ACCEPTED int32 = 51 //Policy rejects transited path + KRB_ERR_RESPONSE_TOO_BIG int32 = 52 //Response too big for UDP; retry with TCP + KRB_ERR_GENERIC int32 = 60 //Generic error (description in e-text) + KRB_ERR_FIELD_TOOLONG int32 = 61 //Field is too long for this implementation + KDC_ERROR_CLIENT_NOT_TRUSTED int32 = 62 //Reserved for PKINIT + KDC_ERROR_KDC_NOT_TRUSTED int32 = 63 //Reserved for PKINIT + KDC_ERROR_INVALID_SIG int32 = 64 //Reserved for PKINIT + KDC_ERR_KEY_TOO_WEAK int32 = 65 //Reserved for PKINIT + KDC_ERR_CERTIFICATE_MISMATCH int32 = 66 //Reserved for PKINIT + KRB_AP_ERR_NO_TGT int32 = 67 //No TGT available to validate USER-TO-USER + KDC_ERR_WRONG_REALM int32 = 68 //Reserved for future use + KRB_AP_ERR_USER_TO_USER_REQUIRED int32 = 69 //Ticket must be for USER-TO-USER + KDC_ERR_CANT_VERIFY_CERTIFICATE int32 = 70 //Reserved for PKINIT + KDC_ERR_INVALID_CERTIFICATE int32 = 71 //Reserved for PKINIT + KDC_ERR_REVOKED_CERTIFICATE int32 = 72 //Reserved for PKINIT + KDC_ERR_REVOCATION_STATUS_UNKNOWN int32 = 73 //Reserved for PKINIT + KDC_ERR_REVOCATION_STATUS_UNAVAILABLE int32 = 74 //Reserved for PKINIT + KDC_ERR_CLIENT_NAME_MISMATCH int32 = 75 //Reserved for PKINIT + KDC_ERR_KDC_NAME_MISMATCH int32 = 76 //Reserved for PKINIT +) + +// Lookup an error code description. +func Lookup(i int32) string { + if s, ok := errorcodeLookup[i]; ok { + return fmt.Sprintf("(%d) %s", i, s) + } + return fmt.Sprintf("Unknown ErrorCode %d", i) +} + +var errorcodeLookup = map[int32]string{ + KDC_ERR_NONE: "KDC_ERR_NONE No error", + KDC_ERR_NAME_EXP: "KDC_ERR_NAME_EXP Client's entry in database has expired", + KDC_ERR_SERVICE_EXP: "KDC_ERR_SERVICE_EXP Server's entry in database has expired", + KDC_ERR_BAD_PVNO: "KDC_ERR_BAD_PVNO Requested protocol version number not supported", + KDC_ERR_C_OLD_MAST_KVNO: "KDC_ERR_C_OLD_MAST_KVNO Client's key encrypted in old master key", + KDC_ERR_S_OLD_MAST_KVNO: "KDC_ERR_S_OLD_MAST_KVNO Server's key encrypted in old master key", + KDC_ERR_C_PRINCIPAL_UNKNOWN: "KDC_ERR_C_PRINCIPAL_UNKNOWN Client not found in Kerberos database", + KDC_ERR_S_PRINCIPAL_UNKNOWN: "KDC_ERR_S_PRINCIPAL_UNKNOWN Server not found in Kerberos database", + KDC_ERR_PRINCIPAL_NOT_UNIQUE: "KDC_ERR_PRINCIPAL_NOT_UNIQUE Multiple principal entries in database", + KDC_ERR_NULL_KEY: "KDC_ERR_NULL_KEY The client or server has a null key", + KDC_ERR_CANNOT_POSTDATE: "KDC_ERR_CANNOT_POSTDATE Ticket not eligible for postdating", + KDC_ERR_NEVER_VALID: "KDC_ERR_NEVER_VALID Requested starttime is later than end time", + KDC_ERR_POLICY: "KDC_ERR_POLICY KDC policy rejects request", + KDC_ERR_BADOPTION: "KDC_ERR_BADOPTION KDC cannot accommodate requested option", + KDC_ERR_ETYPE_NOSUPP: "KDC_ERR_ETYPE_NOSUPP KDC has no support for encryption type", + KDC_ERR_SUMTYPE_NOSUPP: "KDC_ERR_SUMTYPE_NOSUPP KDC has no support for checksum type", + KDC_ERR_PADATA_TYPE_NOSUPP: "KDC_ERR_PADATA_TYPE_NOSUPP KDC has no support for padata type", + KDC_ERR_TRTYPE_NOSUPP: "KDC_ERR_TRTYPE_NOSUPP KDC has no support for transited type", + KDC_ERR_CLIENT_REVOKED: "KDC_ERR_CLIENT_REVOKED Clients credentials have been revoked", + KDC_ERR_SERVICE_REVOKED: "KDC_ERR_SERVICE_REVOKED Credentials for 
server have been revoked", + KDC_ERR_TGT_REVOKED: "KDC_ERR_TGT_REVOKED TGT has been revoked", + KDC_ERR_CLIENT_NOTYET: "KDC_ERR_CLIENT_NOTYET Client not yet valid; try again later", + KDC_ERR_SERVICE_NOTYET: "KDC_ERR_SERVICE_NOTYET Server not yet valid; try again later", + KDC_ERR_KEY_EXPIRED: "KDC_ERR_KEY_EXPIRED Password has expired; change password to reset", + KDC_ERR_PREAUTH_FAILED: "KDC_ERR_PREAUTH_FAILED Pre-authentication information was invalid", + KDC_ERR_PREAUTH_REQUIRED: "KDC_ERR_PREAUTH_REQUIRED Additional pre-authentication required", + KDC_ERR_SERVER_NOMATCH: "KDC_ERR_SERVER_NOMATCH Requested server and ticket don't match", + KDC_ERR_MUST_USE_USER2USER: "KDC_ERR_MUST_USE_USER2USER Server principal valid for user2user only", + KDC_ERR_PATH_NOT_ACCEPTED: "KDC_ERR_PATH_NOT_ACCEPTED KDC Policy rejects transited path", + KDC_ERR_SVC_UNAVAILABLE: "KDC_ERR_SVC_UNAVAILABLE A service is not available", + KRB_AP_ERR_BAD_INTEGRITY: "KRB_AP_ERR_BAD_INTEGRITY Integrity check on decrypted field failed", + KRB_AP_ERR_TKT_EXPIRED: "KRB_AP_ERR_TKT_EXPIRED Ticket expired", + KRB_AP_ERR_TKT_NYV: "KRB_AP_ERR_TKT_NYV Ticket not yet valid", + KRB_AP_ERR_REPEAT: "KRB_AP_ERR_REPEAT Request is a replay", + KRB_AP_ERR_NOT_US: "KRB_AP_ERR_NOT_US The ticket isn't for us", + KRB_AP_ERR_BADMATCH: "KRB_AP_ERR_BADMATCH Ticket and authenticator don't match", + KRB_AP_ERR_SKEW: "KRB_AP_ERR_SKEW Clock skew too great", + KRB_AP_ERR_BADADDR: "KRB_AP_ERR_BADADDR Incorrect net address", + KRB_AP_ERR_BADVERSION: "KRB_AP_ERR_BADVERSION Protocol version mismatch", + KRB_AP_ERR_MSG_TYPE: "KRB_AP_ERR_MSG_TYPE Invalid msg type", + KRB_AP_ERR_MODIFIED: "KRB_AP_ERR_MODIFIED Message stream modified", + KRB_AP_ERR_BADORDER: "KRB_AP_ERR_BADORDER Message out of order", + KRB_AP_ERR_BADKEYVER: "KRB_AP_ERR_BADKEYVER Specified version of key is not available", + KRB_AP_ERR_NOKEY: "KRB_AP_ERR_NOKEY Service key not available", + KRB_AP_ERR_MUT_FAIL: "KRB_AP_ERR_MUT_FAIL Mutual authentication failed", + KRB_AP_ERR_BADDIRECTION: "KRB_AP_ERR_BADDIRECTION Incorrect message direction", + KRB_AP_ERR_METHOD: "KRB_AP_ERR_METHOD Alternative authentication method required", + KRB_AP_ERR_BADSEQ: "KRB_AP_ERR_BADSEQ Incorrect sequence number in message", + KRB_AP_ERR_INAPP_CKSUM: "KRB_AP_ERR_INAPP_CKSUM Inappropriate type of checksum in message", + KRB_AP_PATH_NOT_ACCEPTED: "KRB_AP_PATH_NOT_ACCEPTED Policy rejects transited path", + KRB_ERR_RESPONSE_TOO_BIG: "KRB_ERR_RESPONSE_TOO_BIG Response too big for UDP; retry with TCP", + KRB_ERR_GENERIC: "KRB_ERR_GENERIC Generic error (description in e-text)", + KRB_ERR_FIELD_TOOLONG: "KRB_ERR_FIELD_TOOLONG Field is too long for this implementation", + KDC_ERROR_CLIENT_NOT_TRUSTED: "KDC_ERROR_CLIENT_NOT_TRUSTED Reserved for PKINIT", + KDC_ERROR_KDC_NOT_TRUSTED: "KDC_ERROR_KDC_NOT_TRUSTED Reserved for PKINIT", + KDC_ERROR_INVALID_SIG: "KDC_ERROR_INVALID_SIG Reserved for PKINIT", + KDC_ERR_KEY_TOO_WEAK: "KDC_ERR_KEY_TOO_WEAK Reserved for PKINIT", + KDC_ERR_CERTIFICATE_MISMATCH: "KDC_ERR_CERTIFICATE_MISMATCH Reserved for PKINIT", + KRB_AP_ERR_NO_TGT: "KRB_AP_ERR_NO_TGT No TGT available to validate USER-TO-USER", + KDC_ERR_WRONG_REALM: "KDC_ERR_WRONG_REALM Reserved for future use", + KRB_AP_ERR_USER_TO_USER_REQUIRED: "KRB_AP_ERR_USER_TO_USER_REQUIRED Ticket must be for USER-TO-USER", + KDC_ERR_CANT_VERIFY_CERTIFICATE: "KDC_ERR_CANT_VERIFY_CERTIFICATE Reserved for PKINIT", + KDC_ERR_INVALID_CERTIFICATE: "KDC_ERR_INVALID_CERTIFICATE Reserved for PKINIT", + KDC_ERR_REVOKED_CERTIFICATE: 
"KDC_ERR_REVOKED_CERTIFICATE Reserved for PKINIT", + KDC_ERR_REVOCATION_STATUS_UNKNOWN: "KDC_ERR_REVOCATION_STATUS_UNKNOWN Reserved for PKINIT", + KDC_ERR_REVOCATION_STATUS_UNAVAILABLE: "KDC_ERR_REVOCATION_STATUS_UNAVAILABLE Reserved for PKINIT", + KDC_ERR_CLIENT_NAME_MISMATCH: "KDC_ERR_CLIENT_NAME_MISMATCH Reserved for PKINIT", + KDC_ERR_KDC_NAME_MISMATCH: "KDC_ERR_KDC_NAME_MISMATCH Reserved for PKINIT", +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/etypeID/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/etypeID/constants.go new file mode 100644 index 00000000000..46a0d748fb7 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/etypeID/constants.go @@ -0,0 +1,101 @@ +// Package etypeID provides Kerberos 5 encryption type assigned numbers. +package etypeID + +// Kerberos encryption type assigned numbers. +const ( + //RESERVED : 0 + DES_CBC_CRC int32 = 1 + DES_CBC_MD4 int32 = 2 + DES_CBC_MD5 int32 = 3 + DES_CBC_RAW int32 = 4 + DES3_CBC_MD5 int32 = 5 + DES3_CBC_RAW int32 = 6 + DES3_CBC_SHA1 int32 = 7 + DES_HMAC_SHA1 int32 = 8 + DSAWITHSHA1_CMSOID int32 = 9 + MD5WITHRSAENCRYPTION_CMSOID int32 = 10 + SHA1WITHRSAENCRYPTION_CMSOID int32 = 11 + RC2CBC_ENVOID int32 = 12 + RSAENCRYPTION_ENVOID int32 = 13 + RSAES_OAEP_ENV_OID int32 = 14 + DES_EDE3_CBC_ENV_OID int32 = 15 + DES3_CBC_SHA1_KD int32 = 16 + AES128_CTS_HMAC_SHA1_96 int32 = 17 + AES256_CTS_HMAC_SHA1_96 int32 = 18 + AES128_CTS_HMAC_SHA256_128 int32 = 19 + AES256_CTS_HMAC_SHA384_192 int32 = 20 + //UNASSIGNED : 21-22 + RC4_HMAC int32 = 23 + RC4_HMAC_EXP int32 = 24 + CAMELLIA128_CTS_CMAC int32 = 25 + CAMELLIA256_CTS_CMAC int32 = 26 + //UNASSIGNED : 27-64 + SUBKEY_KEYMATERIAL int32 = 65 + //UNASSIGNED : 66-2147483647 +) + +// ETypesByName is a map of EncType names to their assigned EncType number. +var ETypesByName = map[string]int32{ + "des-cbc-crc": DES_CBC_CRC, + "des-cbc-md4": DES_CBC_MD4, + "des-cbc-md5": DES_CBC_MD5, + "des-cbc-raw": DES_CBC_RAW, + "des3-cbc-md5": DES3_CBC_MD5, + "des3-cbc-raw": DES3_CBC_RAW, + "des3-cbc-sha1": DES3_CBC_SHA1, + "des3-hmac-sha1": DES_HMAC_SHA1, + "des3-cbc-sha1-kd": DES3_CBC_SHA1_KD, + "des-hmac-sha1": DES_HMAC_SHA1, + "dsaWithSHA1-CmsOID": DSAWITHSHA1_CMSOID, + "md5WithRSAEncryption-CmsOID": MD5WITHRSAENCRYPTION_CMSOID, + "sha1WithRSAEncryption-CmsOID": SHA1WITHRSAENCRYPTION_CMSOID, + "rc2CBC-EnvOID": RC2CBC_ENVOID, + "rsaEncryption-EnvOID": RSAENCRYPTION_ENVOID, + "rsaES-OAEP-ENV-OID": RSAES_OAEP_ENV_OID, + "des-ede3-cbc-Env-OID": DES_EDE3_CBC_ENV_OID, + "aes128-cts-hmac-sha1-96": AES128_CTS_HMAC_SHA1_96, + "aes128-cts": AES128_CTS_HMAC_SHA1_96, + "aes128-sha1": AES128_CTS_HMAC_SHA1_96, + "aes256-cts-hmac-sha1-96": AES256_CTS_HMAC_SHA1_96, + "aes256-cts": AES256_CTS_HMAC_SHA1_96, + "aes256-sha1": AES256_CTS_HMAC_SHA1_96, + "aes128-cts-hmac-sha256-128": AES128_CTS_HMAC_SHA256_128, + "aes128-sha2": AES128_CTS_HMAC_SHA256_128, + "aes256-cts-hmac-sha384-192": AES256_CTS_HMAC_SHA384_192, + "aes256-sha2": AES256_CTS_HMAC_SHA384_192, + "arcfour-hmac": RC4_HMAC, + "rc4-hmac": RC4_HMAC, + "arcfour-hmac-md5": RC4_HMAC, + "arcfour-hmac-exp": RC4_HMAC_EXP, + "rc4-hmac-exp": RC4_HMAC_EXP, + "arcfour-hmac-md5-exp": RC4_HMAC_EXP, + "camellia128-cts-cmac": CAMELLIA128_CTS_CMAC, + "camellia128-cts": CAMELLIA128_CTS_CMAC, + "camellia256-cts-cmac": CAMELLIA256_CTS_CMAC, + "camellia256-cts": CAMELLIA256_CTS_CMAC, + "subkey-keymaterial": SUBKEY_KEYMATERIAL, +} + +// EtypeSupported resolves the etype name string to the etype ID. +// If zero is returned the etype is not supported by gokrb5. 
+func EtypeSupported(etype string) int32 { + // Slice of supported enctype IDs + s := []int32{ + AES128_CTS_HMAC_SHA1_96, + AES256_CTS_HMAC_SHA1_96, + AES128_CTS_HMAC_SHA256_128, + AES256_CTS_HMAC_SHA384_192, + DES3_CBC_SHA1_KD, + RC4_HMAC, + } + id := ETypesByName[etype] + if id == 0 { + return id + } + for _, sid := range s { + if id == sid { + return id + } + } + return 0 +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/flags/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/flags/constants.go new file mode 100644 index 00000000000..787801f8f99 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/flags/constants.go @@ -0,0 +1,36 @@ +// Package flags provides Kerberos 5 flag assigned numbers. +package flags + +// Flag values for KRB5 messages and tickets. +const ( + Reserved = 0 + Forwardable = 1 + Forwarded = 2 + Proxiable = 3 + Proxy = 4 + AllowPostDate = 5 + MayPostDate = 5 + PostDated = 6 + Invalid = 7 + Renewable = 8 + Initial = 9 + PreAuthent = 10 + HWAuthent = 11 + OptHardwareAuth = 11 + RequestAnonymous = 12 + TransitedPolicyChecked = 12 + OKAsDelegate = 13 + EncPARep = 15 + Canonicalize = 15 + DisableTransitedCheck = 26 + RenewableOK = 27 + EncTktInSkey = 28 + Renew = 30 + Validate = 31 + + // AP Option Flags + // 0 Reserved for future use. + APOptionUseSessionKey = 1 + APOptionMutualRequired = 2 + // 3-31 Reserved for future use. +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/keyusage/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/keyusage/constants.go new file mode 100644 index 00000000000..5b232d1d40d --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/keyusage/constants.go @@ -0,0 +1,42 @@ +// Package keyusage provides Kerberos 5 key usage assigned numbers. +package keyusage + +// Key usage numbers. +const ( + AS_REQ_PA_ENC_TIMESTAMP = 1 + KDC_REP_TICKET = 2 + AS_REP_ENCPART = 3 + TGS_REQ_KDC_REQ_BODY_AUTHDATA_SESSION_KEY = 4 + TGS_REQ_KDC_REQ_BODY_AUTHDATA_SUB_KEY = 5 + TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM = 6 + TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR = 7 + TGS_REP_ENCPART_SESSION_KEY = 8 + TGS_REP_ENCPART_AUTHENTICATOR_SUB_KEY = 9 + AP_REQ_AUTHENTICATOR_CHKSUM = 10 + AP_REQ_AUTHENTICATOR = 11 + AP_REP_ENCPART = 12 + KRB_PRIV_ENCPART = 13 + KRB_CRED_ENCPART = 14 + KRB_SAFE_CHKSUM = 15 + KERB_NON_KERB_SALT = 16 + KERB_NON_KERB_CKSUM_SALT = 17 + //18. Reserved for future use in Kerberos and related protocols. + AD_KDC_ISSUED_CHKSUM = 19 + //20-21. Reserved for future use in Kerberos and related protocols. + GSSAPI_ACCEPTOR_SEAL = 22 + GSSAPI_ACCEPTOR_SIGN = 23 + GSSAPI_INITIATOR_SEAL = 24 + GSSAPI_INITIATOR_SIGN = 25 + KEY_USAGE_FAST_REQ_CHKSUM = 50 + KEY_USAGE_FAST_ENC = 51 + KEY_USAGE_FAST_REP = 52 + KEY_USAGE_FAST_FINISHED = 53 + KEY_USAGE_ENC_CHALLENGE_CLIENT = 54 + KEY_USAGE_ENC_CHALLENGE_KDC = 55 + KEY_USAGE_AS_REQ = 56 + //26-511. Reserved for future use in Kerberos and related protocols. + //512-1023. Reserved for uses internal to a Kerberos implementation. + //1024. Encryption for application use in protocols that do not specify key usage values + //1025. Checksums for application use in protocols that do not specify key usage values + //1026-2047. Reserved for application use. 
+) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/msgtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/msgtype/constants.go new file mode 100644 index 00000000000..ad21810b675 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/msgtype/constants.go @@ -0,0 +1,18 @@ +// Package msgtype provides Kerberos 5 message type assigned numbers. +package msgtype + +// KRB message type IDs. +const ( + KRB_AS_REQ = 10 //Request for initial authentication + KRB_AS_REP = 11 //Response to KRB_AS_REQ request + KRB_TGS_REQ = 12 //Request for authentication based on TGT + KRB_TGS_REP = 13 //Response to KRB_TGS_REQ request + KRB_AP_REQ = 14 //Application request to server + KRB_AP_REP = 15 //Response to KRB_AP_REQ_MUTUAL + KRB_RESERVED16 = 16 //Reserved for user-to-user krb_tgt_request + KRB_RESERVED17 = 17 //Reserved for user-to-user krb_tgt_reply + KRB_SAFE = 20 // Safe (checksummed) application message + KRB_PRIV = 21 // Private (encrypted) application message + KRB_CRED = 22 //Private (encrypted) message to forward credentials + KRB_ERROR = 30 //Error response +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/nametype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/nametype/constants.go new file mode 100644 index 00000000000..c111a05f926 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/nametype/constants.go @@ -0,0 +1,15 @@ +// Package nametype provides Kerberos 5 principal name type numbers. +package nametype + +// Kerberos name type IDs. +const ( + KRB_NT_UNKNOWN int32 = 0 //Name type not known + KRB_NT_PRINCIPAL int32 = 1 //Just the name of the principal as in DCE, or for users + KRB_NT_SRV_INST int32 = 2 //Service and other unique instance (krbtgt) + KRB_NT_SRV_HST int32 = 3 //Service with host name as instance (telnet, rcommands) + KRB_NT_SRV_XHST int32 = 4 //Service with host as remaining components + KRB_NT_UID int32 = 5 //Unique ID + KRB_NT_X500_PRINCIPAL int32 = 6 //Encoded X.509 Distinguished name [RFC2253] + KRB_NT_SMTP_NAME int32 = 7 //Name in form of SMTP email name (e.g., user@example.com) + KRB_NT_ENTERPRISE int32 = 10 //Enterprise name; may be mapped to principal name +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/patype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/patype/constants.go new file mode 100644 index 00000000000..aa04f63765c --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/patype/constants.go @@ -0,0 +1,77 @@ +// Package patype provides Kerberos 5 pre-authentication type assigned numbers. +package patype + +// Kerberos pre-authentication type assigned numbers. 
+const ( + PA_TGS_REQ int32 = 1 + PA_ENC_TIMESTAMP int32 = 2 + PA_PW_SALT int32 = 3 + //RESERVED : 4 + PA_ENC_UNIX_TIME int32 = 5 + PA_SANDIA_SECUREID int32 = 6 + PA_SESAME int32 = 7 + PA_OSF_DCE int32 = 8 + PA_CYBERSAFE_SECUREID int32 = 9 + PA_AFS3_SALT int32 = 10 + PA_ETYPE_INFO int32 = 11 + PA_SAM_CHALLENGE int32 = 12 + PA_SAM_RESPONSE int32 = 13 + PA_PK_AS_REQ_OLD int32 = 14 + PA_PK_AS_REP_OLD int32 = 15 + PA_PK_AS_REQ int32 = 16 + PA_PK_AS_REP int32 = 17 + PA_PK_OCSP_RESPONSE int32 = 18 + PA_ETYPE_INFO2 int32 = 19 + PA_USE_SPECIFIED_KVNO int32 = 20 + PA_SVR_REFERRAL_INFO int32 = 20 + PA_SAM_REDIRECT int32 = 21 + PA_GET_FROM_TYPED_DATA int32 = 22 + TD_PADATA int32 = 22 + PA_SAM_ETYPE_INFO int32 = 23 + PA_ALT_PRINC int32 = 24 + PA_SERVER_REFERRAL int32 = 25 + //UNASSIGNED : 26-29 + PA_SAM_CHALLENGE2 int32 = 30 + PA_SAM_RESPONSE2 int32 = 31 + //UNASSIGNED : 32-40 + PA_EXTRA_TGT int32 = 41 + //UNASSIGNED : 42-100 + TD_PKINIT_CMS_CERTIFICATES int32 = 101 + TD_KRB_PRINCIPAL int32 = 102 + TD_KRB_REALM int32 = 103 + TD_TRUSTED_CERTIFIERS int32 = 104 + TD_CERTIFICATE_INDEX int32 = 105 + TD_APP_DEFINED_ERROR int32 = 106 + TD_REQ_NONCE int32 = 107 + TD_REQ_SEQ int32 = 108 + TD_DH_PARAMETERS int32 = 109 + //UNASSIGNED : 110 + TD_CMS_DIGEST_ALGORITHMS int32 = 111 + TD_CERT_DIGEST_ALGORITHMS int32 = 112 + //UNASSIGNED : 113-127 + PA_PAC_REQUEST int32 = 128 + PA_FOR_USER int32 = 129 + PA_FOR_X509_USER int32 = 130 + PA_FOR_CHECK_DUPS int32 = 131 + PA_AS_CHECKSUM int32 = 132 + PA_FX_COOKIE int32 = 133 + PA_AUTHENTICATION_SET int32 = 134 + PA_AUTH_SET_SELECTED int32 = 135 + PA_FX_FAST int32 = 136 + PA_FX_ERROR int32 = 137 + PA_ENCRYPTED_CHALLENGE int32 = 138 + //UNASSIGNED : 139-140 + PA_OTP_CHALLENGE int32 = 141 + PA_OTP_REQUEST int32 = 142 + PA_OTP_CONFIRM int32 = 143 + PA_OTP_PIN_CHANGE int32 = 144 + PA_EPAK_AS_REQ int32 = 145 + PA_EPAK_AS_REP int32 = 146 + PA_PKINIT_KX int32 = 147 + PA_PKU2U_NAME int32 = 148 + PA_REQ_ENC_PA_REP int32 = 149 + PA_AS_FRESHNESS int32 = 150 + //UNASSIGNED : 151-164 + PA_SUPPORTED_ETYPES int32 = 165 + PA_EXTENDED_ERROR int32 = 166 +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/kadmin/changepasswddata.go b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/changepasswddata.go new file mode 100644 index 00000000000..2d68eda1924 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/changepasswddata.go @@ -0,0 +1,23 @@ +package kadmin + +import ( + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/types" +) + +// ChangePasswdData is the payload to a password change message. +type ChangePasswdData struct { + NewPasswd []byte `asn1:"explicit,tag:0"` + TargName types.PrincipalName `asn1:"explicit,optional,tag:1"` + TargRealm string `asn1:"generalstring,optional,explicit,tag:2"` +} + +// Marshal ChangePasswdData into a byte slice. +func (c *ChangePasswdData) Marshal() ([]byte, error) { + b, err := asn1.Marshal(*c) + if err != nil { + return []byte{}, err + } + //b = asn1tools.AddASNAppTag(b, asnAppTag.) 
+ return b, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/kadmin/message.go b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/message.go new file mode 100644 index 00000000000..d1864c998be --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/message.go @@ -0,0 +1,114 @@ +package kadmin + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +const ( + verisonHex = "ff80" +) + +// Request message for changing password. +type Request struct { + APREQ messages.APReq + KRBPriv messages.KRBPriv +} + +// Reply message for a password change. +type Reply struct { + MessageLength int + Version int + APREPLength int + APREP messages.APRep + KRBPriv messages.KRBPriv + KRBError messages.KRBError + IsKRBError bool + ResultCode uint16 + Result string +} + +// Marshal a Request into a byte slice. +func (m *Request) Marshal() (b []byte, err error) { + b = []byte{255, 128} // protocol version number: contains the hex constant 0xff80 (big-endian integer). + ab, e := m.APREQ.Marshal() + if e != nil { + err = fmt.Errorf("error marshaling AP_REQ: %v", e) + return + } + if len(ab) > math.MaxUint16 { + err = errors.New("length of AP_REQ greater than max Uint16 size") + return + } + al := make([]byte, 2) + binary.BigEndian.PutUint16(al, uint16(len(ab))) + b = append(b, al...) + b = append(b, ab...) + pb, e := m.KRBPriv.Marshal() + if e != nil { + err = fmt.Errorf("error marshaling KRB_Priv: %v", e) + return + } + b = append(b, pb...) + if len(b)+2 > math.MaxUint16 { + err = errors.New("length of message greater than max Uint16 size") + return + } + ml := make([]byte, 2) + binary.BigEndian.PutUint16(ml, uint16(len(b)+2)) + b = append(ml, b...) + return +} + +// Unmarshal a byte slice into a Reply. +func (m *Reply) Unmarshal(b []byte) error { + m.MessageLength = int(binary.BigEndian.Uint16(b[0:2])) + m.Version = int(binary.BigEndian.Uint16(b[2:4])) + if m.Version != 1 { + return fmt.Errorf("kadmin reply has incorrect protocol version number: %d", m.Version) + } + m.APREPLength = int(binary.BigEndian.Uint16(b[4:6])) + if m.APREPLength != 0 { + err := m.APREP.Unmarshal(b[6 : 6+m.APREPLength]) + if err != nil { + return err + } + err = m.KRBPriv.Unmarshal(b[6+m.APREPLength : m.MessageLength]) + if err != nil { + return err + } + } else { + m.IsKRBError = true + m.KRBError.Unmarshal(b[6:m.MessageLength]) + m.ResultCode, m.Result = parseResponse(m.KRBError.EData) + } + return nil +} + +func parseResponse(b []byte) (c uint16, s string) { + c = binary.BigEndian.Uint16(b[0:2]) + buf := bytes.NewBuffer(b[2:]) + m := make([]byte, len(b)-2) + binary.Read(buf, binary.BigEndian, &m) + s = string(m) + return +} + +// Decrypt the encrypted part of the KRBError within the change password Reply. +func (m *Reply) Decrypt(key types.EncryptionKey) error { + if m.IsKRBError { + return m.KRBError + } + err := m.KRBPriv.DecryptEncPart(key) + if err != nil { + return err + } + m.ResultCode, m.Result = parseResponse(m.KRBPriv.DecryptedEncPart.UserData) + return nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/kadmin/passwd.go b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/passwd.go new file mode 100644 index 00000000000..db199bffcb2 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/passwd.go @@ -0,0 +1,68 @@ +// Package kadmin provides Kerberos administration capabilities.
+package kadmin + +import ( + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// ChangePasswdMsg generates a change password request and also returns the key needed to decrypt the reply. +func ChangePasswdMsg(cname types.PrincipalName, realm, password string, tkt messages.Ticket, sessionKey types.EncryptionKey) (r Request, k types.EncryptionKey, err error) { + // Create change password data struct and marshal to bytes + chgpasswd := ChangePasswdData{ + NewPasswd: []byte(password), + TargName: cname, + TargRealm: realm, + } + chpwdb, err := chgpasswd.Marshal() + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error marshaling change passwd data") + return + } + + // Generate authenticator + auth, err := types.NewAuthenticator(realm, cname) + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator") + return + } + etype, err := crypto.GetEtype(sessionKey.KeyType) + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey etype") + return + } + err = auth.GenerateSeqNumberAndSubKey(etype.GetETypeID(), etype.GetKeyByteSize()) + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey") + return + } + k = auth.SubKey + + // Generate AP_REQ + APreq, err := messages.NewAPReq(tkt, sessionKey, auth) + if err != nil { + return + } + + // Form the KRBPriv encpart data + kp := messages.EncKrbPrivPart{ + UserData: chpwdb, + Timestamp: auth.CTime, + Usec: auth.Cusec, + SequenceNumber: auth.SeqNumber, + } + kpriv := messages.NewKRBPriv(kp) + err = kpriv.EncryptEncPart(k) + if err != nil { + err = krberror.Errorf(err, krberror.EncryptingError, "error encrypting change passwd data") + return + } + + r = Request{ + APREQ: APreq, + KRBPriv: kpriv, + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go new file mode 100644 index 00000000000..5c2e9d79a1b --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go @@ -0,0 +1,530 @@ +// Package keytab implements Kerberos keytabs: https://web.mit.edu/kerberos/krb5-devel/doc/formats/keytab_file_format.html. +package keytab + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "strings" + "time" + "unsafe" + + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/types" +) + +const ( + keytabFirstByte byte = 05 +) + +// Keytab struct. +type Keytab struct { + version uint8 + Entries []entry +} + +// Keytab entry struct. +type entry struct { + Principal principal + Timestamp time.Time + KVNO8 uint8 + Key types.EncryptionKey + KVNO uint32 +} + +func (e entry) String() string { + return fmt.Sprintf("% 4d %s %-56s %2d %-64x", + e.KVNO8, + e.Timestamp.Format("02/01/06 15:04:05"), + e.Principal.String(), + e.Key.KeyType, + e.Key.KeyValue, + ) +} + +// Keytab entry principal struct. +type principal struct { + NumComponents int16 `json:"-"` + Realm string + Components []string + NameType int32 +} + +func (p principal) String() string { + return fmt.Sprintf("%s@%s", strings.Join(p.Components, "/"), p.Realm) +} + +// New creates a new, empty Keytab type.
+func New() *Keytab { + var e []entry + return &Keytab{ + version: 2, + Entries: e, + } +} + +// GetEncryptionKey returns the EncryptionKey from the Keytab for the newest entry with the required kvno, etype and matching principal. +// If the kvno is zero then the latest kvno will be returned. The kvno is also returned for +func (kt *Keytab) GetEncryptionKey(princName types.PrincipalName, realm string, kvno int, etype int32) (types.EncryptionKey, int, error) { + var key types.EncryptionKey + var t time.Time + var kv int + for _, k := range kt.Entries { + if k.Principal.Realm == realm && len(k.Principal.Components) == len(princName.NameString) && + k.Key.KeyType == etype && + (k.KVNO == uint32(kvno) || kvno == 0) && + k.Timestamp.After(t) { + p := true + for i, n := range k.Principal.Components { + if princName.NameString[i] != n { + p = false + break + } + } + if p { + key = k.Key + kv = int(k.KVNO) + t = k.Timestamp + } + } + } + if len(key.KeyValue) < 1 { + return key, 0, fmt.Errorf("matching key not found in keytab. Looking for %v realm: %v kvno: %v etype: %v", princName.NameString, realm, kvno, etype) + } + return key, kv, nil +} + +// Create a new Keytab entry. +func newEntry() entry { + var b []byte + return entry{ + Principal: newPrincipal(), + Timestamp: time.Time{}, + KVNO8: 0, + Key: types.EncryptionKey{ + KeyType: 0, + KeyValue: b, + }, + KVNO: 0, + } +} + +func (kt Keytab) String() string { + var s string + s = `KVNO Timestamp Principal ET Key +---- ----------------- -------------------------------------------------------- -- ---------------------------------------------------------------- +` + for _, entry := range kt.Entries { + s += entry.String() + "\n" + } + return s +} + +// AddEntry adds an entry to the keytab. The password should be provided in plain text and it will be converted using the defined enctype to be stored. +func (kt *Keytab) AddEntry(principalName, realm, password string, ts time.Time, KVNO uint8, encType int32) error { + // Generate a key from the password + princ, _ := types.ParseSPNString(principalName) + key, _, err := crypto.GetKeyFromPassword(password, princ, realm, encType, types.PADataSequence{}) + if err != nil { + return err + } + + // Populate the keytab entry principal + ktep := newPrincipal() + ktep.NumComponents = int16(len(princ.NameString)) + if kt.version == 1 { + ktep.NumComponents += 1 + } + + ktep.Realm = realm + ktep.Components = princ.NameString + ktep.NameType = princ.NameType + + // Populate the keytab entry + e := newEntry() + e.Principal = ktep + e.Timestamp = ts + e.KVNO8 = KVNO + e.KVNO = uint32(KVNO) + e.Key = key + + kt.Entries = append(kt.Entries, e) + return nil +} + +// Create a new principal. +func newPrincipal() principal { + var c []string + return principal{ + NumComponents: 0, + Realm: "", + Components: c, + NameType: 0, + } +} + +// Load a Keytab file into a Keytab type. +func Load(ktPath string) (*Keytab, error) { + kt := new(Keytab) + b, err := ioutil.ReadFile(ktPath) + if err != nil { + return kt, err + } + err = kt.Unmarshal(b) + return kt, err +} + +// Marshal keytab into byte slice +func (kt *Keytab) Marshal() ([]byte, error) { + b := []byte{keytabFirstByte, kt.version} + for _, e := range kt.Entries { + eb, err := e.marshal(int(kt.version)) + if err != nil { + return b, err + } + b = append(b, eb...) + } + return b, nil +} + +// Write the keytab bytes to io.Writer. 
+// Returns the number of bytes written +func (kt *Keytab) Write(w io.Writer) (int, error) { + b, err := kt.Marshal() + if err != nil { + return 0, fmt.Errorf("error marshaling keytab: %v", err) + } + return w.Write(b) +} + +// Unmarshal byte slice of Keytab data into Keytab type. +func (kt *Keytab) Unmarshal(b []byte) error { + if len(b) < 2 { + return fmt.Errorf("byte array is less than 2 bytes: %d", len(b)) + } + + //The first byte of the file always has the value 5 + if b[0] != keytabFirstByte { + return errors.New("invalid keytab data. First byte does not equal 5") + } + //Get keytab version + //The 2nd byte contains the version number (1 or 2) + kt.version = b[1] + if kt.version != 1 && kt.version != 2 { + return errors.New("invalid keytab data. Keytab version is neither 1 nor 2") + } + //Version 1 of the file format uses native byte order for integer representations. Version 2 always uses big-endian byte order + var endian binary.ByteOrder + endian = binary.BigEndian + if kt.version == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + // n tracks position in the byte array + n := 2 + l, err := readInt32(b, &n, &endian) + if err != nil { + return err + } + for l != 0 { + if l < 0 { + //Zero padded so skip over + l = l * -1 + n = n + int(l) + } else { + if n < 0 { + return fmt.Errorf("%d can't be less than zero", n) + } + if n+int(l) > len(b) { + return fmt.Errorf("%s's length is less than %d", b, n+int(l)) + } + eb := b[n : n+int(l)] + n = n + int(l) + ke := newEntry() + // p keeps track as to where we are in the byte stream + var p int + var err error + parsePrincipal(eb, &p, kt, &ke, &endian) + ke.Timestamp, err = readTimestamp(eb, &p, &endian) + if err != nil { + return err + } + rei8, err := readInt8(eb, &p, &endian) + if err != nil { + return err + } + ke.KVNO8 = uint8(rei8) + rei16, err := readInt16(eb, &p, &endian) + if err != nil { + return err + } + ke.Key.KeyType = int32(rei16) + rei16, err = readInt16(eb, &p, &endian) + if err != nil { + return err + } + kl := int(rei16) + ke.Key.KeyValue, err = readBytes(eb, &p, kl, &endian) + if err != nil { + return err + } + // The 32-bit key version overrides the 8-bit key version. + // If at least 4 bytes are left after the other fields are read and they are non-zero + // this indicates the 32-bit version is present. + if len(eb)-p >= 4 { + // The 32-bit key may be present + ri32, err := readInt32(eb, &p, &endian) + if err != nil { + return err + } + ke.KVNO = uint32(ri32) + } + if ke.KVNO == 0 { + // Handles if the value from the last 4 bytes was zero and also if there are not the 4 bytes present. Makes sense to put the same value here as KVNO8 + ke.KVNO = uint32(ke.KVNO8) + } + // Add the entry to the keytab + kt.Entries = append(kt.Entries, ke) + } + // Check if there are still 4 bytes left to read + // Also check that n is greater than zero + if n < 0 || n > len(b) || len(b[n:]) < 4 { + break + } + // Read the size of the next entry + l, err = readInt32(b, &n, &endian) + if err != nil { + return err + } + } + return nil +} + +func (e entry) marshal(v int) ([]byte, error) { + var b []byte + pb, err := e.Principal.marshal(v) + if err != nil { + return b, err + } + b = append(b, pb...) 
+ + var endian binary.ByteOrder + endian = binary.BigEndian + if v == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + + t := make([]byte, 9) + endian.PutUint32(t[0:4], uint32(e.Timestamp.Unix())) + t[4] = e.KVNO8 + endian.PutUint16(t[5:7], uint16(e.Key.KeyType)) + endian.PutUint16(t[7:9], uint16(len(e.Key.KeyValue))) + b = append(b, t...) + + buf := new(bytes.Buffer) + err = binary.Write(buf, endian, e.Key.KeyValue) + if err != nil { + return b, err + } + b = append(b, buf.Bytes()...) + + t = make([]byte, 4) + endian.PutUint32(t, e.KVNO) + b = append(b, t...) + + // Add the length header + t = make([]byte, 4) + endian.PutUint32(t, uint32(len(b))) + b = append(t, b...) + return b, nil +} + +// Parse the Keytab bytes of a principal into a Keytab entry's principal. +func parsePrincipal(b []byte, p *int, kt *Keytab, ke *entry, e *binary.ByteOrder) error { + var err error + ke.Principal.NumComponents, err = readInt16(b, p, e) + if err != nil { + return err + } + if kt.version == 1 { + //In version 1 the number of components includes the realm. Minus 1 to make consistent with version 2 + ke.Principal.NumComponents-- + } + lenRealm, err := readInt16(b, p, e) + if err != nil { + return err + } + realmB, err := readBytes(b, p, int(lenRealm), e) + if err != nil { + return err + } + ke.Principal.Realm = string(realmB) + for i := 0; i < int(ke.Principal.NumComponents); i++ { + l, err := readInt16(b, p, e) + if err != nil { + return err + } + compB, err := readBytes(b, p, int(l), e) + if err != nil { + return err + } + ke.Principal.Components = append(ke.Principal.Components, string(compB)) + } + if kt.version != 1 { + //Name Type is omitted in version 1 + ke.Principal.NameType, err = readInt32(b, p, e) + if err != nil { + return err + } + } + return nil +} + +func (p principal) marshal(v int) ([]byte, error) { + //var b []byte + b := make([]byte, 2) + var endian binary.ByteOrder + endian = binary.BigEndian + if v == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + endian.PutUint16(b[0:], uint16(p.NumComponents)) + realm, err := marshalString(p.Realm, v) + if err != nil { + return b, err + } + b = append(b, realm...) + for _, c := range p.Components { + cb, err := marshalString(c, v) + if err != nil { + return b, err + } + b = append(b, cb...) + } + if v != 1 { + t := make([]byte, 4) + endian.PutUint32(t, uint32(p.NameType)) + b = append(b, t...) + } + return b, nil +} + +func marshalString(s string, v int) ([]byte, error) { + sb := []byte(s) + b := make([]byte, 2) + var endian binary.ByteOrder + endian = binary.BigEndian + if v == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + endian.PutUint16(b[0:], uint16(len(sb))) + buf := new(bytes.Buffer) + err := binary.Write(buf, endian, sb) + if err != nil { + return b, err + } + b = append(b, buf.Bytes()...) + return b, err +} + +// Read bytes representing a timestamp. +func readTimestamp(b []byte, p *int, e *binary.ByteOrder) (time.Time, error) { + i32, err := readInt32(b, p, e) + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(i32), 0), nil +} + +// Read bytes representing an eight bit integer. +func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 1) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+1) + } + buf := bytes.NewBuffer(b[*p : *p+1]) + binary.Read(buf, *e, &i) + *p++ + return +} + +// Read bytes representing a sixteen bit integer. 
+func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 2) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+2) + } + + buf := bytes.NewBuffer(b[*p : *p+2]) + binary.Read(buf, *e, &i) + *p += 2 + return +} + +// Read bytes representing a thirty two bit integer. +func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 4) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+4) + } + + buf := bytes.NewBuffer(b[*p : *p+4]) + binary.Read(buf, *e, &i) + *p += 4 + return +} + +func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) ([]byte, error) { + if s < 0 { + return nil, fmt.Errorf("%d cannot be less than zero", s) + } + i := *p + s + if i > len(b) { + return nil, fmt.Errorf("%s's length is greater than %d", b, i) + } + buf := bytes.NewBuffer(b[*p:i]) + r := make([]byte, s) + if err := binary.Read(buf, *e, &r); err != nil { + return nil, err + } + *p += s + return r, nil +} + +func isNativeEndianLittle() bool { + var x = 0x012345678 + var p = unsafe.Pointer(&x) + var bp = (*[4]byte)(p) + + var endian bool + if 0x01 == bp[0] { + endian = false + } else if (0x78 & 0xff) == (bp[0] & 0xff) { + endian = true + } else { + // Default to big endian + endian = false + } + return endian +} + +// JSON return information about the keys held in the keytab in a JSON format. +func (kt *Keytab) JSON() (string, error) { + b, err := json.MarshalIndent(kt, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/krberror/error.go b/vendor/github.com/jcmturner/gokrb5/v8/krberror/error.go new file mode 100644 index 00000000000..01c6d9904c5 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/krberror/error.go @@ -0,0 +1,67 @@ +// Package krberror provides error type and functions for gokrb5. +package krberror + +import ( + "fmt" + "strings" +) + +// Error type descriptions. +const ( + separator = " < " + EncodingError = "Encoding_Error" + NetworkingError = "Networking_Error" + DecryptingError = "Decrypting_Error" + EncryptingError = "Encrypting_Error" + ChksumError = "Checksum_Error" + KRBMsgError = "KRBMessage_Handling_Error" + ConfigError = "Configuration_Error" + KDCError = "KDC_Error" +) + +// Krberror is an error type for gokrb5 +type Krberror struct { + RootCause string + EText []string +} + +// Error function to implement the error interface. +func (e Krberror) Error() string { + return fmt.Sprintf("[Root cause: %s] ", e.RootCause) + strings.Join(e.EText, separator) +} + +// Add another error statement to the error. +func (e *Krberror) Add(et string, s string) { + e.EText = append([]string{fmt.Sprintf("%s: %s", et, s)}, e.EText...) +} + +// New creates a new instance of Krberror. +func New(et, s string) Krberror { + return Krberror{ + RootCause: et, + EText: []string{s}, + } +} + +// Errorf appends to or creates a new Krberror. +func Errorf(err error, et, format string, a ...interface{}) Krberror { + if e, ok := err.(Krberror); ok { + e.Add(et, fmt.Sprintf(format, a...)) + return e + } + return NewErrorf(et, format+": %s", append(a, err)...) +} + +// NewErrorf creates a new Krberror from a formatted string. 
+func NewErrorf(et, format string, a ...interface{}) Krberror { + var s string + if len(a) > 0 { + s = fmt.Sprintf("%s: %s", et, fmt.Sprintf(format, a...)) + } else { + s = fmt.Sprintf("%s: %s", et, format) + } + return Krberror{ + RootCause: et, + EText: []string{s}, + } +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/APRep.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/APRep.go new file mode 100644 index 00000000000..555fb807207 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/APRep.go @@ -0,0 +1,49 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +// APRep implements RFC 4120 KRB_AP_REP: https://tools.ietf.org/html/rfc4120#section-5.5.2. +type APRep struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + EncPart types.EncryptedData `asn1:"explicit,tag:2"` +} + +// EncAPRepPart is the encrypted part of KRB_AP_REP. +type EncAPRepPart struct { + CTime time.Time `asn1:"generalized,explicit,tag:0"` + Cusec int `asn1:"explicit,tag:1"` + Subkey types.EncryptionKey `asn1:"optional,explicit,tag:2"` + SequenceNumber int64 `asn1:"optional,explicit,tag:3"` +} + +// Unmarshal bytes b into the APRep struct. +func (a *APRep) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREP)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_AP_REP + if a.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_AP_REP. Expected: %v; Actual: %v", expectedMsgType, a.MsgType) + } + return nil +} + +// Unmarshal bytes b into the APRep encrypted part struct. +func (a *EncAPRepPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncAPRepPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "AP_REP unmarshal error") + } + return nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/APReq.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/APReq.go new file mode 100644 index 00000000000..18360797631 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/APReq.go @@ -0,0 +1,199 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +type marshalAPReq struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + APOptions asn1.BitString `asn1:"explicit,tag:2"` + // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag + Ticket asn1.RawValue `asn1:"explicit,tag:3"` + EncryptedAuthenticator types.EncryptedData `asn1:"explicit,tag:4"` +} + +// APReq implements RFC 4120 KRB_AP_REQ: https://tools.ietf.org/html/rfc4120#section-5.5.1. 
+type APReq struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + APOptions asn1.BitString `asn1:"explicit,tag:2"` + Ticket Ticket `asn1:"explicit,tag:3"` + EncryptedAuthenticator types.EncryptedData `asn1:"explicit,tag:4"` + Authenticator types.Authenticator `asn1:"optional"` +} + +// NewAPReq generates a new KRB_AP_REQ struct. +func NewAPReq(tkt Ticket, sessionKey types.EncryptionKey, auth types.Authenticator) (APReq, error) { + var a APReq + ed, err := encryptAuthenticator(auth, sessionKey, tkt) + if err != nil { + return a, krberror.Errorf(err, krberror.KRBMsgError, "error creating Authenticator for AP_REQ") + } + a = APReq{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_AP_REQ, + APOptions: types.NewKrbFlags(), + Ticket: tkt, + EncryptedAuthenticator: ed, + } + return a, nil +} + +// Encrypt Authenticator +func encryptAuthenticator(a types.Authenticator, sessionKey types.EncryptionKey, tkt Ticket) (types.EncryptedData, error) { + var ed types.EncryptedData + m, err := a.Marshal() + if err != nil { + return ed, krberror.Errorf(err, krberror.EncodingError, "marshaling error of EncryptedData form of Authenticator") + } + usage := authenticatorKeyUsage(tkt.SName) + ed, err = crypto.GetEncryptedData(m, sessionKey, uint32(usage), tkt.EncPart.KVNO) + if err != nil { + return ed, krberror.Errorf(err, krberror.EncryptingError, "error encrypting Authenticator") + } + return ed, nil +} + +// DecryptAuthenticator decrypts the Authenticator within the AP_REQ. +// sessionKey may simply be the key within the decrypted EncPart of the ticket within the AP_REQ. +func (a *APReq) DecryptAuthenticator(sessionKey types.EncryptionKey) error { + usage := authenticatorKeyUsage(a.Ticket.SName) + ab, e := crypto.DecryptEncPart(a.EncryptedAuthenticator, sessionKey, uint32(usage)) + if e != nil { + return fmt.Errorf("error decrypting authenticator: %v", e) + } + err := a.Authenticator.Unmarshal(ab) + if err != nil { + return fmt.Errorf("error unmarshaling authenticator: %v", err) + } + return nil +} + +func authenticatorKeyUsage(pn types.PrincipalName) int { + if pn.NameString[0] == "krbtgt" { + return keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR + } + return keyusage.AP_REQ_AUTHENTICATOR +} + +// Unmarshal bytes b into the APReq struct. +func (a *APReq) Unmarshal(b []byte) error { + var m marshalAPReq + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREQ)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "unmarshal error of AP_REQ") + } + if m.MsgType != msgtype.KRB_AP_REQ { + return NewKRBError(types.PrincipalName{}, "", errorcode.KRB_AP_ERR_MSG_TYPE, errorcode.Lookup(errorcode.KRB_AP_ERR_MSG_TYPE)) + } + a.PVNO = m.PVNO + a.MsgType = m.MsgType + a.APOptions = m.APOptions + a.EncryptedAuthenticator = m.EncryptedAuthenticator + a.Ticket, err = unmarshalTicket(m.Ticket.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "unmarshaling error of Ticket within AP_REQ") + } + return nil +} + +// Marshal APReq struct. 
+func (a *APReq) Marshal() ([]byte, error) { + m := marshalAPReq{ + PVNO: a.PVNO, + MsgType: a.MsgType, + APOptions: a.APOptions, + EncryptedAuthenticator: a.EncryptedAuthenticator, + } + var b []byte + b, err := a.Ticket.Marshal() + if err != nil { + return b, err + } + m.Ticket = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 3, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "marshaling error of AP_REQ") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.APREQ) + return mk, nil +} + +// Verify an AP_REQ using service's keytab, spn and max acceptable clock skew duration. +// The service ticket encrypted part and authenticator will be decrypted as part of this operation. +func (a *APReq) Verify(kt *keytab.Keytab, d time.Duration, cAddr types.HostAddress, snameOverride *types.PrincipalName) (bool, error) { + // Decrypt ticket's encrypted part with service key + //TODO decrypt with service's session key from its TGT is use-to-user. Need to figure out how to get TGT. + //if types.IsFlagSet(&a.APOptions, flags.APOptionUseSessionKey) { + // err := a.Ticket.Decrypt(tgt.DecryptedEncPart.Key) + // if err != nil { + // return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of ticket provided using session key") + // } + //} else { + // err := a.Ticket.DecryptEncPart(*kt, &a.Ticket.SName) + // if err != nil { + // return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of service ticket provided") + // } + //} + sname := &a.Ticket.SName + if snameOverride != nil { + sname = snameOverride + } + err := a.Ticket.DecryptEncPart(kt, sname) + if err != nil { + return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of service ticket provided") + } + + // Check time validity of ticket + ok, err := a.Ticket.Valid(d) + if err != nil || !ok { + return ok, err + } + + // Check client's address is listed in the client addresses in the ticket + if len(a.Ticket.DecryptedEncPart.CAddr) > 0 { + //If client addresses are present check if any of them match the source IP that sent the APReq + //If there is no match return KRB_AP_ERR_BADADDR error. + if !types.HostAddressesContains(a.Ticket.DecryptedEncPart.CAddr, cAddr) { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BADADDR, "client address not within the list contained in the service ticket") + } + } + + // Decrypt authenticator with session key from ticket's encrypted part + err = a.DecryptAuthenticator(a.Ticket.DecryptedEncPart.Key) + if err != nil { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BAD_INTEGRITY, "could not decrypt authenticator") + } + + // Check CName in authenticator is the same as that in the ticket + if !a.Authenticator.CName.Equal(a.Ticket.DecryptedEncPart.CName) { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BADMATCH, "CName in Authenticator does not match that in service ticket") + } + + // Check the clock skew between the client and the service server + ct := a.Authenticator.CTime.Add(time.Duration(a.Authenticator.Cusec) * time.Microsecond) + t := time.Now().UTC() + if t.Sub(ct) > d || ct.Sub(t) > d { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_SKEW, fmt.Sprintf("clock skew with client too large. 
greater than %v seconds", d)) + } + return true, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCRep.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCRep.go new file mode 100644 index 00000000000..69df9f0f6f5 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCRep.go @@ -0,0 +1,360 @@ +package messages + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.4.2 + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/credentials" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/flags" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/iana/patype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +type marshalKDCRep struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + PAData types.PADataSequence `asn1:"explicit,optional,tag:2"` + CRealm string `asn1:"generalstring,explicit,tag:3"` + CName types.PrincipalName `asn1:"explicit,tag:4"` + // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag + Ticket asn1.RawValue `asn1:"explicit,tag:5"` + EncPart types.EncryptedData `asn1:"explicit,tag:6"` +} + +// KDCRepFields represents the KRB_KDC_REP fields. +type KDCRepFields struct { + PVNO int + MsgType int + PAData []types.PAData + CRealm string + CName types.PrincipalName + Ticket Ticket + EncPart types.EncryptedData + DecryptedEncPart EncKDCRepPart +} + +// ASRep implements RFC 4120 KRB_AS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2. +type ASRep struct { + KDCRepFields +} + +// TGSRep implements RFC 4120 KRB_TGS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2. +type TGSRep struct { + KDCRepFields +} + +// EncKDCRepPart is the encrypted part of KRB_KDC_REP. +type EncKDCRepPart struct { + Key types.EncryptionKey `asn1:"explicit,tag:0"` + LastReqs []LastReq `asn1:"explicit,tag:1"` + Nonce int `asn1:"explicit,tag:2"` + KeyExpiration time.Time `asn1:"generalized,explicit,optional,tag:3"` + Flags asn1.BitString `asn1:"explicit,tag:4"` + AuthTime time.Time `asn1:"generalized,explicit,tag:5"` + StartTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + EndTime time.Time `asn1:"generalized,explicit,tag:7"` + RenewTill time.Time `asn1:"generalized,explicit,optional,tag:8"` + SRealm string `asn1:"generalstring,explicit,tag:9"` + SName types.PrincipalName `asn1:"explicit,tag:10"` + CAddr []types.HostAddress `asn1:"explicit,optional,tag:11"` + EncPAData types.PADataSequence `asn1:"explicit,optional,tag:12"` +} + +// LastReq part of KRB_KDC_REP. +type LastReq struct { + LRType int32 `asn1:"explicit,tag:0"` + LRValue time.Time `asn1:"generalized,explicit,tag:1"` +} + +// Unmarshal bytes b into the ASRep struct. +func (k *ASRep) Unmarshal(b []byte) error { + var m marshalKDCRep + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREP)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + if m.MsgType != msgtype.KRB_AS_REP { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an AS_REP. 
Expected: %v; Actual: %v", msgtype.KRB_AS_REP, m.MsgType) + } + //Process the raw ticket within + tkt, err := unmarshalTicket(m.Ticket.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within AS_REP") + } + k.KDCRepFields = KDCRepFields{ + PVNO: m.PVNO, + MsgType: m.MsgType, + PAData: m.PAData, + CRealm: m.CRealm, + CName: m.CName, + Ticket: tkt, + EncPart: m.EncPart, + } + return nil +} + +// Marshal ASRep struct. +func (k *ASRep) Marshal() ([]byte, error) { + m := marshalKDCRep{ + PVNO: k.PVNO, + MsgType: k.MsgType, + PAData: k.PAData, + CRealm: k.CRealm, + CName: k.CName, + EncPart: k.EncPart, + } + b, err := k.Ticket.Marshal() + if err != nil { + return []byte{}, err + } + m.Ticket = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 5, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REP") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.ASREP) + return mk, nil +} + +// Unmarshal bytes b into the TGSRep struct. +func (k *TGSRep) Unmarshal(b []byte) error { + var m marshalKDCRep + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREP)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + if m.MsgType != msgtype.KRB_TGS_REP { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an TGS_REP. Expected: %v; Actual: %v", msgtype.KRB_TGS_REP, m.MsgType) + } + //Process the raw ticket within + tkt, err := unmarshalTicket(m.Ticket.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within TGS_REP") + } + k.KDCRepFields = KDCRepFields{ + PVNO: m.PVNO, + MsgType: m.MsgType, + PAData: m.PAData, + CRealm: m.CRealm, + CName: m.CName, + Ticket: tkt, + EncPart: m.EncPart, + } + return nil +} + +// Marshal TGSRep struct. +func (k *TGSRep) Marshal() ([]byte, error) { + m := marshalKDCRep{ + PVNO: k.PVNO, + MsgType: k.MsgType, + PAData: k.PAData, + CRealm: k.CRealm, + CName: k.CName, + EncPart: k.EncPart, + } + b, err := k.Ticket.Marshal() + if err != nil { + return []byte{}, err + } + m.Ticket = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 5, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REP") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.TGSREP) + return mk, nil +} + +// Unmarshal bytes b into encrypted part of KRB_KDC_REP. +func (e *EncKDCRepPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncASRepPart)) + if err != nil { + // Try using tag 26 + // Ref: RFC 4120 - mentions that some implementations use application tag number 26 wether or not the reply is + // a AS-REP or a TGS-REP. + _, err = asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncTGSRepPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part within KDC_REP") + } + } + return nil +} + +// Marshal encrypted part of KRB_KDC_REP. 
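+// Typical use of the decrypted KDC_REP part: the client parses the AS_REP it
+// received and verifies it against the request it sent. A sketch, assuming
+// replyBytes came from the KDC, cl is a *credentials.Credentials holding a
+// password or keytab, cfg is the loaded *config.Config and asReq is the
+// AS_REQ that was sent:
+//
+//	var asRep ASRep
+//	if err := asRep.Unmarshal(replyBytes); err != nil {
+//		// parse failed; err may carry a KRB_ERROR returned by the KDC
+//	}
+//	if ok, err := asRep.Verify(cfg, cl, asReq); !ok {
+//		// nonce, names, realm or clock-skew check failed; see err
+//	}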
+func (e *EncKDCRepPart) Marshal() ([]byte, error) { + b, err := asn1.Marshal(*e) + if err != nil { + return b, krberror.Errorf(err, krberror.EncodingError, "marshaling error of AS_REP encpart") + } + b = asn1tools.AddASNAppTag(b, asnAppTag.EncASRepPart) + return b, nil +} + +// DecryptEncPart decrypts the encrypted part of an AS_REP. +func (k *ASRep) DecryptEncPart(c *credentials.Credentials) (types.EncryptionKey, error) { + var key types.EncryptionKey + var err error + if c.HasKeytab() { + key, _, err = c.Keytab().GetEncryptionKey(k.CName, k.CRealm, k.EncPart.KVNO, k.EncPart.EType) + if err != nil { + return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") + } + } + if c.HasPassword() { + key, _, err = crypto.GetKeyFromPassword(c.Password(), k.CName, k.CRealm, k.EncPart.EType, k.PAData) + if err != nil { + return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") + } + } + if !c.HasKeytab() && !c.HasPassword() { + return key, krberror.NewErrorf(krberror.DecryptingError, "no secret available in credentials to perform decryption of AS_REP encrypted part") + } + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.AS_REP_ENCPART) + if err != nil { + return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") + } + var denc EncKDCRepPart + err = denc.Unmarshal(b) + if err != nil { + return key, krberror.Errorf(err, krberror.EncodingError, "error unmarshaling decrypted encpart of AS_REP") + } + k.DecryptedEncPart = denc + return key, nil +} + +// Verify checks the validity of AS_REP message. +func (k *ASRep) Verify(cfg *config.Config, creds *credentials.Credentials, asReq ASReq) (bool, error) { + //Ref RFC 4120 Section 3.1.5 + if !k.CName.Equal(asReq.ReqBody.CName) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.CName, k.CName) + } + if k.CRealm != asReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "CRealm in response does not match what was requested. Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.CRealm) + } + key, err := k.DecryptEncPart(creds) + if err != nil { + return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting EncPart of AS_REP") + } + if k.DecryptedEncPart.Nonce != asReq.ReqBody.Nonce { + return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request") + } + if !k.DecryptedEncPart.SName.Equal(asReq.ReqBody.SName) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", asReq.ReqBody.SName, k.DecryptedEncPart.SName) + } + if k.DecryptedEncPart.SRealm != asReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. 
Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.DecryptedEncPart.SRealm) + } + if len(asReq.ReqBody.Addresses) > 0 { + if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, asReq.ReqBody.Addresses) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the AS_REP does not match those listed in the AS_REQ") + } + } + t := time.Now().UTC() + if t.Sub(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(t) > cfg.LibDefaults.Clockskew { + return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. Greater than %v seconds", cfg.LibDefaults.Clockskew.Seconds()) + } + // RFC 6806 https://tools.ietf.org/html/rfc6806.html#section-11 + if asReq.PAData.Contains(patype.PA_REQ_ENC_PA_REP) && types.IsFlagSet(&k.DecryptedEncPart.Flags, flags.EncPARep) { + if len(k.DecryptedEncPart.EncPAData) < 2 || !k.DecryptedEncPart.EncPAData.Contains(patype.PA_FX_FAST) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "KDC did not respond appropriately to FAST negotiation") + } + for _, pa := range k.DecryptedEncPart.EncPAData { + if pa.PADataType == patype.PA_REQ_ENC_PA_REP { + var pafast types.PAReqEncPARep + err := pafast.Unmarshal(pa.PADataValue) + if err != nil { + return false, krberror.Errorf(err, krberror.EncodingError, "KDC FAST negotiation response error, could not unmarshal PA_REQ_ENC_PA_REP") + } + etype, err := crypto.GetChksumEtype(pafast.ChksumType) + if err != nil { + return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response error") + } + ab, _ := asReq.Marshal() + if !etype.VerifyChecksum(key.KeyValue, ab, pafast.Chksum, keyusage.KEY_USAGE_AS_REQ) { + return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response checksum invalid") + } + } + } + } + return true, nil +} + +// DecryptEncPart decrypts the encrypted part of an TGS_REP. +func (k *TGSRep) DecryptEncPart(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.TGS_REP_ENCPART_SESSION_KEY) + if err != nil { + return krberror.Errorf(err, krberror.DecryptingError, "error decrypting TGS_REP EncPart") + } + var denc EncKDCRepPart + err = denc.Unmarshal(b) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part") + } + k.DecryptedEncPart = denc + return nil +} + +// Verify checks the validity of the TGS_REP message. +func (k *TGSRep) Verify(cfg *config.Config, tgsReq TGSReq) (bool, error) { + if !k.CName.Equal(tgsReq.ReqBody.CName) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.CName, k.CName) + } + if k.Ticket.Realm != tgsReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "realm in response ticket does not match what was requested. Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.Ticket.Realm) + } + if k.DecryptedEncPart.Nonce != tgsReq.ReqBody.Nonce { + return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request") + } + //if k.Ticket.SName.NameType != tgsReq.ReqBody.SName.NameType || k.Ticket.SName.NameString == nil { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. 
Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.Ticket.SName) + //} + //for i := range k.Ticket.SName.NameString { + // if k.Ticket.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.Ticket.SName) + // } + //} + //if k.DecryptedEncPart.SName.NameType != tgsReq.ReqBody.SName.NameType || k.DecryptedEncPart.SName.NameString == nil { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName) + //} + //for i := range k.DecryptedEncPart.SName.NameString { + // if k.DecryptedEncPart.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName) + // } + //} + if k.DecryptedEncPart.SRealm != tgsReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.DecryptedEncPart.SRealm) + } + if len(k.DecryptedEncPart.CAddr) > 0 { + if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, tgsReq.ReqBody.Addresses) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the TGS_REP does not match those listed in the TGS_REQ") + } + } + if time.Since(k.DecryptedEncPart.StartTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.StartTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew { + if time.Since(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew { + return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. Greater than %v seconds.", cfg.LibDefaults.Clockskew.Seconds()) + } + } + return true, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCReq.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCReq.go new file mode 100644 index 00000000000..3745afed2b9 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCReq.go @@ -0,0 +1,432 @@ +package messages + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.4.1 + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/flags" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/iana/nametype" + "github.com/jcmturner/gokrb5/v8/iana/patype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +type marshalKDCReq struct { + PVNO int `asn1:"explicit,tag:1"` + MsgType int `asn1:"explicit,tag:2"` + PAData types.PADataSequence `asn1:"explicit,optional,tag:3"` + ReqBody asn1.RawValue `asn1:"explicit,tag:4"` +} + +// KDCReqFields represents the KRB_KDC_REQ fields. 
+type KDCReqFields struct { + PVNO int + MsgType int + PAData types.PADataSequence + ReqBody KDCReqBody + Renewal bool +} + +// ASReq implements RFC 4120 KRB_AS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1. +type ASReq struct { + KDCReqFields +} + +// TGSReq implements RFC 4120 KRB_TGS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1. +type TGSReq struct { + KDCReqFields +} + +type marshalKDCReqBody struct { + KDCOptions asn1.BitString `asn1:"explicit,tag:0"` + CName types.PrincipalName `asn1:"explicit,optional,tag:1"` + Realm string `asn1:"generalstring,explicit,tag:2"` + SName types.PrincipalName `asn1:"explicit,optional,tag:3"` + From time.Time `asn1:"generalized,explicit,optional,tag:4"` + Till time.Time `asn1:"generalized,explicit,tag:5"` + RTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + Nonce int `asn1:"explicit,tag:7"` + EType []int32 `asn1:"explicit,tag:8"` + Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"` + EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"` + // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag + AdditionalTickets asn1.RawValue `asn1:"explicit,optional,tag:11"` +} + +// KDCReqBody implements the KRB_KDC_REQ request body. +type KDCReqBody struct { + KDCOptions asn1.BitString `asn1:"explicit,tag:0"` + CName types.PrincipalName `asn1:"explicit,optional,tag:1"` + Realm string `asn1:"generalstring,explicit,tag:2"` + SName types.PrincipalName `asn1:"explicit,optional,tag:3"` + From time.Time `asn1:"generalized,explicit,optional,tag:4"` + Till time.Time `asn1:"generalized,explicit,tag:5"` + RTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + Nonce int `asn1:"explicit,tag:7"` + EType []int32 `asn1:"explicit,tag:8"` + Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"` + EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"` + AdditionalTickets []Ticket `asn1:"explicit,optional,tag:11"` +} + +// NewASReqForTGT generates a new KRB_AS_REQ struct for a TGT request. +func NewASReqForTGT(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) { + sname := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", realm}, + } + return NewASReq(realm, c, cname, sname) +} + +// NewASReqForChgPasswd generates a new KRB_AS_REQ struct for a change password request. +func NewASReqForChgPasswd(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) { + sname := types.PrincipalName{ + NameType: nametype.KRB_NT_PRINCIPAL, + NameString: []string{"kadmin", "changepw"}, + } + return NewASReq(realm, c, cname, sname) +} + +// NewASReq generates a new KRB_AS_REQ struct for a given SNAME. 
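+// A minimal sketch of building an AS_REQ with the helpers above, assuming
+// cfg is a *config.Config loaded by the caller and "user1"/"EXAMPLE.COM" are
+// placeholder principal and realm names:
+//
+//	cname := types.PrincipalName{
+//		NameType:   nametype.KRB_NT_PRINCIPAL,
+//		NameString: []string{"user1"},
+//	}
+//	asReq, err := NewASReqForTGT("EXAMPLE.COM", cfg, cname)
+//	if err != nil {
+//		// handle error
+//	}
+//	b, err := asReq.Marshal() // bytes ready to send to the KDC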
+func NewASReq(realm string, c *config.Config, cname, sname types.PrincipalName) (ASReq, error) { + nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32)) + if err != nil { + return ASReq{}, err + } + t := time.Now().UTC() + // Copy the default options to make this thread safe + kopts := types.NewKrbFlags() + copy(kopts.Bytes, c.LibDefaults.KDCDefaultOptions.Bytes) + kopts.BitLength = c.LibDefaults.KDCDefaultOptions.BitLength + a := ASReq{ + KDCReqFields{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_AS_REQ, + PAData: types.PADataSequence{}, + ReqBody: KDCReqBody{ + KDCOptions: kopts, + Realm: realm, + CName: cname, + SName: sname, + Till: t.Add(c.LibDefaults.TicketLifetime), + Nonce: int(nonce.Int64()), + EType: c.LibDefaults.DefaultTktEnctypeIDs, + }, + }, + } + if c.LibDefaults.Forwardable { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Forwardable) + } + if c.LibDefaults.Canonicalize { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Canonicalize) + } + if c.LibDefaults.Proxiable { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Proxiable) + } + if c.LibDefaults.RenewLifetime != 0 { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Renewable) + a.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime) + a.ReqBody.RTime = t.Add(time.Duration(48) * time.Hour) + } + if !c.LibDefaults.NoAddresses { + ha, err := types.LocalHostAddresses() + if err != nil { + return a, fmt.Errorf("could not get local addresses: %v", err) + } + ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...) + a.ReqBody.Addresses = ha + } + return a, nil +} + +// NewTGSReq generates a new KRB_TGS_REQ struct. +func NewTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, tgt Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool) (TGSReq, error) { + a, err := tgsReq(cname, sname, kdcRealm, renewal, c) + if err != nil { + return a, err + } + err = a.setPAData(tgt, sessionKey) + return a, err +} + +// NewUser2UserTGSReq returns a TGS-REQ suitable for user-to-user authentication (https://tools.ietf.org/html/rfc4120#section-3.7) +func NewUser2UserTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, clientTGT Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool, verifyingTGT Ticket) (TGSReq, error) { + a, err := tgsReq(cname, sname, kdcRealm, renewal, c) + if err != nil { + return a, err + } + a.ReqBody.AdditionalTickets = []Ticket{verifyingTGT} + types.SetFlag(&a.ReqBody.KDCOptions, flags.EncTktInSkey) + err = a.setPAData(clientTGT, sessionKey) + return a, err +} + +// tgsReq populates the fields for a TGS_REQ +func tgsReq(cname, sname types.PrincipalName, kdcRealm string, renewal bool, c *config.Config) (TGSReq, error) { + nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32)) + if err != nil { + return TGSReq{}, err + } + t := time.Now().UTC() + k := KDCReqFields{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_TGS_REQ, + ReqBody: KDCReqBody{ + KDCOptions: types.NewKrbFlags(), + Realm: kdcRealm, + CName: cname, // Add the CName to make validation of the reply easier + SName: sname, + Till: t.Add(c.LibDefaults.TicketLifetime), + Nonce: int(nonce.Int64()), + EType: c.LibDefaults.DefaultTGSEnctypeIDs, + }, + Renewal: renewal, + } + if c.LibDefaults.Forwardable { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Forwardable) + } + if c.LibDefaults.Canonicalize { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Canonicalize) + } + if c.LibDefaults.Proxiable { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Proxiable) + } + if 
c.LibDefaults.RenewLifetime > time.Duration(0) { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable) + k.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime) + } + if !c.LibDefaults.NoAddresses { + ha, err := types.LocalHostAddresses() + if err != nil { + return TGSReq{}, fmt.Errorf("could not get local addresses: %v", err) + } + ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...) + k.ReqBody.Addresses = ha + } + if renewal { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Renew) + types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable) + } + return TGSReq{ + k, + }, nil +} + +func (k *TGSReq) setPAData(tgt Ticket, sessionKey types.EncryptionKey) error { + // Marshal the request and calculate checksum + b, err := k.ReqBody.Marshal() + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REQ body") + } + etype, err := crypto.GetEtype(sessionKey.KeyType) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting etype to encrypt authenticator") + } + cb, err := etype.GetChecksumHash(sessionKey.KeyValue, b, keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM) + if err != nil { + return krberror.Errorf(err, krberror.ChksumError, "error getting etype checksum hash") + } + + // Form PAData for TGS_REQ + // Create authenticator + auth, err := types.NewAuthenticator(tgt.Realm, k.ReqBody.CName) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator") + } + auth.Cksum = types.Checksum{ + CksumType: etype.GetHashID(), + Checksum: cb, + } + // Create AP_REQ + apReq, err := NewAPReq(tgt, sessionKey, auth) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AP_REQ") + } + apb, err := apReq.Marshal() + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error marshaling AP_REQ for pre-authentication data") + } + k.PAData = types.PADataSequence{ + types.PAData{ + PADataType: patype.PA_TGS_REQ, + PADataValue: apb, + }, + } + return nil +} + +// Unmarshal bytes b into the ASReq struct. +func (k *ASReq) Unmarshal(b []byte) error { + var m marshalKDCReq + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREQ)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling AS_REQ") + } + expectedMsgType := msgtype.KRB_AS_REQ + if m.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a AS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType) + } + var reqb KDCReqBody + err = reqb.Unmarshal(m.ReqBody.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error processing AS_REQ body") + } + k.MsgType = m.MsgType + k.PAData = m.PAData + k.PVNO = m.PVNO + k.ReqBody = reqb + return nil +} + +// Unmarshal bytes b into the TGSReq struct. +func (k *TGSReq) Unmarshal(b []byte) error { + var m marshalKDCReq + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREQ)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling TGS_REQ") + } + expectedMsgType := msgtype.KRB_TGS_REQ + if m.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a TGS_REQ. 
Expected: %v; Actual: %v", expectedMsgType, m.MsgType) + } + var reqb KDCReqBody + err = reqb.Unmarshal(m.ReqBody.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error processing TGS_REQ body") + } + k.MsgType = m.MsgType + k.PAData = m.PAData + k.PVNO = m.PVNO + k.ReqBody = reqb + return nil +} + +// Unmarshal bytes b into the KRB_KDC_REQ body struct. +func (k *KDCReqBody) Unmarshal(b []byte) error { + var m marshalKDCReqBody + _, err := asn1.Unmarshal(b, &m) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling KDC_REQ body") + } + k.KDCOptions = m.KDCOptions + if len(k.KDCOptions.Bytes) < 4 { + tb := make([]byte, 4-len(k.KDCOptions.Bytes)) + k.KDCOptions.Bytes = append(tb, k.KDCOptions.Bytes...) + k.KDCOptions.BitLength = len(k.KDCOptions.Bytes) * 8 + } + k.CName = m.CName + k.Realm = m.Realm + k.SName = m.SName + k.From = m.From + k.Till = m.Till + k.RTime = m.RTime + k.Nonce = m.Nonce + k.EType = m.EType + k.Addresses = m.Addresses + k.EncAuthData = m.EncAuthData + if len(m.AdditionalTickets.Bytes) > 0 { + k.AdditionalTickets, err = unmarshalTicketsSequence(m.AdditionalTickets) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling additional tickets") + } + } + return nil +} + +// Marshal ASReq struct. +func (k *ASReq) Marshal() ([]byte, error) { + m := marshalKDCReq{ + PVNO: k.PVNO, + MsgType: k.MsgType, + PAData: k.PAData, + } + b, err := k.ReqBody.Marshal() + if err != nil { + var mk []byte + return mk, err + } + m.ReqBody = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 4, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.ASREQ) + return mk, nil +} + +// Marshal TGSReq struct. +func (k *TGSReq) Marshal() ([]byte, error) { + m := marshalKDCReq{ + PVNO: k.PVNO, + MsgType: k.MsgType, + PAData: k.PAData, + } + b, err := k.ReqBody.Marshal() + if err != nil { + var mk []byte + return mk, err + } + m.ReqBody = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 4, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.TGSREQ) + return mk, nil +} + +// Marshal KRB_KDC_REQ body struct. 
+func (k *KDCReqBody) Marshal() ([]byte, error) { + var b []byte + m := marshalKDCReqBody{ + KDCOptions: k.KDCOptions, + CName: k.CName, + Realm: k.Realm, + SName: k.SName, + From: k.From, + Till: k.Till, + RTime: k.RTime, + Nonce: k.Nonce, + EType: k.EType, + Addresses: k.Addresses, + EncAuthData: k.EncAuthData, + } + rawtkts, err := MarshalTicketSequence(k.AdditionalTickets) + if err != nil { + return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body additional tickets") + } + //The asn1.rawValue needs the tag setting on it for where it is in the KDCReqBody + rawtkts.Tag = 11 + if len(rawtkts.Bytes) > 0 { + m.AdditionalTickets = rawtkts + } + b, err = asn1.Marshal(m) + if err != nil { + return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body") + } + return b, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBCred.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBCred.go new file mode 100644 index 00000000000..536fdb9ec93 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBCred.go @@ -0,0 +1,102 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +type marshalKRBCred struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + Tickets asn1.RawValue `asn1:"explicit,tag:2"` + EncPart types.EncryptedData `asn1:"explicit,tag:3"` +} + +// KRBCred implements RFC 4120 KRB_CRED: https://tools.ietf.org/html/rfc4120#section-5.8.1. +type KRBCred struct { + PVNO int + MsgType int + Tickets []Ticket + EncPart types.EncryptedData + DecryptedEncPart EncKrbCredPart +} + +// EncKrbCredPart is the encrypted part of KRB_CRED. +type EncKrbCredPart struct { + TicketInfo []KrbCredInfo `asn1:"explicit,tag:0"` + Nouce int `asn1:"optional,explicit,tag:1"` + Timestamp time.Time `asn1:"generalized,optional,explicit,tag:2"` + Usec int `asn1:"optional,explicit,tag:3"` + SAddress types.HostAddress `asn1:"optional,explicit,tag:4"` + RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` +} + +// KrbCredInfo is the KRB_CRED_INFO part of KRB_CRED. +type KrbCredInfo struct { + Key types.EncryptionKey `asn1:"explicit,tag:0"` + PRealm string `asn1:"generalstring,optional,explicit,tag:1"` + PName types.PrincipalName `asn1:"optional,explicit,tag:2"` + Flags asn1.BitString `asn1:"optional,explicit,tag:3"` + AuthTime time.Time `asn1:"generalized,optional,explicit,tag:4"` + StartTime time.Time `asn1:"generalized,optional,explicit,tag:5"` + EndTime time.Time `asn1:"generalized,optional,explicit,tag:6"` + RenewTill time.Time `asn1:"generalized,optional,explicit,tag:7"` + SRealm string `asn1:"optional,explicit,ia5,tag:8"` + SName types.PrincipalName `asn1:"optional,explicit,tag:9"` + CAddr types.HostAddresses `asn1:"optional,explicit,tag:10"` +} + +// Unmarshal bytes b into the KRBCred struct. 
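+// A sketch of the decode path for a KRB_CRED, assuming credBytes came off
+// the wire and sessionKey is the types.EncryptionKey shared with the sender:
+//
+//	var cred KRBCred
+//	if err := cred.Unmarshal(credBytes); err != nil {
+//		// parse failed; err may carry a KRB_ERROR returned by the peer
+//	}
+//	if err := cred.DecryptEncPart(sessionKey); err != nil {
+//		// handle decryption failure
+//	}
+//	for i, info := range cred.DecryptedEncPart.TicketInfo {
+//		_ = info.Key // session key for cred.Tickets[i]
+//	}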
+func (k *KRBCred) Unmarshal(b []byte) error { + var m marshalKRBCred + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBCred)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_CRED + if m.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_CRED. Expected: %v; Actual: %v", expectedMsgType, m.MsgType) + } + k.PVNO = m.PVNO + k.MsgType = m.MsgType + k.EncPart = m.EncPart + if len(m.Tickets.Bytes) > 0 { + k.Tickets, err = unmarshalTicketsSequence(m.Tickets) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling tickets within KRB_CRED") + } + } + return nil +} + +// DecryptEncPart decrypts the encrypted part of a KRB_CRED. +func (k *KRBCred) DecryptEncPart(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_CRED_ENCPART) + if err != nil { + return krberror.Errorf(err, krberror.DecryptingError, "error decrypting KRB_CRED EncPart") + } + var denc EncKrbCredPart + err = denc.Unmarshal(b) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part of KRB_CRED") + } + k.DecryptedEncPart = denc + return nil +} + +// Unmarshal bytes b into the encrypted part of KRB_CRED. +func (k *EncKrbCredPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbCredPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling EncKrbCredPart") + } + return nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBError.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBError.go new file mode 100644 index 00000000000..d2cf32d6581 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBError.go @@ -0,0 +1,94 @@ +// Package messages implements Kerberos 5 message types and methods. +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +// KRBError implements RFC 4120 KRB_ERROR: https://tools.ietf.org/html/rfc4120#section-5.9.1. +type KRBError struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + CTime time.Time `asn1:"generalized,optional,explicit,tag:2"` + Cusec int `asn1:"optional,explicit,tag:3"` + STime time.Time `asn1:"generalized,explicit,tag:4"` + Susec int `asn1:"explicit,tag:5"` + ErrorCode int32 `asn1:"explicit,tag:6"` + CRealm string `asn1:"generalstring,optional,explicit,tag:7"` + CName types.PrincipalName `asn1:"optional,explicit,tag:8"` + Realm string `asn1:"generalstring,explicit,tag:9"` + SName types.PrincipalName `asn1:"explicit,tag:10"` + EText string `asn1:"generalstring,optional,explicit,tag:11"` + EData []byte `asn1:"optional,explicit,tag:12"` +} + +// NewKRBError creates a new KRBError. 
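+// Because KRBError implements the error interface (see Error below), a KDC
+// error surfaced by Unmarshal or by a higher-level exchange can be inspected
+// with errors.As. A sketch, assuming err came back from such an exchange:
+//
+//	var krbErr KRBError
+//	if errors.As(err, &krbErr) {
+//		fmt.Printf("KDC error %s for realm %s\n",
+//			errorcode.Lookup(krbErr.ErrorCode), krbErr.Realm)
+//	}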
+func NewKRBError(sname types.PrincipalName, realm string, code int32, etext string) KRBError { + t := time.Now().UTC() + return KRBError{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_ERROR, + STime: t, + Susec: int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)), + ErrorCode: code, + SName: sname, + Realm: realm, + EText: etext, + } +} + +// Unmarshal bytes b into the KRBError struct. +func (k *KRBError) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBError)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "KRB_ERROR unmarshal error") + } + expectedMsgType := msgtype.KRB_ERROR + if k.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_ERROR. Expected: %v; Actual: %v", expectedMsgType, k.MsgType) + } + return nil +} + +// Marshal a KRBError into bytes. +func (k *KRBError) Marshal() ([]byte, error) { + b, err := asn1.Marshal(*k) + if err != nil { + return b, krberror.Errorf(err, krberror.EncodingError, "error marshaling KRBError") + } + b = asn1tools.AddASNAppTag(b, asnAppTag.KRBError) + return b, nil +} + +// Error method implementing error interface on KRBError struct. +func (k KRBError) Error() string { + etxt := fmt.Sprintf("KRB Error: %s", errorcode.Lookup(k.ErrorCode)) + if k.EText != "" { + etxt = fmt.Sprintf("%s - %s", etxt, k.EText) + } + return etxt +} + +func processUnmarshalReplyError(b []byte, err error) error { + switch err.(type) { + case asn1.StructuralError: + var krberr KRBError + tmperr := krberr.Unmarshal(b) + if tmperr != nil { + return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply") + } + return krberr + default: + return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply") + } +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBPriv.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBPriv.go new file mode 100644 index 00000000000..0ca614949fd --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBPriv.go @@ -0,0 +1,108 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +// KRBPriv implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.7.1. +type KRBPriv struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + EncPart types.EncryptedData `asn1:"explicit,tag:3"` + DecryptedEncPart EncKrbPrivPart `asn1:"optional,omitempty"` // Not part of ASN1 bytes so marked as optional so unmarshalling works +} + +// EncKrbPrivPart is the encrypted part of KRB_PRIV. +type EncKrbPrivPart struct { + UserData []byte `asn1:"explicit,tag:0"` + Timestamp time.Time `asn1:"generalized,optional,explicit,tag:1"` + Usec int `asn1:"optional,explicit,tag:2"` + SequenceNumber int64 `asn1:"optional,explicit,tag:3"` + SAddress types.HostAddress `asn1:"explicit,tag:4"` + RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` +} + +// NewKRBPriv returns a new KRBPriv type. 
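+// The intended flow for producing a KRB_PRIV is: build the plaintext part,
+// wrap it with NewKRBPriv, encrypt, then marshal. A sketch, assuming payload,
+// a negotiated key (types.EncryptionKey) and the sender address addr are
+// supplied by the caller:
+//
+//	priv := NewKRBPriv(EncKrbPrivPart{
+//		UserData:  payload,
+//		Timestamp: time.Now().UTC(),
+//		SAddress:  addr,
+//	})
+//	if err := priv.EncryptEncPart(key); err != nil {
+//		// handle encryption failure
+//	}
+//	b, err := priv.Marshal() // KRB_PRIV message bytes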
+func NewKRBPriv(part EncKrbPrivPart) KRBPriv { + return KRBPriv{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_PRIV, + DecryptedEncPart: part, + } +} + +// Unmarshal bytes b into the KRBPriv struct. +func (k *KRBPriv) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBPriv)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_PRIV + if k.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_PRIV. Expected: %v; Actual: %v", expectedMsgType, k.MsgType) + } + return nil +} + +// Unmarshal bytes b into the EncKrbPrivPart struct. +func (k *EncKrbPrivPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbPrivPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "KRB_PRIV unmarshal error") + } + return nil +} + +// Marshal the KRBPriv. +func (k *KRBPriv) Marshal() ([]byte, error) { + tk := KRBPriv{ + PVNO: k.PVNO, + MsgType: k.MsgType, + EncPart: k.EncPart, + } + b, err := asn1.Marshal(tk) + if err != nil { + return []byte{}, err + } + b = asn1tools.AddASNAppTag(b, asnAppTag.KRBPriv) + return b, nil +} + +// EncryptEncPart encrypts the DecryptedEncPart within the KRBPriv. +// Use to prepare for marshaling. +func (k *KRBPriv) EncryptEncPart(key types.EncryptionKey) error { + b, err := asn1.Marshal(k.DecryptedEncPart) + if err != nil { + return err + } + b = asn1tools.AddASNAppTag(b, asnAppTag.EncKrbPrivPart) + k.EncPart, err = crypto.GetEncryptedData(b, key, keyusage.KRB_PRIV_ENCPART, 1) + if err != nil { + return err + } + return nil +} + +// DecryptEncPart decrypts the encrypted part of the KRBPriv message. +func (k *KRBPriv) DecryptEncPart(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_PRIV_ENCPART) + if err != nil { + return fmt.Errorf("error decrypting KRBPriv EncPart: %v", err) + } + err = k.DecryptedEncPart.Unmarshal(b) + if err != nil { + return fmt.Errorf("error unmarshaling encrypted part: %v", err) + } + return nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBSafe.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBSafe.go new file mode 100644 index 00000000000..52cd2844936 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBSafe.go @@ -0,0 +1,43 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +// KRBSafe implements RFC 4120 KRB_SAFE: https://tools.ietf.org/html/rfc4120#section-5.6.1. +type KRBSafe struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + SafeBody KRBSafeBody `asn1:"explicit,tag:2"` + Cksum types.Checksum `asn1:"explicit,tag:3"` +} + +// KRBSafeBody implements the KRB_SAFE_BODY of KRB_SAFE. +type KRBSafeBody struct { + UserData []byte `asn1:"explicit,tag:0"` + Timestamp time.Time `asn1:"generalized,optional,explicit,tag:1"` + Usec int `asn1:"optional,explicit,tag:2"` + SequenceNumber int64 `asn1:"optional,explicit,tag:3"` + SAddress types.HostAddress `asn1:"explicit,tag:4"` + RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` +} + +// Unmarshal bytes b into the KRBSafe struct. 
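+// A short sketch of parsing a KRB_SAFE message; verifying the checksum over
+// the safe body is left to the caller:
+//
+//	var safe KRBSafe
+//	if err := safe.Unmarshal(msgBytes); err != nil {
+//		// handle parse error
+//	}
+//	data := safe.SafeBody.UserData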
+func (s *KRBSafe) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, s, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBSafe)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_SAFE + if s.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_SAFE. Expected: %v; Actual: %v", expectedMsgType, s.MsgType) + } + return nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/Ticket.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/Ticket.go new file mode 100644 index 00000000000..11efad62c2c --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/Ticket.go @@ -0,0 +1,262 @@ +package messages + +import ( + "fmt" + "log" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/adtype" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/iana/flags" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/pac" + "github.com/jcmturner/gokrb5/v8/types" +) + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.3 + +// Ticket implements the Kerberos ticket. +type Ticket struct { + TktVNO int `asn1:"explicit,tag:0"` + Realm string `asn1:"generalstring,explicit,tag:1"` + SName types.PrincipalName `asn1:"explicit,tag:2"` + EncPart types.EncryptedData `asn1:"explicit,tag:3"` + DecryptedEncPart EncTicketPart `asn1:"optional"` // Not part of ASN1 bytes so marked as optional so unmarshalling works +} + +// EncTicketPart is the encrypted part of the Ticket. +type EncTicketPart struct { + Flags asn1.BitString `asn1:"explicit,tag:0"` + Key types.EncryptionKey `asn1:"explicit,tag:1"` + CRealm string `asn1:"generalstring,explicit,tag:2"` + CName types.PrincipalName `asn1:"explicit,tag:3"` + Transited TransitedEncoding `asn1:"explicit,tag:4"` + AuthTime time.Time `asn1:"generalized,explicit,tag:5"` + StartTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + EndTime time.Time `asn1:"generalized,explicit,tag:7"` + RenewTill time.Time `asn1:"generalized,explicit,optional,tag:8"` + CAddr types.HostAddresses `asn1:"explicit,optional,tag:9"` + AuthorizationData types.AuthorizationData `asn1:"explicit,optional,tag:10"` +} + +// TransitedEncoding part of the ticket's encrypted part. +type TransitedEncoding struct { + TRType int32 `asn1:"explicit,tag:0"` + Contents []byte `asn1:"explicit,tag:1"` +} + +// NewTicket creates a new Ticket instance. 
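+// NewTicket below is mainly useful on the issuing side (a KDC or a test
+// harness). A sketch, assuming cname/sname, the service keytab sktab and the
+// lifetime values are prepared by the caller; 18 is aes256-cts-hmac-sha1-96:
+//
+//	now := time.Now().UTC()
+//	tkt, sessionKey, err := NewTicket(cname, "EXAMPLE.COM", sname, "EXAMPLE.COM",
+//		types.NewKrbFlags(), sktab, 18, 1, now, now, now.Add(10*time.Hour), now.Add(24*time.Hour))
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = sessionKey // returned so the issuer can also place it in the reply's encrypted part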
+func NewTicket(cname types.PrincipalName, crealm string, sname types.PrincipalName, srealm string, flags asn1.BitString, sktab *keytab.Keytab, eTypeID int32, kvno int, authTime, startTime, endTime, renewTill time.Time) (Ticket, types.EncryptionKey, error) { + etype, err := crypto.GetEtype(eTypeID) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting etype for new ticket") + } + sessionKey, err := types.GenerateEncryptionKey(etype) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error generating session key") + } + + etp := EncTicketPart{ + Flags: flags, + Key: sessionKey, + CRealm: crealm, + CName: cname, + Transited: TransitedEncoding{}, + AuthTime: authTime, + StartTime: startTime, + EndTime: endTime, + RenewTill: renewTill, + } + b, err := asn1.Marshal(etp) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncodingError, "error marshalling ticket encpart") + } + b = asn1tools.AddASNAppTag(b, asnAppTag.EncTicketPart) + skey, _, err := sktab.GetEncryptionKey(sname, srealm, kvno, eTypeID) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting encryption key for new ticket") + } + ed, err := crypto.GetEncryptedData(b, skey, keyusage.KDC_REP_TICKET, kvno) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error encrypting ticket encpart") + } + tkt := Ticket{ + TktVNO: iana.PVNO, + Realm: srealm, + SName: sname, + EncPart: ed, + } + return tkt, sessionKey, nil +} + +// Unmarshal bytes b into a Ticket struct. +func (t *Ticket) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.Ticket)) + return err +} + +// Marshal the Ticket. +func (t *Ticket) Marshal() ([]byte, error) { + b, err := asn1.Marshal(*t) + if err != nil { + return nil, err + } + b = asn1tools.AddASNAppTag(b, asnAppTag.Ticket) + return b, nil +} + +// Unmarshal bytes b into the EncTicketPart struct. +func (t *EncTicketPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.EncTicketPart)) + return err +} + +// unmarshalTicket returns a ticket from the bytes provided. +func unmarshalTicket(b []byte) (t Ticket, err error) { + err = t.Unmarshal(b) + return +} + +// UnmarshalTicketsSequence returns a slice of Tickets from a raw ASN1 value. +func unmarshalTicketsSequence(in asn1.RawValue) ([]Ticket, error) { + //This is a workaround to a asn1 decoding issue in golang - https://github.com/golang/go/issues/17321. It's not pretty I'm afraid + //We pull out raw values from the larger raw value (that is actually the data of the sequence of raw values) and track our position moving along the data. 
+ b := in.Bytes + // Ignore the head of the asn1 stream (1 byte for tag and those for the length) as this is what tells us its a sequence but we're handling it ourselves + p := 1 + asn1tools.GetNumberBytesInLengthHeader(in.Bytes) + var tkts []Ticket + var raw asn1.RawValue + for p < (len(b)) { + _, err := asn1.UnmarshalWithParams(b[p:], &raw, fmt.Sprintf("application,tag:%d", asnAppTag.Ticket)) + if err != nil { + return nil, fmt.Errorf("unmarshaling sequence of tickets failed getting length of ticket: %v", err) + } + t, err := unmarshalTicket(b[p:]) + if err != nil { + return nil, fmt.Errorf("unmarshaling sequence of tickets failed: %v", err) + } + p += len(raw.FullBytes) + tkts = append(tkts, t) + } + MarshalTicketSequence(tkts) + return tkts, nil +} + +// MarshalTicketSequence marshals a slice of Tickets returning an ASN1 raw value containing the ticket sequence. +func MarshalTicketSequence(tkts []Ticket) (asn1.RawValue, error) { + raw := asn1.RawValue{ + Class: 2, + IsCompound: true, + } + if len(tkts) < 1 { + // There are no tickets to marshal + return raw, nil + } + var btkts []byte + for i, t := range tkts { + b, err := t.Marshal() + if err != nil { + return raw, fmt.Errorf("error marshaling ticket number %d in sequence of tickets", i+1) + } + btkts = append(btkts, b...) + } + // The ASN1 wrapping consists of 2 bytes: + // 1st byte -> Identifier Octet - In this case an OCTET STRING (ASN TAG + // 2nd byte -> The length (this will be the size indicated in the input bytes + 2 for the additional bytes we add here. + // Application Tag: + //| Byte: | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | + //| Value: | 0 | 1 | 1 | From the RFC spec 4120 | + //| Explanation | Defined by the ASN1 encoding rules for an application tag | A value of 1 indicates a constructed type | The ASN Application tag value | + btkts = append(asn1tools.MarshalLengthBytes(len(btkts)), btkts...) + btkts = append([]byte{byte(32 + asn1.TagSequence)}, btkts...) + raw.Bytes = btkts + // If we need to create the full bytes then identifier octet is "context-specific" = 128 + "constructed" + 32 + the wrapping explicit tag (11) + //fmt.Fprintf(os.Stderr, "mRaw fb: %v\n", raw.FullBytes) + return raw, nil +} + +// DecryptEncPart decrypts the encrypted part of the ticket. +// The sname argument can be used to specify which service principal's key should be used to decrypt the ticket. +// If nil is passed as the sname then the service principal specified within the ticket it used. +func (t *Ticket) DecryptEncPart(keytab *keytab.Keytab, sname *types.PrincipalName) error { + if sname == nil { + sname = &t.SName + } + key, _, err := keytab.GetEncryptionKey(*sname, t.Realm, t.EncPart.KVNO, t.EncPart.EType) + if err != nil { + return NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err)) + } + return t.Decrypt(key) +} + +// Decrypt decrypts the encrypted part of the ticket using the key provided. +func (t *Ticket) Decrypt(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(t.EncPart, key, keyusage.KDC_REP_TICKET) + if err != nil { + return fmt.Errorf("error decrypting Ticket EncPart: %v", err) + } + var denc EncTicketPart + err = denc.Unmarshal(b) + if err != nil { + return fmt.Errorf("error unmarshaling encrypted part: %v", err) + } + t.DecryptedEncPart = denc + return nil +} + +// GetPACType returns a Microsoft PAC that has been extracted from the ticket and processed. 
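+// On the service side the PAC is normally extracted after the AP_REQ has been
+// verified and its ticket decrypted. A sketch, assuming kt is the service
+// keytab, apReq passed Verify above, and l is a *log.Logger:
+//
+//	isPAC, pacType, err := apReq.Ticket.GetPACType(kt, nil, l)
+//	if err != nil {
+//		// a PAC was present but could not be processed
+//	}
+//	if isPAC {
+//		_ = pacType // carries the validation info, client info and checksums
+//	}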
+func (t *Ticket) GetPACType(keytab *keytab.Keytab, sname *types.PrincipalName, l *log.Logger) (bool, pac.PACType, error) { + var isPAC bool + for _, ad := range t.DecryptedEncPart.AuthorizationData { + if ad.ADType == adtype.ADIfRelevant { + var ad2 types.AuthorizationData + err := ad2.Unmarshal(ad.ADData) + if err != nil { + l.Printf("PAC authorization data could not be unmarshaled: %v", err) + continue + } + if ad2[0].ADType == adtype.ADWin2KPAC { + isPAC = true + var p pac.PACType + err = p.Unmarshal(ad2[0].ADData) + if err != nil { + return isPAC, p, fmt.Errorf("error unmarshaling PAC: %v", err) + } + if sname == nil { + sname = &t.SName + } + key, _, err := keytab.GetEncryptionKey(*sname, t.Realm, t.EncPart.KVNO, t.EncPart.EType) + if err != nil { + return isPAC, p, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err)) + } + err = p.ProcessPACInfoBuffers(key, l) + return isPAC, p, err + } + } + } + return isPAC, pac.PACType{}, nil +} + +// Valid checks it the ticket is currently valid. Max duration passed endtime passed in as argument. +func (t *Ticket) Valid(d time.Duration) (bool, error) { + // Check for future tickets or invalid tickets + time := time.Now().UTC() + if t.DecryptedEncPart.StartTime.Sub(time) > d || types.IsFlagSet(&t.DecryptedEncPart.Flags, flags.Invalid) { + return false, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_TKT_NYV, "service ticket provided is not yet valid") + } + + // Check for expired ticket + if time.Sub(t.DecryptedEncPart.EndTime) > d { + return false, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_TKT_EXPIRED, "service ticket provided has expired") + } + + return true, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/client_claims.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_claims.go new file mode 100644 index 00000000000..36871e00fb4 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_claims.go @@ -0,0 +1,34 @@ +package pac + +import ( + "bytes" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +// Claims reference: https://msdn.microsoft.com/en-us/library/hh553895.aspx + +// ClientClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh536365.aspx +type ClientClaimsInfo struct { + ClaimsSetMetadata mstypes.ClaimsSetMetadata + ClaimsSet mstypes.ClaimsSet +} + +// Unmarshal bytes into the ClientClaimsInfo struct +func (k *ClientClaimsInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + m := new(mstypes.ClaimsSetMetadata) + err = dec.Decode(m) + if err != nil { + err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSetMetadata: %v", err) + return + } + k.ClaimsSetMetadata = *m + k.ClaimsSet, err = k.ClaimsSetMetadata.ClaimsSet() + if err != nil { + err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSet: %v", err) + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/client_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_info.go new file mode 100644 index 00000000000..ddd957808a8 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_info.go @@ -0,0 +1,31 @@ +package pac + +import ( + "bytes" + + "github.com/jcmturner/rpc/v2/mstypes" +) + +// ClientInfo implements https://msdn.microsoft.com/en-us/library/cc237951.aspx +type ClientInfo struct { + ClientID mstypes.FileTime // A FILETIME structure in little-endian format that contains the Kerberos initial ticket-granting ticket TGT authentication time + 
NameLength uint16 // An unsigned 16-bit integer in little-endian format that specifies the length, in bytes, of the Name field. + Name string // An array of 16-bit Unicode characters in little-endian format that contains the client's account name. +} + +// Unmarshal bytes into the ClientInfo struct +func (k *ClientInfo) Unmarshal(b []byte) (err error) { + //The PAC_CLIENT_INFO structure is a simple structure that is not NDR-encoded. + r := mstypes.NewReader(bytes.NewReader(b)) + + k.ClientID, err = r.FileTime() + if err != nil { + return + } + k.NameLength, err = r.Uint16() + if err != nil { + return + } + k.Name, err = r.UTF16String(int(k.NameLength)) + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/credentials_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/credentials_info.go new file mode 100644 index 00000000000..0c7ccd4263e --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/credentials_info.go @@ -0,0 +1,86 @@ +package pac + +import ( + "bytes" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/types" + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +// https://msdn.microsoft.com/en-us/library/cc237931.aspx + +// CredentialsInfo implements https://msdn.microsoft.com/en-us/library/cc237953.aspx +type CredentialsInfo struct { + Version uint32 // A 32-bit unsigned integer in little-endian format that defines the version. MUST be 0x00000000. + EType uint32 + PACCredentialDataEncrypted []byte // Key usage number for encryption: KERB_NON_KERB_SALT (16) + PACCredentialData CredentialData +} + +// Unmarshal bytes into the CredentialsInfo struct +func (c *CredentialsInfo) Unmarshal(b []byte, k types.EncryptionKey) (err error) { + //The CredentialsInfo structure is a simple structure that is not NDR-encoded. + r := mstypes.NewReader(bytes.NewReader(b)) + + c.Version, err = r.Uint32() + if err != nil { + return + } + if c.Version != 0 { + err = errors.New("credentials info version is not zero") + return + } + c.EType, err = r.Uint32() + if err != nil { + return + } + c.PACCredentialDataEncrypted, err = r.ReadBytes(len(b) - 8) + if err != nil { + err = fmt.Errorf("error reading PAC Credetials Data: %v", err) + return + } + + err = c.DecryptEncPart(k) + if err != nil { + err = fmt.Errorf("error decrypting PAC Credentials Data: %v", err) + return + } + return +} + +// DecryptEncPart decrypts the encrypted part of the CredentialsInfo. +func (c *CredentialsInfo) DecryptEncPart(k types.EncryptionKey) error { + if k.KeyType != int32(c.EType) { + return fmt.Errorf("key provided is not the correct type. Type needed: %d, type provided: %d", c.EType, k.KeyType) + } + pt, err := crypto.DecryptMessage(c.PACCredentialDataEncrypted, k, keyusage.KERB_NON_KERB_SALT) + if err != nil { + return err + } + err = c.PACCredentialData.Unmarshal(pt) + if err != nil { + return err + } + return nil +} + +// CredentialData implements https://msdn.microsoft.com/en-us/library/cc237952.aspx +type CredentialData struct { + CredentialCount uint32 + Credentials []SECPKGSupplementalCred // Size is the value of CredentialCount +} + +// Unmarshal converts the bytes provided into a CredentialData type. 
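+// CredentialData is normally reached via CredentialsInfo.Unmarshal above,
+// which decrypts the buffer with the AS reply key (key usage
+// KERB_NON_KERB_SALT). A sketch, assuming replyKey was returned by
+// ASRep.DecryptEncPart and buf is the raw PAC credentials buffer:
+//
+//	var ci CredentialsInfo
+//	if err := ci.Unmarshal(buf, replyKey); err != nil {
+//		// handle decryption/decoding failure
+//	}
+//	creds := ci.PACCredentialData.Credentials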
+func (c *CredentialData) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(c) + if err != nil { + err = fmt.Errorf("error unmarshaling KerbValidationInfo: %v", err) + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/device_claims.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_claims.go new file mode 100644 index 00000000000..6eb29260050 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_claims.go @@ -0,0 +1,34 @@ +package pac + +import ( + "bytes" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +// Claims reference: https://msdn.microsoft.com/en-us/library/hh553895.aspx + +// DeviceClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh554226.aspx +type DeviceClaimsInfo struct { + ClaimsSetMetadata mstypes.ClaimsSetMetadata + ClaimsSet mstypes.ClaimsSet +} + +// Unmarshal bytes into the ClientClaimsInfo struct +func (k *DeviceClaimsInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + m := new(mstypes.ClaimsSetMetadata) + err = dec.Decode(m) + if err != nil { + err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSetMetadata: %v", err) + return + } + k.ClaimsSetMetadata = *m + k.ClaimsSet, err = k.ClaimsSetMetadata.ClaimsSet() + if err != nil { + err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSet: %v", err) + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/device_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_info.go new file mode 100644 index 00000000000..ce82daa5801 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_info.go @@ -0,0 +1,32 @@ +package pac + +import ( + "bytes" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +// DeviceInfo implements https://msdn.microsoft.com/en-us/library/hh536402.aspx +type DeviceInfo struct { + UserID uint32 // A 32-bit unsigned integer that contains the RID of the account. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account. + PrimaryGroupID uint32 // A 32-bit unsigned integer that contains the RID for the primary group to which this account belongs. + AccountDomainID mstypes.RPCSID `ndr:"pointer"` // A SID structure that contains the SID for the domain of the account.This member is used in conjunction with the UserId, and GroupIds members to create the user and group SIDs for the client. + AccountGroupCount uint32 // A 32-bit unsigned integer that contains the number of groups within the account domain to which the account belongs + AccountGroupIDs []mstypes.GroupMembership `ndr:"pointer,conformant"` // A pointer to a list of GROUP_MEMBERSHIP (section 2.2.2) structures that contains the groups to which the account belongs in the account domain. The number of groups in this list MUST be equal to GroupCount. + SIDCount uint32 // A 32-bit unsigned integer that contains the total number of SIDs present in the ExtraSids member. + ExtraSIDs []mstypes.KerbSidAndAttributes `ndr:"pointer,conformant"` // A pointer to a list of KERB_SID_AND_ATTRIBUTES structures that contain a list of SIDs corresponding to groups not in domains. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account. + DomainGroupCount uint32 // A 32-bit unsigned integer that contains the number of domains with groups to which the account belongs. 
+ DomainGroup []mstypes.DomainGroupMembership `ndr:"pointer,conformant"` // A pointer to a list of DOMAIN_GROUP_MEMBERSHIP structures (section 2.2.3) that contains the domains to which the account belongs to a group. The number of sets in this list MUST be equal to DomainCount. +} + +// Unmarshal bytes into the DeviceInfo struct +func (k *DeviceInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(k) + if err != nil { + err = fmt.Errorf("error unmarshaling DeviceInfo: %v", err) + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/kerb_validation_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/kerb_validation_info.go new file mode 100644 index 00000000000..dde78614eee --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/kerb_validation_info.go @@ -0,0 +1,110 @@ +// Package pac implements Microsoft Privilege Attribute Certificate (PAC) processing. +package pac + +import ( + "bytes" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +// KERB_VALIDATION_INFO flags. +const ( + USERFLAG_GUEST = 31 // Authentication was done via the GUEST account; no password was used. + USERFLAG_NO_ENCRYPTION_AVAILABLE = 30 // No encryption is available. + USERFLAG_LAN_MANAGER_KEY = 28 // LAN Manager key was used for authentication. + USERFLAG_SUB_AUTH = 25 // Sub-authentication used; session key came from the sub-authentication package. + USERFLAG_EXTRA_SIDS = 26 // Indicates that the ExtraSids field is populated and contains additional SIDs. + USERFLAG_MACHINE_ACCOUNT = 24 // Indicates that the account is a machine account. + USERFLAG_DC_NTLM2 = 23 // Indicates that the domain controller understands NTLMv2. + USERFLAG_RESOURCE_GROUPIDS = 22 // Indicates that the ResourceGroupIds field is populated. + USERFLAG_PROFILEPATH = 21 // Indicates that ProfilePath is populated. + USERFLAG_NTLM2_NTCHALLENGERESP = 20 // The NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation. + USERFLAG_LM2_LMCHALLENGERESP = 19 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation. + USERFLAG_AUTH_LMCHALLENGERESP_KEY_NTCHALLENGERESP = 18 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and the NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used session key generation. 
+) + +// KerbValidationInfo implement https://msdn.microsoft.com/en-us/library/cc237948.aspx +type KerbValidationInfo struct { + LogOnTime mstypes.FileTime + LogOffTime mstypes.FileTime + KickOffTime mstypes.FileTime + PasswordLastSet mstypes.FileTime + PasswordCanChange mstypes.FileTime + PasswordMustChange mstypes.FileTime + EffectiveName mstypes.RPCUnicodeString + FullName mstypes.RPCUnicodeString + LogonScript mstypes.RPCUnicodeString + ProfilePath mstypes.RPCUnicodeString + HomeDirectory mstypes.RPCUnicodeString + HomeDirectoryDrive mstypes.RPCUnicodeString + LogonCount uint16 + BadPasswordCount uint16 + UserID uint32 + PrimaryGroupID uint32 + GroupCount uint32 + GroupIDs []mstypes.GroupMembership `ndr:"pointer,conformant"` + UserFlags uint32 + UserSessionKey mstypes.UserSessionKey + LogonServer mstypes.RPCUnicodeString + LogonDomainName mstypes.RPCUnicodeString + LogonDomainID mstypes.RPCSID `ndr:"pointer"` + Reserved1 [2]uint32 // Has 2 elements + UserAccountControl uint32 + SubAuthStatus uint32 + LastSuccessfulILogon mstypes.FileTime + LastFailedILogon mstypes.FileTime + FailedILogonCount uint32 + Reserved3 uint32 + SIDCount uint32 + ExtraSIDs []mstypes.KerbSidAndAttributes `ndr:"pointer,conformant"` + ResourceGroupDomainSID mstypes.RPCSID `ndr:"pointer"` + ResourceGroupCount uint32 + ResourceGroupIDs []mstypes.GroupMembership `ndr:"pointer,conformant"` +} + +// Unmarshal bytes into the DeviceInfo struct +func (k *KerbValidationInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(k) + if err != nil { + err = fmt.Errorf("error unmarshaling KerbValidationInfo: %v", err) + } + return +} + +// GetGroupMembershipSIDs returns a slice of strings containing the group membership SIDs found in the PAC. 
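As an aside, not part of the vendored file: a minimal sketch of how a service might put the KerbValidationInfo buffer to use once a ticket has been decrypted, via the PACType wrapper and ProcessPACInfoBuffers defined later in this patch. pacData and key are assumed inputs (the raw PAC bytes from the ticket's authorization data and the service's long-term key); the logger is only used for non-fatal buffer warnings.

package example

import (
	"log"
	"os"

	"github.com/jcmturner/gokrb5/v8/pac"
	"github.com/jcmturner/gokrb5/v8/types"
)

// groupSIDsFromPAC is illustrative only. It unmarshals the PAC, processes the
// info buffers (which also verifies the server checksum against the service
// key) and returns the group SIDs from the KERB_VALIDATION_INFO buffer.
func groupSIDsFromPAC(pacData []byte, key types.EncryptionKey) ([]string, error) {
	var p pac.PACType
	if err := p.Unmarshal(pacData); err != nil {
		return nil, err
	}
	l := log.New(os.Stderr, "pac: ", log.LstdFlags)
	if err := p.ProcessPACInfoBuffers(key, l); err != nil {
		return nil, err
	}
	return p.KerbValidationInfo.GetGroupMembershipSIDs(), nil
}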
+func (k *KerbValidationInfo) GetGroupMembershipSIDs() []string { + var g []string + lSID := k.LogonDomainID.String() + for i := range k.GroupIDs { + g = append(g, fmt.Sprintf("%s-%d", lSID, k.GroupIDs[i].RelativeID)) + } + for _, s := range k.ExtraSIDs { + var exists = false + for _, es := range g { + if es == s.SID.String() { + exists = true + break + } + } + if !exists { + g = append(g, s.SID.String()) + } + } + for _, r := range k.ResourceGroupIDs { + var exists = false + s := fmt.Sprintf("%s-%d", k.ResourceGroupDomainSID.String(), r.RelativeID) + for _, es := range g { + if es == s { + exists = true + break + } + } + if !exists { + g = append(g, s) + } + } + return g +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/pac_type.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/pac_type.go new file mode 100644 index 00000000000..fab2ad7cda7 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/pac_type.go @@ -0,0 +1,251 @@ +package pac + +import ( + "bytes" + "errors" + "fmt" + "log" + + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/types" + "github.com/jcmturner/rpc/v2/mstypes" +) + +const ( + infoTypeKerbValidationInfo uint32 = 1 + infoTypeCredentials uint32 = 2 + infoTypePACServerSignatureData uint32 = 6 + infoTypePACKDCSignatureData uint32 = 7 + infoTypePACClientInfo uint32 = 10 + infoTypeS4UDelegationInfo uint32 = 11 + infoTypeUPNDNSInfo uint32 = 12 + infoTypePACClientClaimsInfo uint32 = 13 + infoTypePACDeviceInfo uint32 = 14 + infoTypePACDeviceClaimsInfo uint32 = 15 +) + +// PACType implements: https://msdn.microsoft.com/en-us/library/cc237950.aspx +type PACType struct { + CBuffers uint32 + Version uint32 + Buffers []InfoBuffer + Data []byte + KerbValidationInfo *KerbValidationInfo + CredentialsInfo *CredentialsInfo + ServerChecksum *SignatureData + KDCChecksum *SignatureData + ClientInfo *ClientInfo + S4UDelegationInfo *S4UDelegationInfo + UPNDNSInfo *UPNDNSInfo + ClientClaimsInfo *ClientClaimsInfo + DeviceInfo *DeviceInfo + DeviceClaimsInfo *DeviceClaimsInfo + ZeroSigData []byte +} + +// InfoBuffer implements the PAC Info Buffer: https://msdn.microsoft.com/en-us/library/cc237954.aspx +type InfoBuffer struct { + ULType uint32 // A 32-bit unsigned integer in little-endian format that describes the type of data present in the buffer contained at Offset. + CBBufferSize uint32 // A 32-bit unsigned integer in little-endian format that contains the size, in bytes, of the buffer in the PAC located at Offset. + Offset uint64 // A 64-bit unsigned integer in little-endian format that contains the offset to the beginning of the buffer, in bytes, from the beginning of the PACTYPE structure. The data offset MUST be a multiple of eight. The following sections specify the format of each type of element. 
+} + +// Unmarshal bytes into the PACType struct +func (pac *PACType) Unmarshal(b []byte) (err error) { + pac.Data = b + zb := make([]byte, len(b), len(b)) + copy(zb, b) + pac.ZeroSigData = zb + r := mstypes.NewReader(bytes.NewReader(b)) + pac.CBuffers, err = r.Uint32() + if err != nil { + return + } + pac.Version, err = r.Uint32() + if err != nil { + return + } + buf := make([]InfoBuffer, pac.CBuffers, pac.CBuffers) + for i := range buf { + buf[i].ULType, err = r.Uint32() + if err != nil { + return + } + buf[i].CBBufferSize, err = r.Uint32() + if err != nil { + return + } + buf[i].Offset, err = r.Uint64() + if err != nil { + return + } + } + pac.Buffers = buf + return nil +} + +// ProcessPACInfoBuffers processes the PAC Info Buffers. +// https://msdn.microsoft.com/en-us/library/cc237954.aspx +func (pac *PACType) ProcessPACInfoBuffers(key types.EncryptionKey, l *log.Logger) error { + for _, buf := range pac.Buffers { + p := make([]byte, buf.CBBufferSize, buf.CBBufferSize) + copy(p, pac.Data[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)]) + switch buf.ULType { + case infoTypeKerbValidationInfo: + if pac.KerbValidationInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k KerbValidationInfo + err := k.Unmarshal(p) + if err != nil { + return fmt.Errorf("error processing KerbValidationInfo: %v", err) + } + pac.KerbValidationInfo = &k + case infoTypeCredentials: + // Currently PAC parsing is only useful on the service side in gokrb5 + // The CredentialsInfo are only useful when gokrb5 has implemented RFC4556 and only applied on the client side. + // Skipping CredentialsInfo - will be revisited under RFC4556 implementation. + continue + //if pac.CredentialsInfo != nil { + // //Must ignore subsequent buffers of this type + // continue + //} + //var k CredentialsInfo + //err := k.Unmarshal(p, key) // The encryption key used is the AS reply key only available to the client. 
+ //if err != nil { + // return fmt.Errorf("error processing CredentialsInfo: %v", err) + //} + //pac.CredentialsInfo = &k + case infoTypePACServerSignatureData: + if pac.ServerChecksum != nil { + //Must ignore subsequent buffers of this type + continue + } + var k SignatureData + zb, err := k.Unmarshal(p) + copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb) + if err != nil { + return fmt.Errorf("error processing ServerChecksum: %v", err) + } + pac.ServerChecksum = &k + case infoTypePACKDCSignatureData: + if pac.KDCChecksum != nil { + //Must ignore subsequent buffers of this type + continue + } + var k SignatureData + zb, err := k.Unmarshal(p) + copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb) + if err != nil { + return fmt.Errorf("error processing KDCChecksum: %v", err) + } + pac.KDCChecksum = &k + case infoTypePACClientInfo: + if pac.ClientInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k ClientInfo + err := k.Unmarshal(p) + if err != nil { + return fmt.Errorf("error processing ClientInfo: %v", err) + } + pac.ClientInfo = &k + case infoTypeS4UDelegationInfo: + if pac.S4UDelegationInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k S4UDelegationInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process S4U_DelegationInfo: %v", err) + continue + } + pac.S4UDelegationInfo = &k + case infoTypeUPNDNSInfo: + if pac.UPNDNSInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k UPNDNSInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process UPN_DNSInfo: %v", err) + continue + } + pac.UPNDNSInfo = &k + case infoTypePACClientClaimsInfo: + if pac.ClientClaimsInfo != nil || len(p) < 1 { + //Must ignore subsequent buffers of this type + continue + } + var k ClientClaimsInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process ClientClaimsInfo: %v", err) + continue + } + pac.ClientClaimsInfo = &k + case infoTypePACDeviceInfo: + if pac.DeviceInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k DeviceInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process DeviceInfo: %v", err) + continue + } + pac.DeviceInfo = &k + case infoTypePACDeviceClaimsInfo: + if pac.DeviceClaimsInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k DeviceClaimsInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process DeviceClaimsInfo: %v", err) + continue + } + pac.DeviceClaimsInfo = &k + } + } + + if ok, err := pac.verify(key); !ok { + return err + } + + return nil +} + +func (pac *PACType) verify(key types.EncryptionKey) (bool, error) { + if pac.KerbValidationInfo == nil { + return false, errors.New("PAC Info Buffers does not contain a KerbValidationInfo") + } + if pac.ServerChecksum == nil { + return false, errors.New("PAC Info Buffers does not contain a ServerChecksum") + } + if pac.KDCChecksum == nil { + return false, errors.New("PAC Info Buffers does not contain a KDCChecksum") + } + if pac.ClientInfo == nil { + return false, errors.New("PAC Info Buffers does not contain a ClientInfo") + } + etype, err := crypto.GetChksumEtype(int32(pac.ServerChecksum.SignatureType)) + if err != nil { + return false, err + } + if ok := etype.VerifyChecksum(key.KeyValue, + pac.ZeroSigData, + pac.ServerChecksum.Signature, + keyusage.KERB_NON_KERB_CKSUM_SALT); !ok { + return false, errors.New("PAC service checksum verification 
failed") + } + + return true, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/s4u_delegation_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/s4u_delegation_info.go new file mode 100644 index 00000000000..da837d4b3a4 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/s4u_delegation_info.go @@ -0,0 +1,26 @@ +package pac + +import ( + "bytes" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +// S4UDelegationInfo implements https://msdn.microsoft.com/en-us/library/cc237944.aspx +type S4UDelegationInfo struct { + S4U2proxyTarget mstypes.RPCUnicodeString // The name of the principal to whom the application can forward the ticket. + TransitedListSize uint32 + S4UTransitedServices []mstypes.RPCUnicodeString `ndr:"pointer,conformant"` // List of all services that have been delegated through by this client and subsequent services or servers.. Size is value of TransitedListSize +} + +// Unmarshal bytes into the S4UDelegationInfo struct +func (k *S4UDelegationInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(k) + if err != nil { + err = fmt.Errorf("error unmarshaling S4UDelegationInfo: %v", err) + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/signature_data.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/signature_data.go new file mode 100644 index 00000000000..8f6aa58faa8 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/signature_data.go @@ -0,0 +1,67 @@ +package pac + +import ( + "bytes" + + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/rpc/v2/mstypes" +) + +/* +https://msdn.microsoft.com/en-us/library/cc237955.aspx + +The Key Usage Value MUST be KERB_NON_KERB_CKSUM_SALT (17) [MS-KILE] (section 3.1.5.9). + +Server Signature (SignatureType = 0x00000006) +https://msdn.microsoft.com/en-us/library/cc237957.aspx + +KDC Signature (SignatureType = 0x00000007) +https://msdn.microsoft.com/en-us/library/dd357117.aspx +*/ + +// SignatureData implements https://msdn.microsoft.com/en-us/library/cc237955.aspx +type SignatureData struct { + SignatureType uint32 // A 32-bit unsigned integer value in little-endian format that defines the cryptographic system used to calculate the checksum. This MUST be one of the following checksum types: KERB_CHECKSUM_HMAC_MD5 (signature size = 16), HMAC_SHA1_96_AES128 (signature size = 12), HMAC_SHA1_96_AES256 (signature size = 12). + Signature []byte // Size depends on the type. See comment above. + RODCIdentifier uint16 // A 16-bit unsigned integer value in little-endian format that contains the first 16 bits of the key version number ([MS-KILE] section 3.1.5.8) when the KDC is an RODC. When the KDC is not an RODC, this field does not exist. +} + +// Unmarshal bytes into the SignatureData struct +func (k *SignatureData) Unmarshal(b []byte) (rb []byte, err error) { + r := mstypes.NewReader(bytes.NewReader(b)) + + k.SignatureType, err = r.Uint32() + if err != nil { + return + } + + var c int + switch k.SignatureType { + case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED: + c = 16 + case uint32(chksumtype.HMAC_SHA1_96_AES128): + c = 12 + case uint32(chksumtype.HMAC_SHA1_96_AES256): + c = 12 + } + k.Signature, err = r.ReadBytes(c) + if err != nil { + return + } + + // When the KDC is not an Read Only Domain Controller (RODC), this field does not exist. 
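+	// The buffer holds 4 bytes of SignatureType followed by c bytes of signature;
+	// RODCIdentifier is read only when a further 2 bytes are present.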
+ if len(b) >= 4+c+2 { + k.RODCIdentifier, err = r.Uint16() + if err != nil { + return + } + } + + // Create bytes with zeroed signature needed for checksum verification + rb = make([]byte, len(b), len(b)) + copy(rb, b) + z := make([]byte, len(b), len(b)) + copy(rb[4:4+c], z) + + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go new file mode 100644 index 00000000000..d40679d49b8 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go @@ -0,0 +1,87 @@ +package pac + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +const ( + // NTLMSupCredLMOWF indicates that the LM OWF member is present and valid. + NTLMSupCredLMOWF uint32 = 31 + // NTLMSupCredNTOWF indicates that the NT OWF member is present and valid. + NTLMSupCredNTOWF uint32 = 30 +) + +// NTLMSupplementalCred implements https://msdn.microsoft.com/en-us/library/cc237949.aspx +type NTLMSupplementalCred struct { + Version uint32 // A 32-bit unsigned integer that defines the credential version.This field MUST be 0x00000000. + Flags uint32 + LMPassword []byte // A 16-element array of unsigned 8-bit integers that define the LM OWF. The LMPassword member MUST be ignored if the L flag is not set in the Flags member. + NTPassword []byte // A 16-element array of unsigned 8-bit integers that define the NT OWF. The NTPassword member MUST be ignored if the N flag is not set in the Flags member. +} + +// Unmarshal converts the bytes provided into a NTLMSupplementalCred. +func (c *NTLMSupplementalCred) Unmarshal(b []byte) (err error) { + r := mstypes.NewReader(bytes.NewReader(b)) + c.Version, err = r.Uint32() + if err != nil { + return + } + if c.Version != 0 { + err = errors.New("NTLMSupplementalCred version is not zero") + return + } + c.Flags, err = r.Uint32() + if err != nil { + return + } + if isFlagSet(c.Flags, NTLMSupCredLMOWF) { + c.LMPassword, err = r.ReadBytes(16) + if err != nil { + return + } + } + if isFlagSet(c.Flags, NTLMSupCredNTOWF) { + c.NTPassword, err = r.ReadBytes(16) + if err != nil { + return + } + } + return +} + +// isFlagSet tests if a flag is set in the uint32 little endian flag +func isFlagSet(f uint32, i uint32) bool { + //Which byte? + b := int(i / 8) + //Which bit in byte + p := uint(7 - (int(i) - 8*b)) + fb := make([]byte, 4) + binary.LittleEndian.PutUint32(fb, f) + if fb[b]&(1< - no domain specified + // \ + // @ + if strings.Contains(vc[0], `\`) { + u := strings.SplitN(vc[0], `\`, 2) + domain = u[0] + username = u[1] + } else if strings.Contains(vc[0], `@`) { + u := strings.SplitN(vc[0], `@`, 2) + domain = u[1] + username = u[0] + } else { + username = vc[0] + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/service/cache.go b/vendor/github.com/jcmturner/gokrb5/v8/service/cache.go new file mode 100644 index 00000000000..038e594ec06 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/service/cache.go @@ -0,0 +1,128 @@ +// Package service provides server side integrations for Kerberos authentication. +package service + +import ( + "github.com/jcmturner/gokrb5/v8/types" + "sync" + "time" +) + +// Replay cache is required as specified in RFC 4120 section 3.2.3 + +// Cache for tickets received from clients keyed by fully qualified client name. Used to track replay of tickets. 
+type Cache struct { + entries map[string]clientEntries + mux sync.RWMutex +} + +// clientEntries holds entries of client details sent to the service. +type clientEntries struct { + replayMap map[time.Time]replayCacheEntry + seqNumber int64 + subKey types.EncryptionKey +} + +// Cache entry tracking client time values of tickets sent to the service. +type replayCacheEntry struct { + presentedTime time.Time + sName types.PrincipalName + cTime time.Time // This combines the ticket's CTime and Cusec +} + +func (c *Cache) getClientEntries(cname types.PrincipalName) (clientEntries, bool) { + c.mux.RLock() + defer c.mux.RUnlock() + ce, ok := c.entries[cname.PrincipalNameString()] + return ce, ok +} + +func (c *Cache) getClientEntry(cname types.PrincipalName, t time.Time) (replayCacheEntry, bool) { + if ce, ok := c.getClientEntries(cname); ok { + c.mux.RLock() + defer c.mux.RUnlock() + if e, ok := ce.replayMap[t]; ok { + return e, true + } + } + return replayCacheEntry{}, false +} + +// Instance of the ServiceCache. This needs to be a singleton. +var replayCache Cache +var once sync.Once + +// GetReplayCache returns a pointer to the Cache singleton. +func GetReplayCache(d time.Duration) *Cache { + // Create a singleton of the ReplayCache and start a background thread to regularly clean out old entries + once.Do(func() { + replayCache = Cache{ + entries: make(map[string]clientEntries), + } + go func() { + for { + // TODO consider using a context here. + time.Sleep(d) + replayCache.ClearOldEntries(d) + } + }() + }) + return &replayCache +} + +// AddEntry adds an entry to the Cache. +func (c *Cache) AddEntry(sname types.PrincipalName, a types.Authenticator) { + ct := a.CTime.Add(time.Duration(a.Cusec) * time.Microsecond) + if ce, ok := c.getClientEntries(a.CName); ok { + c.mux.Lock() + defer c.mux.Unlock() + ce.replayMap[ct] = replayCacheEntry{ + presentedTime: time.Now().UTC(), + sName: sname, + cTime: ct, + } + ce.seqNumber = a.SeqNumber + ce.subKey = a.SubKey + } else { + c.mux.Lock() + defer c.mux.Unlock() + c.entries[a.CName.PrincipalNameString()] = clientEntries{ + replayMap: map[time.Time]replayCacheEntry{ + ct: { + presentedTime: time.Now().UTC(), + sName: sname, + cTime: ct, + }, + }, + seqNumber: a.SeqNumber, + subKey: a.SubKey, + } + } +} + +// ClearOldEntries clears entries from the Cache that are older than the duration provided. +func (c *Cache) ClearOldEntries(d time.Duration) { + c.mux.Lock() + defer c.mux.Unlock() + for ke, ce := range c.entries { + for k, e := range ce.replayMap { + if time.Now().UTC().Sub(e.presentedTime) > d { + delete(ce.replayMap, k) + } + } + if len(ce.replayMap) == 0 { + delete(c.entries, ke) + } + } +} + +// IsReplay tests if the Authenticator provided is a replay within the duration defined. If this is not a replay add the entry to the cache for tracking. 
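Purely as an illustration of the API above (not part of the vendored file), a service handling AP exchanges might gate each decrypted authenticator through the singleton cache like this; sname and authnr are assumed to come from an already verified AP-REQ:

package example

import (
	"errors"
	"time"

	"github.com/jcmturner/gokrb5/v8/service"
	"github.com/jcmturner/gokrb5/v8/types"
)

// rejectReplays is illustrative only: it uses the package-level replay cache,
// purging entries older than two minutes, and refuses authenticators that have
// already been presented for the same service principal within that window.
func rejectReplays(sname types.PrincipalName, authnr types.Authenticator) error {
	rc := service.GetReplayCache(2 * time.Minute)
	if rc.IsReplay(sname, authnr) {
		return errors.New("authenticator replay detected")
	}
	return nil
}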
+func (c *Cache) IsReplay(sname types.PrincipalName, a types.Authenticator) bool { + ct := a.CTime.Add(time.Duration(a.Cusec) * time.Microsecond) + if e, ok := c.getClientEntry(a.CName, ct); ok { + if e.sName.Equal(sname) { + return true + } + } + c.AddEntry(sname, a) + return false +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/service/settings.go b/vendor/github.com/jcmturner/gokrb5/v8/service/settings.go new file mode 100644 index 00000000000..a0370ed7af5 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/service/settings.go @@ -0,0 +1,163 @@ +package service + +import ( + "log" + "net/http" + "time" + + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/types" +) + +// Settings defines service side configuration settings. +type Settings struct { + Keytab *keytab.Keytab + ktprinc *types.PrincipalName + sname string + requireHostAddr bool + disablePACDecoding bool + cAddr types.HostAddress + maxClockSkew time.Duration + logger *log.Logger + sessionMgr SessionMgr +} + +// NewSettings creates a new service Settings. +func NewSettings(kt *keytab.Keytab, settings ...func(*Settings)) *Settings { + s := new(Settings) + s.Keytab = kt + for _, set := range settings { + set(s) + } + return s +} + +// RequireHostAddr used to configure service side to required host addresses to be specified in Kerberos tickets. +// +// s := NewSettings(kt, RequireHostAddr(true)) +func RequireHostAddr(b bool) func(*Settings) { + return func(s *Settings) { + s.requireHostAddr = b + } +} + +// RequireHostAddr indicates if the service should require the host address to be included in the ticket. +func (s *Settings) RequireHostAddr() bool { + return s.requireHostAddr +} + +// DecodePAC used to configure service side to enable/disable PAC decoding if the PAC is present. +// Defaults to enabled if not specified. +// +// s := NewSettings(kt, DecodePAC(false)) +func DecodePAC(b bool) func(*Settings) { + return func(s *Settings) { + s.disablePACDecoding = !b + } +} + +// DecodePAC indicates whether the service should decode any PAC information present in the ticket. +func (s *Settings) DecodePAC() bool { + return !s.disablePACDecoding +} + +// ClientAddress used to configure service side with the clients host address to be used during validation. +// +// s := NewSettings(kt, ClientAddress(h)) +func ClientAddress(h types.HostAddress) func(*Settings) { + return func(s *Settings) { + s.cAddr = h + } +} + +// ClientAddress returns the client host address which has been provided to the service. +func (s *Settings) ClientAddress() types.HostAddress { + return s.cAddr +} + +// Logger used to configure service side with a logger. +// +// s := NewSettings(kt, Logger(l)) +func Logger(l *log.Logger) func(*Settings) { + return func(s *Settings) { + s.logger = l + } +} + +// Logger returns the logger instances configured for the service. If none is configured nill will be returned. +func (s *Settings) Logger() *log.Logger { + return s.logger +} + +// KeytabPrincipal used to override the principal name used to find the key in the keytab. +// +// s := NewSettings(kt, KeytabPrincipal("someaccount")) +func KeytabPrincipal(p string) func(*Settings) { + return func(s *Settings) { + pn, _ := types.ParseSPNString(p) + s.ktprinc = &pn + } +} + +// KeytabPrincipal returns the principal name used to find the key in the keytab if it has been overridden. 
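Taken together, these option functions are meant to be passed to NewSettings, or to the SPNEGOKRB5Authenticate wrapper that appears later in this patch. A small illustrative sketch, not part of the vendored file; kt, l and the principal string are assumed placeholders:

package example

import (
	"log"
	"time"

	"github.com/jcmturner/gokrb5/v8/keytab"
	"github.com/jcmturner/gokrb5/v8/service"
)

// newHTTPServiceSettings is illustrative only: PAC decoding is left enabled,
// a 5 minute clock skew is tolerated, and an explicit keytab principal and
// logger are configured.
func newHTTPServiceSettings(kt *keytab.Keytab, l *log.Logger) *service.Settings {
	return service.NewSettings(kt,
		service.Logger(l),
		service.DecodePAC(true),
		service.MaxClockSkew(5*time.Minute),
		service.KeytabPrincipal("HTTP/web.example.com"),
	)
}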
+func (s *Settings) KeytabPrincipal() *types.PrincipalName { + return s.ktprinc +} + +// MaxClockSkew used to configure service side with the maximum acceptable clock skew +// between the service and the issue time of kerberos tickets +// +// s := NewSettings(kt, MaxClockSkew(d)) +func MaxClockSkew(d time.Duration) func(*Settings) { + return func(s *Settings) { + s.maxClockSkew = d + } +} + +// MaxClockSkew returns the maximum acceptable clock skew between the service and the issue time of kerberos tickets. +// If none is defined a duration of 5 minutes is returned. +func (s *Settings) MaxClockSkew() time.Duration { + if s.maxClockSkew.Nanoseconds() == 0 { + return time.Duration(5) * time.Minute + } + return s.maxClockSkew +} + +// SName used provide a specific service name to the service settings. +// +// s := NewSettings(kt, SName("HTTP/some.service.com")) +func SName(sname string) func(*Settings) { + return func(s *Settings) { + s.sname = sname + } +} + +// SName returns the specific service name to the service. +func (s *Settings) SName() string { + return s.sname +} + +// SessionManager configures a session manager to establish sessions with clients to avoid excessive authentication challenges. +// +// s := NewSettings(kt, SessionManager(sm)) +func SessionManager(sm SessionMgr) func(*Settings) { + return func(s *Settings) { + s.sessionMgr = sm + } +} + +// SessionManager returns any configured session manager. +func (s *Settings) SessionManager() SessionMgr { + return s.sessionMgr +} + +// SessionMgr must provide a ways to: +// +// - Create new sessions and in the process add a value to the session under the key provided. +// +// - Get an existing session returning the value in the session under the key provided. +// Return nil bytes and/or error if there is no session. +type SessionMgr interface { + New(w http.ResponseWriter, r *http.Request, k string, v []byte) error + Get(r *http.Request, k string) ([]byte, error) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/spnego/http.go b/vendor/github.com/jcmturner/gokrb5/v8/spnego/http.go new file mode 100644 index 00000000000..53fe1a3dbd8 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/spnego/http.go @@ -0,0 +1,401 @@ +package spnego + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/cookiejar" + "net/url" + "strings" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/goidentity/v6" + "github.com/jcmturner/gokrb5/v8/client" + "github.com/jcmturner/gokrb5/v8/credentials" + "github.com/jcmturner/gokrb5/v8/gssapi" + "github.com/jcmturner/gokrb5/v8/iana/nametype" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/service" + "github.com/jcmturner/gokrb5/v8/types" +) + +// Client side functionality // + +// Client will negotiate authentication with a server using SPNEGO. +type Client struct { + *http.Client + krb5Client *client.Client + spn string + reqs []*http.Request +} + +type redirectErr struct { + reqTarget *http.Request +} + +func (e redirectErr) Error() string { + return fmt.Sprintf("redirect to %v", e.reqTarget.URL) +} + +type teeReadCloser struct { + io.Reader + io.Closer +} + +// NewClient returns a SPNEGO enabled HTTP client. +// Be careful when passing in the *http.Client if it is beginning reused in multiple calls to this function. 
+// Ensure reuse of the provided *http.Client is for the same user as a session cookie may have been added to +// http.Client's cookie jar. +// Incorrect reuse of the provided *http.Client could lead to access to the wrong user's session. +func NewClient(krb5Cl *client.Client, httpCl *http.Client, spn string) *Client { + if httpCl == nil { + httpCl = &http.Client{} + } + // Add a cookie jar if there isn't one + if httpCl.Jar == nil { + httpCl.Jar, _ = cookiejar.New(nil) + } + // Add a CheckRedirect function that will execute any functional already defined and then error with a redirectErr + f := httpCl.CheckRedirect + httpCl.CheckRedirect = func(req *http.Request, via []*http.Request) error { + if f != nil { + err := f(req, via) + if err != nil { + return err + } + } + return redirectErr{reqTarget: req} + } + return &Client{ + Client: httpCl, + krb5Client: krb5Cl, + spn: spn, + } +} + +// Do is the SPNEGO enabled HTTP client's equivalent of the http.Client's Do method. +func (c *Client) Do(req *http.Request) (resp *http.Response, err error) { + var body bytes.Buffer + if req.Body != nil { + // Use a tee reader to capture any body sent in case we have to replay it again + teeR := io.TeeReader(req.Body, &body) + teeRC := teeReadCloser{teeR, req.Body} + req.Body = teeRC + } + resp, err = c.Client.Do(req) + if err != nil { + if ue, ok := err.(*url.Error); ok { + if e, ok := ue.Err.(redirectErr); ok { + // Picked up a redirect + e.reqTarget.Header.Del(HTTPHeaderAuthRequest) + c.reqs = append(c.reqs, e.reqTarget) + if len(c.reqs) >= 10 { + return resp, errors.New("stopped after 10 redirects") + } + if req.Body != nil { + // Refresh the body reader so the body can be sent again + e.reqTarget.Body = ioutil.NopCloser(&body) + } + return c.Do(e.reqTarget) + } + } + return resp, err + } + if respUnauthorizedNegotiate(resp) { + err := SetSPNEGOHeader(c.krb5Client, req, c.spn) + if err != nil { + return resp, err + } + if req.Body != nil { + // Refresh the body reader so the body can be sent again + req.Body = ioutil.NopCloser(&body) + } + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + return c.Do(req) + } + return resp, err +} + +// Get is the SPNEGO enabled HTTP client's equivalent of the http.Client's Get method. +func (c *Client) Get(url string) (resp *http.Response, err error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Post is the SPNEGO enabled HTTP client's equivalent of the http.Client's Post method. +func (c *Client) Post(url, contentType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", contentType) + return c.Do(req) +} + +// PostForm is the SPNEGO enabled HTTP client's equivalent of the http.Client's PostForm method. +func (c *Client) PostForm(url string, data url.Values) (resp *http.Response, err error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// Head is the SPNEGO enabled HTTP client's equivalent of the http.Client's Head method. 
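For reference, not part of the vendored file: the typical client-side call pattern with NewClient above. krb5cl is assumed to be an already configured *client.Client, and the empty SPN lets the library derive one from the request URL via SetSPNEGOHeader further below.

package example

import (
	"io"

	"github.com/jcmturner/gokrb5/v8/client"
	"github.com/jcmturner/gokrb5/v8/spnego"
)

// fetchWithSPNEGO is illustrative only: it performs a GET through the SPNEGO
// aware client, letting it negotiate Kerberos when the server answers with a
// 401 Negotiate challenge.
func fetchWithSPNEGO(krb5cl *client.Client, url string) ([]byte, error) {
	httpCl := spnego.NewClient(krb5cl, nil, "")
	resp, err := httpCl.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}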
+func (c *Client) Head(url string) (resp *http.Response, err error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +func respUnauthorizedNegotiate(resp *http.Response) bool { + if resp.StatusCode == http.StatusUnauthorized { + if resp.Header.Get(HTTPHeaderAuthResponse) == HTTPHeaderAuthResponseValueKey { + return true + } + } + return false +} + +func setRequestSPN(r *http.Request) (types.PrincipalName, error) { + h := strings.TrimSuffix(r.URL.Host, ".") + // This if statement checks if the host includes a port number + if strings.LastIndex(r.URL.Host, ":") > strings.LastIndex(r.URL.Host, "]") { + // There is a port number in the URL + h, p, err := net.SplitHostPort(h) + if err != nil { + return types.PrincipalName{}, err + } + name, err := net.LookupCNAME(h) + if err == nil { + // Underlyng canonical name should be used for SPN + h = name + } + h = strings.TrimSuffix(h, ".") + r.Host = fmt.Sprintf("%s:%s", h, p) + return types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, "HTTP/"+h), nil + } + name, err := net.LookupCNAME(h) + if err == nil { + // Underlyng canonical name should be used for SPN + h = name + } + h = strings.TrimSuffix(h, ".") + r.Host = h + return types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, "HTTP/"+h), nil +} + +// SetSPNEGOHeader gets the service ticket and sets it as the SPNEGO authorization header on HTTP request object. +// To auto generate the SPN from the request object pass a null string "". +func SetSPNEGOHeader(cl *client.Client, r *http.Request, spn string) error { + if spn == "" { + pn, err := setRequestSPN(r) + if err != nil { + return err + } + spn = pn.PrincipalNameString() + } + cl.Log("using SPN %s", spn) + s := SPNEGOClient(cl, spn) + err := s.AcquireCred() + if err != nil { + return fmt.Errorf("could not acquire client credential: %v", err) + } + st, err := s.InitSecContext() + if err != nil { + return fmt.Errorf("could not initialize context: %v", err) + } + nb, err := st.Marshal() + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "could not marshal SPNEGO") + } + hs := "Negotiate " + base64.StdEncoding.EncodeToString(nb) + r.Header.Set(HTTPHeaderAuthRequest, hs) + return nil +} + +// Service side functionality // + +const ( + // spnegoNegTokenRespKRBAcceptCompleted - The response on successful authentication always has this header. Capturing as const so we don't have marshaling and encoding overhead. + spnegoNegTokenRespKRBAcceptCompleted = "Negotiate oRQwEqADCgEAoQsGCSqGSIb3EgECAg==" + // spnegoNegTokenRespReject - The response on a failed authentication always has this rejection header. Capturing as const so we don't have marshaling and encoding overhead. + spnegoNegTokenRespReject = "Negotiate oQcwBaADCgEC" + // spnegoNegTokenRespIncompleteKRB5 - Response token specifying incomplete context and KRB5 as the supported mechtype. + spnegoNegTokenRespIncompleteKRB5 = "Negotiate oRQwEqADCgEBoQsGCSqGSIb3EgECAg==" + // sessionCredentials is the session value key holding the credentials jcmturner/goidentity/Identity object. + sessionCredentials = "github.com/jcmturner/gokrb5/v8/sessionCredentials" + // ctxCredentials is the SPNEGO context key holding the credentials jcmturner/goidentity/Identity object. + ctxCredentials = "github.com/jcmturner/gokrb5/v8/ctxCredentials" + // HTTPHeaderAuthRequest is the header that will hold authn/z information. 
+ HTTPHeaderAuthRequest = "Authorization" + // HTTPHeaderAuthResponse is the header that will hold SPNEGO data from the server. + HTTPHeaderAuthResponse = "WWW-Authenticate" + // HTTPHeaderAuthResponseValueKey is the key in the auth header for SPNEGO. + HTTPHeaderAuthResponseValueKey = "Negotiate" + // UnauthorizedMsg is the message returned in the body when authentication fails. + UnauthorizedMsg = "Unauthorised.\n" +) + +// SPNEGOKRB5Authenticate is a Kerberos SPNEGO authentication HTTP handler wrapper. +func SPNEGOKRB5Authenticate(inner http.Handler, kt *keytab.Keytab, settings ...func(*service.Settings)) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Set up the SPNEGO GSS-API mechanism + var spnego *SPNEGO + h, err := types.GetHostAddress(r.RemoteAddr) + if err == nil { + // put in this order so that if the user provides a ClientAddress it will override the one here. + o := append([]func(*service.Settings){service.ClientAddress(h)}, settings...) + spnego = SPNEGOService(kt, o...) + } else { + spnego = SPNEGOService(kt, settings...) + spnego.Log("%s - SPNEGO could not parse client address: %v", r.RemoteAddr, err) + } + + // Check if there is a session manager and if there is an already established session for this client + id, err := getSessionCredentials(spnego, r) + if err == nil && id.Authenticated() { + // There is an established session so bypass auth and serve + spnego.Log("%s - SPNEGO request served under session %s", r.RemoteAddr, id.SessionID()) + inner.ServeHTTP(w, goidentity.AddToHTTPRequestContext(&id, r)) + return + } + + st, err := getAuthorizationNegotiationHeaderAsSPNEGOToken(spnego, r, w) + if st == nil || err != nil { + // response to client and logging handled in function above so just return + return + } + + // Validate the context token + authed, ctx, status := spnego.AcceptSecContext(st) + if status.Code != gssapi.StatusComplete && status.Code != gssapi.StatusContinueNeeded { + spnegoResponseReject(spnego, w, "%s - SPNEGO validation error: %v", r.RemoteAddr, status) + return + } + if status.Code == gssapi.StatusContinueNeeded { + spnegoNegotiateKRB5MechType(spnego, w, "%s - SPNEGO GSS-API continue needed", r.RemoteAddr) + return + } + + if authed { + // Authentication successful; get user's credentials from the context + id := ctx.Value(ctxCredentials).(*credentials.Credentials) + // Create a new session if a session manager has been configured + err = newSession(spnego, r, w, id) + if err != nil { + return + } + spnegoResponseAcceptCompleted(spnego, w, "%s %s@%s - SPNEGO authentication succeeded", r.RemoteAddr, id.UserName(), id.Domain()) + // Add the identity to the context and serve the inner/wrapped handler + inner.ServeHTTP(w, goidentity.AddToHTTPRequestContext(id, r)) + return + } + // If we get to here we have not authenticationed so just reject + spnegoResponseReject(spnego, w, "%s - SPNEGO Kerberos authentication failed", r.RemoteAddr) + return + }) +} + +func getAuthorizationNegotiationHeaderAsSPNEGOToken(spnego *SPNEGO, r *http.Request, w http.ResponseWriter) (*SPNEGOToken, error) { + s := strings.SplitN(r.Header.Get(HTTPHeaderAuthRequest), " ", 2) + if len(s) != 2 || s[0] != HTTPHeaderAuthResponseValueKey { + // No Authorization header set so return 401 with WWW-Authenticate Negotiate header + w.Header().Set(HTTPHeaderAuthResponse, HTTPHeaderAuthResponseValueKey) + http.Error(w, UnauthorizedMsg, http.StatusUnauthorized) + return nil, errors.New("client did not provide a negotiation authorization header") + } 
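Stepping back to the SPNEGOKRB5Authenticate wrapper above: a minimal, illustrative wiring of it into an HTTP server, not part of the vendored file; kt and addr are assumed inputs.

package example

import (
	"fmt"
	"net/http"

	"github.com/jcmturner/gokrb5/v8/keytab"
	"github.com/jcmturner/gokrb5/v8/service"
	"github.com/jcmturner/gokrb5/v8/spnego"
)

// serveProtected is illustrative only: every request must complete SPNEGO
// Kerberos authentication before the inner handler runs.
func serveProtected(kt *keytab.Keytab, addr string) error {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "authenticated")
	})
	h := spnego.SPNEGOKRB5Authenticate(inner, kt, service.DecodePAC(true))
	return http.ListenAndServe(addr, h)
}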
+ + // Decode the header into an SPNEGO context token + b, err := base64.StdEncoding.DecodeString(s[1]) + if err != nil { + err = fmt.Errorf("error in base64 decoding negotiation header: %v", err) + spnegoNegotiateKRB5MechType(spnego, w, "%s - SPNEGO %v", r.RemoteAddr, err) + return nil, err + } + var st SPNEGOToken + err = st.Unmarshal(b) + if err != nil { + // Check if this is a raw KRB5 context token - issue #347. + var k5t KRB5Token + if k5t.Unmarshal(b) != nil { + err = fmt.Errorf("error in unmarshaling SPNEGO token: %v", err) + spnegoNegotiateKRB5MechType(spnego, w, "%s - SPNEGO %v", r.RemoteAddr, err) + return nil, err + } + // Wrap it into an SPNEGO context token + st.Init = true + st.NegTokenInit = NegTokenInit{ + MechTypes: []asn1.ObjectIdentifier{k5t.OID}, + MechTokenBytes: b, + } + } + return &st, nil +} + +func getSessionCredentials(spnego *SPNEGO, r *http.Request) (credentials.Credentials, error) { + var creds credentials.Credentials + // Check if there is a session manager and if there is an already established session for this client + if sm := spnego.serviceSettings.SessionManager(); sm != nil { + cb, err := sm.Get(r, sessionCredentials) + if err != nil || cb == nil || len(cb) < 1 { + return creds, fmt.Errorf("%s - SPNEGO error getting session and credentials for request: %v", r.RemoteAddr, err) + } + err = creds.Unmarshal(cb) + if err != nil { + return creds, fmt.Errorf("%s - SPNEGO credentials malformed in session: %v", r.RemoteAddr, err) + } + return creds, nil + } + return creds, errors.New("no session manager configured") +} + +func newSession(spnego *SPNEGO, r *http.Request, w http.ResponseWriter, id *credentials.Credentials) error { + if sm := spnego.serviceSettings.SessionManager(); sm != nil { + // create new session + idb, err := id.Marshal() + if err != nil { + spnegoInternalServerError(spnego, w, "SPNEGO could not marshal credentials to add to the session: %v", err) + return err + } + err = sm.New(w, r, sessionCredentials, idb) + if err != nil { + spnegoInternalServerError(spnego, w, "SPNEGO could not create new session: %v", err) + return err + } + spnego.Log("%s %s@%s - SPNEGO new session (%s) created", r.RemoteAddr, id.UserName(), id.Domain(), id.SessionID()) + } + return nil +} + +// Log and respond to client for error conditions + +func spnegoNegotiateKRB5MechType(s *SPNEGO, w http.ResponseWriter, format string, v ...interface{}) { + s.Log(format, v...) + w.Header().Set(HTTPHeaderAuthResponse, spnegoNegTokenRespIncompleteKRB5) + http.Error(w, UnauthorizedMsg, http.StatusUnauthorized) +} + +func spnegoResponseReject(s *SPNEGO, w http.ResponseWriter, format string, v ...interface{}) { + s.Log(format, v...) + w.Header().Set(HTTPHeaderAuthResponse, spnegoNegTokenRespReject) + http.Error(w, UnauthorizedMsg, http.StatusUnauthorized) +} + +func spnegoResponseAcceptCompleted(s *SPNEGO, w http.ResponseWriter, format string, v ...interface{}) { + s.Log(format, v...) + w.Header().Set(HTTPHeaderAuthResponse, spnegoNegTokenRespKRBAcceptCompleted) +} + +func spnegoInternalServerError(s *SPNEGO, w http.ResponseWriter, format string, v ...interface{}) { + s.Log(format, v...) 
+ http.Error(w, "Internal Server Error", http.StatusInternalServerError) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/spnego/krb5Token.go b/vendor/github.com/jcmturner/gokrb5/v8/spnego/krb5Token.go new file mode 100644 index 00000000000..43f8a82c8a0 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/spnego/krb5Token.go @@ -0,0 +1,218 @@ +package spnego + +import ( + "context" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/client" + "github.com/jcmturner/gokrb5/v8/credentials" + "github.com/jcmturner/gokrb5/v8/gssapi" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/service" + "github.com/jcmturner/gokrb5/v8/types" +) + +// GSSAPI KRB5 MechToken IDs. +const ( + TOK_ID_KRB_AP_REQ = "0100" + TOK_ID_KRB_AP_REP = "0200" + TOK_ID_KRB_ERROR = "0300" +) + +// KRB5Token context token implementation for GSSAPI. +type KRB5Token struct { + OID asn1.ObjectIdentifier + tokID []byte + APReq messages.APReq + APRep messages.APRep + KRBError messages.KRBError + settings *service.Settings + context context.Context +} + +// Marshal a KRB5Token into a slice of bytes. +func (m *KRB5Token) Marshal() ([]byte, error) { + // Create the header + b, _ := asn1.Marshal(m.OID) + b = append(b, m.tokID...) + var tb []byte + var err error + switch hex.EncodeToString(m.tokID) { + case TOK_ID_KRB_AP_REQ: + tb, err = m.APReq.Marshal() + if err != nil { + return []byte{}, fmt.Errorf("error marshalling AP_REQ for MechToken: %v", err) + } + case TOK_ID_KRB_AP_REP: + return []byte{}, errors.New("marshal of AP_REP GSSAPI MechToken not supported by gokrb5") + case TOK_ID_KRB_ERROR: + return []byte{}, errors.New("marshal of KRB_ERROR GSSAPI MechToken not supported by gokrb5") + } + if err != nil { + return []byte{}, fmt.Errorf("error mashalling kerberos message within mech token: %v", err) + } + b = append(b, tb...) + return asn1tools.AddASNAppTag(b, 0), nil +} + +// Unmarshal a KRB5Token. +func (m *KRB5Token) Unmarshal(b []byte) error { + var oid asn1.ObjectIdentifier + r, err := asn1.UnmarshalWithParams(b, &oid, fmt.Sprintf("application,explicit,tag:%v", 0)) + if err != nil { + return fmt.Errorf("error unmarshalling KRB5Token OID: %v", err) + } + if !oid.Equal(gssapi.OIDKRB5.OID()) { + return fmt.Errorf("error unmarshalling KRB5Token, OID is %s not %s", oid.String(), gssapi.OIDKRB5.OID().String()) + } + m.OID = oid + if len(r) < 2 { + return fmt.Errorf("krb5token too short") + } + m.tokID = r[0:2] + switch hex.EncodeToString(m.tokID) { + case TOK_ID_KRB_AP_REQ: + var a messages.APReq + err = a.Unmarshal(r[2:]) + if err != nil { + return fmt.Errorf("error unmarshalling KRB5Token AP_REQ: %v", err) + } + m.APReq = a + case TOK_ID_KRB_AP_REP: + var a messages.APRep + err = a.Unmarshal(r[2:]) + if err != nil { + return fmt.Errorf("error unmarshalling KRB5Token AP_REP: %v", err) + } + m.APRep = a + case TOK_ID_KRB_ERROR: + var a messages.KRBError + err = a.Unmarshal(r[2:]) + if err != nil { + return fmt.Errorf("error unmarshalling KRB5Token KRBError: %v", err) + } + m.KRBError = a + } + return nil +} + +// Verify a KRB5Token. 
+func (m *KRB5Token) Verify() (bool, gssapi.Status) { + switch hex.EncodeToString(m.tokID) { + case TOK_ID_KRB_AP_REQ: + ok, creds, err := service.VerifyAPREQ(&m.APReq, m.settings) + if err != nil { + return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: err.Error()} + } + if !ok { + return false, gssapi.Status{Code: gssapi.StatusDefectiveCredential, Message: "KRB5_AP_REQ token not valid"} + } + m.context = context.Background() + m.context = context.WithValue(m.context, ctxCredentials, creds) + return true, gssapi.Status{Code: gssapi.StatusComplete} + case TOK_ID_KRB_AP_REP: + // Client side + // TODO how to verify the AP_REP - not yet implemented + return false, gssapi.Status{Code: gssapi.StatusFailure, Message: "verifying an AP_REP is not currently supported by gokrb5"} + case TOK_ID_KRB_ERROR: + if m.KRBError.MsgType != msgtype.KRB_ERROR { + return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "KRB5_Error token not valid"} + } + return true, gssapi.Status{Code: gssapi.StatusUnavailable} + } + return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "unknown TOK_ID in KRB5 token"} +} + +// IsAPReq tests if the MechToken contains an AP_REQ. +func (m *KRB5Token) IsAPReq() bool { + if hex.EncodeToString(m.tokID) == TOK_ID_KRB_AP_REQ { + return true + } + return false +} + +// IsAPRep tests if the MechToken contains an AP_REP. +func (m *KRB5Token) IsAPRep() bool { + if hex.EncodeToString(m.tokID) == TOK_ID_KRB_AP_REP { + return true + } + return false +} + +// IsKRBError tests if the MechToken contains an KRB_ERROR. +func (m *KRB5Token) IsKRBError() bool { + if hex.EncodeToString(m.tokID) == TOK_ID_KRB_ERROR { + return true + } + return false +} + +// Context returns the KRB5 token's context which will contain any verify user identity information. +func (m *KRB5Token) Context() context.Context { + return m.context +} + +// NewKRB5TokenAPREQ creates a new KRB5 token with AP_REQ +func NewKRB5TokenAPREQ(cl *client.Client, tkt messages.Ticket, sessionKey types.EncryptionKey, GSSAPIFlags []int, APOptions []int) (KRB5Token, error) { + // TODO consider providing the SPN rather than the specific tkt and key and get these from the krb client. + var m KRB5Token + m.OID = gssapi.OIDKRB5.OID() + tb, _ := hex.DecodeString(TOK_ID_KRB_AP_REQ) + m.tokID = tb + + auth, err := krb5TokenAuthenticator(cl.Credentials, GSSAPIFlags) + if err != nil { + return m, err + } + APReq, err := messages.NewAPReq( + tkt, + sessionKey, + auth, + ) + if err != nil { + return m, err + } + for _, o := range APOptions { + types.SetFlag(&APReq.APOptions, o) + } + m.APReq = APReq + return m, nil +} + +// krb5TokenAuthenticator creates a new kerberos authenticator for kerberos MechToken +func krb5TokenAuthenticator(creds *credentials.Credentials, flags []int) (types.Authenticator, error) { + //RFC 4121 Section 4.1.1 + auth, err := types.NewAuthenticator(creds.Domain(), creds.CName()) + if err != nil { + return auth, krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator") + } + auth.Cksum = types.Checksum{ + CksumType: chksumtype.GSSAPI, + Checksum: newAuthenticatorChksum(flags), + } + return auth, nil +} + +// Create new authenticator checksum for kerberos MechToken +func newAuthenticatorChksum(flags []int) []byte { + a := make([]byte, 24) + binary.LittleEndian.PutUint32(a[:4], 16) + for _, i := range flags { + if i == gssapi.ContextFlagDeleg { + x := make([]byte, 28-len(a)) + a = append(a, x...) 
+ } + f := binary.LittleEndian.Uint32(a[20:24]) + f |= uint32(i) + binary.LittleEndian.PutUint32(a[20:24], f) + } + return a +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/spnego/negotiationToken.go b/vendor/github.com/jcmturner/gokrb5/v8/spnego/negotiationToken.go new file mode 100644 index 00000000000..b2199aed28c --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/spnego/negotiationToken.go @@ -0,0 +1,302 @@ +package spnego + +import ( + "context" + "errors" + "fmt" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/client" + "github.com/jcmturner/gokrb5/v8/gssapi" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/service" + "github.com/jcmturner/gokrb5/v8/types" +) + +// https://msdn.microsoft.com/en-us/library/ms995330.aspx + +// Negotiation state values. +const ( + NegStateAcceptCompleted NegState = 0 + NegStateAcceptIncomplete NegState = 1 + NegStateReject NegState = 2 + NegStateRequestMIC NegState = 3 +) + +// NegState is a type to indicate the SPNEGO negotiation state. +type NegState int + +// NegTokenInit implements Negotiation Token of type Init. +type NegTokenInit struct { + MechTypes []asn1.ObjectIdentifier + ReqFlags asn1.BitString + MechTokenBytes []byte + MechListMIC []byte + mechToken gssapi.ContextToken + settings *service.Settings +} + +type marshalNegTokenInit struct { + MechTypes []asn1.ObjectIdentifier `asn1:"explicit,tag:0"` + ReqFlags asn1.BitString `asn1:"explicit,optional,tag:1"` + MechTokenBytes []byte `asn1:"explicit,optional,omitempty,tag:2"` + MechListMIC []byte `asn1:"explicit,optional,omitempty,tag:3"` // This field is not used when negotiating Kerberos tokens +} + +// NegTokenResp implements Negotiation Token of type Resp/Targ +type NegTokenResp struct { + NegState asn1.Enumerated + SupportedMech asn1.ObjectIdentifier + ResponseToken []byte + MechListMIC []byte + mechToken gssapi.ContextToken + settings *service.Settings +} + +type marshalNegTokenResp struct { + NegState asn1.Enumerated `asn1:"explicit,tag:0"` + SupportedMech asn1.ObjectIdentifier `asn1:"explicit,optional,tag:1"` + ResponseToken []byte `asn1:"explicit,optional,omitempty,tag:2"` + MechListMIC []byte `asn1:"explicit,optional,omitempty,tag:3"` // This field is not used when negotiating Kerberos tokens +} + +// NegTokenTarg implements Negotiation Token of type Resp/Targ +type NegTokenTarg NegTokenResp + +// Marshal an Init negotiation token +func (n *NegTokenInit) Marshal() ([]byte, error) { + m := marshalNegTokenInit{ + MechTypes: n.MechTypes, + ReqFlags: n.ReqFlags, + MechTokenBytes: n.MechTokenBytes, + MechListMIC: n.MechListMIC, + } + b, err := asn1.Marshal(m) + if err != nil { + return nil, err + } + nt := asn1.RawValue{ + Tag: 0, + Class: 2, + IsCompound: true, + Bytes: b, + } + nb, err := asn1.Marshal(nt) + if err != nil { + return nil, err + } + return nb, nil +} + +// Unmarshal an Init negotiation token +func (n *NegTokenInit) Unmarshal(b []byte) error { + init, nt, err := UnmarshalNegToken(b) + if err != nil { + return err + } + if !init { + return errors.New("bytes were not that of a NegTokenInit") + } + nInit := nt.(NegTokenInit) + n.MechTokenBytes = nInit.MechTokenBytes + n.MechListMIC = nInit.MechListMIC + n.MechTypes = nInit.MechTypes + n.ReqFlags = nInit.ReqFlags + return nil +} + +// Verify an Init negotiation token +func (n *NegTokenInit) Verify() (bool, gssapi.Status) { + // Check if supported mechanisms are in the MechTypeList + var mtSupported bool + for _, m := range n.MechTypes { + if 
m.Equal(gssapi.OIDKRB5.OID()) || m.Equal(gssapi.OIDMSLegacyKRB5.OID()) { + if n.mechToken == nil && n.MechTokenBytes == nil { + return false, gssapi.Status{Code: gssapi.StatusContinueNeeded} + } + mtSupported = true + break + } + } + if !mtSupported { + return false, gssapi.Status{Code: gssapi.StatusBadMech, Message: "no supported mechanism specified in negotiation"} + } + // There should be some mechtoken bytes for a KRB5Token (other mech types are not supported) + mt := new(KRB5Token) + mt.settings = n.settings + if n.mechToken == nil { + err := mt.Unmarshal(n.MechTokenBytes) + if err != nil { + return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: err.Error()} + } + n.mechToken = mt + } else { + var ok bool + mt, ok = n.mechToken.(*KRB5Token) + if !ok { + return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "MechToken is not a KRB5 token as expected"} + } + } + // Verify the mechtoken + return n.mechToken.Verify() +} + +// Context returns the SPNEGO context which will contain any verify user identity information. +func (n *NegTokenInit) Context() context.Context { + if n.mechToken != nil { + mt, ok := n.mechToken.(*KRB5Token) + if !ok { + return nil + } + return mt.Context() + } + return nil +} + +// Marshal a Resp/Targ negotiation token +func (n *NegTokenResp) Marshal() ([]byte, error) { + m := marshalNegTokenResp{ + NegState: n.NegState, + SupportedMech: n.SupportedMech, + ResponseToken: n.ResponseToken, + MechListMIC: n.MechListMIC, + } + b, err := asn1.Marshal(m) + if err != nil { + return nil, err + } + nt := asn1.RawValue{ + Tag: 1, + Class: 2, + IsCompound: true, + Bytes: b, + } + nb, err := asn1.Marshal(nt) + if err != nil { + return nil, err + } + return nb, nil +} + +// Unmarshal a Resp/Targ negotiation token +func (n *NegTokenResp) Unmarshal(b []byte) error { + init, nt, err := UnmarshalNegToken(b) + if err != nil { + return err + } + if init { + return errors.New("bytes were not that of a NegTokenResp") + } + nResp := nt.(NegTokenResp) + n.MechListMIC = nResp.MechListMIC + n.NegState = nResp.NegState + n.ResponseToken = nResp.ResponseToken + n.SupportedMech = nResp.SupportedMech + return nil +} + +// Verify a Resp/Targ negotiation token +func (n *NegTokenResp) Verify() (bool, gssapi.Status) { + if n.SupportedMech.Equal(gssapi.OIDKRB5.OID()) || n.SupportedMech.Equal(gssapi.OIDMSLegacyKRB5.OID()) { + if n.mechToken == nil && n.ResponseToken == nil { + return false, gssapi.Status{Code: gssapi.StatusContinueNeeded} + } + mt := new(KRB5Token) + mt.settings = n.settings + if n.mechToken == nil { + err := mt.Unmarshal(n.ResponseToken) + if err != nil { + return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: err.Error()} + } + n.mechToken = mt + } else { + var ok bool + mt, ok = n.mechToken.(*KRB5Token) + if !ok { + return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "MechToken is not a KRB5 token as expected"} + } + } + if mt == nil { + return false, gssapi.Status{Code: gssapi.StatusContinueNeeded} + } + // Verify the mechtoken + return mt.Verify() + } + return false, gssapi.Status{Code: gssapi.StatusBadMech, Message: "no supported mechanism specified in negotiation"} +} + +// State returns the negotiation state of the negotiation response. +func (n *NegTokenResp) State() NegState { + return NegState(n.NegState) +} + +// Context returns the SPNEGO context which will contain any verify user identity information. 
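Since UnmarshalNegToken (defined a little further below) returns an interface value plus a flag, a short illustrative helper, not part of the vendored file, showing how callers are expected to branch on the two token variants:

package example

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/spnego"
)

// describeNegToken is illustrative only: it decodes raw negotiation token
// bytes and reports which SPNEGO variant they contain.
func describeNegToken(b []byte) (string, error) {
	isInit, tok, err := spnego.UnmarshalNegToken(b)
	if err != nil {
		return "", err
	}
	if isInit {
		nti := tok.(spnego.NegTokenInit)
		return fmt.Sprintf("NegTokenInit offering %d mech type(s)", len(nti.MechTypes)), nil
	}
	resp := tok.(spnego.NegTokenResp)
	return fmt.Sprintf("NegTokenResp in negotiation state %d", resp.State()), nil
}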
+func (n *NegTokenResp) Context() context.Context { + if n.mechToken != nil { + mt, ok := n.mechToken.(*KRB5Token) + if !ok { + return nil + } + return mt.Context() + } + return nil +} + +// UnmarshalNegToken umarshals and returns either a NegTokenInit or a NegTokenResp. +// +// The boolean indicates if the response is a NegTokenInit. +// If error is nil and the boolean is false the response is a NegTokenResp. +func UnmarshalNegToken(b []byte) (bool, interface{}, error) { + var a asn1.RawValue + _, err := asn1.Unmarshal(b, &a) + if err != nil { + return false, nil, fmt.Errorf("error unmarshalling NegotiationToken: %v", err) + } + switch a.Tag { + case 0: + var n marshalNegTokenInit + _, err = asn1.Unmarshal(a.Bytes, &n) + if err != nil { + return false, nil, fmt.Errorf("error unmarshalling NegotiationToken type %d (Init): %v", a.Tag, err) + } + nt := NegTokenInit{ + MechTypes: n.MechTypes, + ReqFlags: n.ReqFlags, + MechTokenBytes: n.MechTokenBytes, + MechListMIC: n.MechListMIC, + } + return true, nt, nil + case 1: + var n marshalNegTokenResp + _, err = asn1.Unmarshal(a.Bytes, &n) + if err != nil { + return false, nil, fmt.Errorf("error unmarshalling NegotiationToken type %d (Resp/Targ): %v", a.Tag, err) + } + nt := NegTokenResp{ + NegState: n.NegState, + SupportedMech: n.SupportedMech, + ResponseToken: n.ResponseToken, + MechListMIC: n.MechListMIC, + } + return false, nt, nil + default: + return false, nil, errors.New("unknown choice type for NegotiationToken") + } + +} + +// NewNegTokenInitKRB5 creates new Init negotiation token for Kerberos 5 +func NewNegTokenInitKRB5(cl *client.Client, tkt messages.Ticket, sessionKey types.EncryptionKey) (NegTokenInit, error) { + mt, err := NewKRB5TokenAPREQ(cl, tkt, sessionKey, []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}, []int{}) + if err != nil { + return NegTokenInit{}, fmt.Errorf("error getting KRB5 token; %v", err) + } + mtb, err := mt.Marshal() + if err != nil { + return NegTokenInit{}, fmt.Errorf("error marshalling KRB5 token; %v", err) + } + return NegTokenInit{ + MechTypes: []asn1.ObjectIdentifier{gssapi.OIDKRB5.OID()}, + MechTokenBytes: mtb, + }, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/spnego/spnego.go b/vendor/github.com/jcmturner/gokrb5/v8/spnego/spnego.go new file mode 100644 index 00000000000..ebb818b9e61 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/spnego/spnego.go @@ -0,0 +1,203 @@ +// Package spnego implements the Simple and Protected GSSAPI Negotiation Mechanism for Kerberos authentication. +package spnego + +import ( + "context" + "errors" + "fmt" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/client" + "github.com/jcmturner/gokrb5/v8/gssapi" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/service" +) + +// SPNEGO implements the GSS-API mechanism for RFC 4178 +type SPNEGO struct { + serviceSettings *service.Settings + client *client.Client + spn string +} + +// SPNEGOClient configures the SPNEGO mechanism suitable for client side use. +func SPNEGOClient(cl *client.Client, spn string) *SPNEGO { + s := new(SPNEGO) + s.client = cl + s.spn = spn + s.serviceSettings = service.NewSettings(nil, service.SName(spn)) + return s +} + +// SPNEGOService configures the SPNEGO mechanism suitable for service side use. +func SPNEGOService(kt *keytab.Keytab, options ...func(*service.Settings)) *SPNEGO { + s := new(SPNEGO) + s.serviceSettings = service.NewSettings(kt, options...) 
+ return s +} + +// OID returns the GSS-API assigned OID for SPNEGO. +func (s *SPNEGO) OID() asn1.ObjectIdentifier { + return gssapi.OIDSPNEGO.OID() +} + +// AcquireCred is the GSS-API method to acquire a client credential via Kerberos for SPNEGO. +func (s *SPNEGO) AcquireCred() error { + return s.client.AffirmLogin() +} + +// InitSecContext is the GSS-API method for the client to a generate a context token to the service via Kerberos. +func (s *SPNEGO) InitSecContext() (gssapi.ContextToken, error) { + tkt, key, err := s.client.GetServiceTicket(s.spn) + if err != nil { + return &SPNEGOToken{}, err + } + negTokenInit, err := NewNegTokenInitKRB5(s.client, tkt, key) + if err != nil { + return &SPNEGOToken{}, fmt.Errorf("could not create NegTokenInit: %v", err) + } + return &SPNEGOToken{ + Init: true, + NegTokenInit: negTokenInit, + settings: s.serviceSettings, + }, nil +} + +// AcceptSecContext is the GSS-API method for the service to verify the context token provided by the client and +// establish a context. +func (s *SPNEGO) AcceptSecContext(ct gssapi.ContextToken) (bool, context.Context, gssapi.Status) { + var ctx context.Context + t, ok := ct.(*SPNEGOToken) + if !ok { + return false, ctx, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "context token provided was not an SPNEGO token"} + } + t.settings = s.serviceSettings + var oid asn1.ObjectIdentifier + if t.Init { + oid = t.NegTokenInit.MechTypes[0] + } + if t.Resp { + oid = t.NegTokenResp.SupportedMech + } + if !(oid.Equal(gssapi.OIDKRB5.OID()) || oid.Equal(gssapi.OIDMSLegacyKRB5.OID())) { + return false, ctx, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "SPNEGO OID of MechToken is not of type KRB5"} + } + // Flags in the NegInit must be used t.NegTokenInit.ReqFlags + ok, status := t.Verify() + ctx = t.Context() + return ok, ctx, status +} + +// Log will write to the service's logger if it is configured. +func (s *SPNEGO) Log(format string, v ...interface{}) { + if s.serviceSettings.Logger() != nil { + s.serviceSettings.Logger().Output(2, fmt.Sprintf(format, v...)) + } +} + +// SPNEGOToken is a GSS-API context token +type SPNEGOToken struct { + Init bool + Resp bool + NegTokenInit NegTokenInit + NegTokenResp NegTokenResp + settings *service.Settings + context context.Context +} + +// Marshal SPNEGO context token +func (s *SPNEGOToken) Marshal() ([]byte, error) { + var b []byte + if s.Init { + hb, _ := asn1.Marshal(gssapi.OIDSPNEGO.OID()) + tb, err := s.NegTokenInit.Marshal() + if err != nil { + return b, fmt.Errorf("could not marshal NegTokenInit: %v", err) + } + b = append(hb, tb...) + return asn1tools.AddASNAppTag(b, 0), nil + } + if s.Resp { + b, err := s.NegTokenResp.Marshal() + if err != nil { + return b, fmt.Errorf("could not marshal NegTokenResp: %v", err) + } + return b, nil + } + return b, errors.New("SPNEGO cannot be marshalled. 
It contains neither a NegTokenInit or NegTokenResp") +} + +// Unmarshal SPNEGO context token +func (s *SPNEGOToken) Unmarshal(b []byte) error { + var r []byte + var err error + // We need some data in the array + if len(b) < 1 { + return fmt.Errorf("provided byte array is empty") + } + if b[0] != byte(161) { + // Not a NegTokenResp/Targ could be a NegTokenInit + var oid asn1.ObjectIdentifier + r, err = asn1.UnmarshalWithParams(b, &oid, fmt.Sprintf("application,explicit,tag:%v", 0)) + if err != nil { + return fmt.Errorf("not a valid SPNEGO token: %v", err) + } + // Check the OID is the SPNEGO OID value + SPNEGOOID := gssapi.OIDSPNEGO.OID() + if !oid.Equal(SPNEGOOID) { + return fmt.Errorf("OID %s does not match SPNEGO OID %s", oid.String(), SPNEGOOID.String()) + } + } else { + // Could be a NegTokenResp/Targ + r = b + } + + _, nt, err := UnmarshalNegToken(r) + if err != nil { + return err + } + switch v := nt.(type) { + case NegTokenInit: + s.Init = true + s.NegTokenInit = v + s.NegTokenInit.settings = s.settings + case NegTokenResp: + s.Resp = true + s.NegTokenResp = v + s.NegTokenResp.settings = s.settings + default: + return errors.New("unknown choice type for NegotiationToken") + } + return nil +} + +// Verify the SPNEGOToken +func (s *SPNEGOToken) Verify() (bool, gssapi.Status) { + if (!s.Init && !s.Resp) || (s.Init && s.Resp) { + return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "invalid SPNEGO token, unclear if NegTokenInit or NegTokenResp"} + } + if s.Init { + s.NegTokenInit.settings = s.settings + ok, status := s.NegTokenInit.Verify() + if ok { + s.context = s.NegTokenInit.Context() + } + return ok, status + } + if s.Resp { + s.NegTokenResp.settings = s.settings + ok, status := s.NegTokenResp.Verify() + if ok { + s.context = s.NegTokenResp.Context() + } + return ok, status + } + // should not be possible to get here + return false, gssapi.Status{Code: gssapi.StatusFailure, Message: "unable to verify SPNEGO token"} +} + +// Context returns the SPNEGO context which will contain any verify user identity information. +func (s *SPNEGOToken) Context() context.Context { + return s.context +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go new file mode 100644 index 00000000000..1fdba78a37d --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go @@ -0,0 +1,81 @@ +// Package types provides Kerberos 5 data types. +package types + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" +) + +// Authenticator - A record containing information that can be shown to have been recently generated using the session +// key known only by the client and server. +// https://tools.ietf.org/html/rfc4120#section-5.5.1 +type Authenticator struct { + AVNO int `asn1:"explicit,tag:0"` + CRealm string `asn1:"generalstring,explicit,tag:1"` + CName PrincipalName `asn1:"explicit,tag:2"` + Cksum Checksum `asn1:"explicit,optional,tag:3"` + Cusec int `asn1:"explicit,tag:4"` + CTime time.Time `asn1:"generalized,explicit,tag:5"` + SubKey EncryptionKey `asn1:"explicit,optional,tag:6"` + SeqNumber int64 `asn1:"explicit,optional,tag:7"` + AuthorizationData AuthorizationData `asn1:"explicit,optional,tag:8"` +} + +// NewAuthenticator creates a new Authenticator. 
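Putting the client-side pieces together (SPNEGOClient, AcquireCred, InitSecContext, then marshalling the resulting SPNEGOToken) looks roughly like the sketch below. It assumes a reachable KDC plus the library's config and client packages, and the realm, credentials and SPN are placeholders.

package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/jcmturner/gokrb5/v8/client"
	"github.com/jcmturner/gokrb5/v8/config"
	"github.com/jcmturner/gokrb5/v8/spnego"
)

func main() {
	// Load krb5 configuration and log the client in (placeholder credentials).
	cfg, err := config.Load("/etc/krb5.conf")
	if err != nil {
		log.Fatal(err)
	}
	cl := client.NewWithPassword("user", "EXAMPLE.COM", "password", cfg)

	// Build the SPNEGO mechanism for the target service and produce the
	// initial context token (a SPNEGOToken wrapping a NegTokenInit).
	mech := spnego.SPNEGOClient(cl, "HTTP/www.example.com")
	if err := mech.AcquireCred(); err != nil {
		log.Fatal(err)
	}
	tok, err := mech.InitSecContext()
	if err != nil {
		log.Fatal(err)
	}
	b, err := tok.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	// The marshalled token is what goes into an "Authorization: Negotiate" header.
	fmt.Println("Negotiate " + base64.StdEncoding.EncodeToString(b))
}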
+func NewAuthenticator(realm string, cname PrincipalName) (Authenticator, error) { + seq, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32)) + if err != nil { + return Authenticator{}, err + } + t := time.Now().UTC() + return Authenticator{ + AVNO: iana.PVNO, + CRealm: realm, + CName: cname, + Cksum: Checksum{}, + Cusec: int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)), + CTime: t, + SeqNumber: seq.Int64(), + }, nil +} + +// GenerateSeqNumberAndSubKey sets the Authenticator's sequence number and subkey. +func (a *Authenticator) GenerateSeqNumberAndSubKey(keyType int32, keySize int) error { + seq, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32)) + if err != nil { + return err + } + a.SeqNumber = seq.Int64() + //Generate subkey value + sk := make([]byte, keySize, keySize) + rand.Read(sk) + a.SubKey = EncryptionKey{ + KeyType: keyType, + KeyValue: sk, + } + return nil +} + +// Unmarshal bytes into the Authenticator. +func (a *Authenticator) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.Authenticator)) + return err +} + +// Marshal the Authenticator. +func (a *Authenticator) Marshal() ([]byte, error) { + b, err := asn1.Marshal(*a) + if err != nil { + return nil, err + } + b = asn1tools.AddASNAppTag(b, asnAppTag.Authenticator) + return b, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go b/vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go new file mode 100644 index 00000000000..80c477cef71 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go @@ -0,0 +1,55 @@ +package types + +import ( + "github.com/jcmturner/gofork/encoding/asn1" +) + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.2.6 + +// AuthorizationData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6 +type AuthorizationData []AuthorizationDataEntry + +// AuthorizationDataEntry implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6 +type AuthorizationDataEntry struct { + ADType int32 `asn1:"explicit,tag:0"` + ADData []byte `asn1:"explicit,tag:1"` +} + +// ADIfRelevant implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.1 +type ADIfRelevant AuthorizationData + +// ADKDCIssued implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.2 +type ADKDCIssued struct { + ADChecksum Checksum `asn1:"explicit,tag:0"` + IRealm string `asn1:"optional,generalstring,explicit,tag:1"` + Isname PrincipalName `asn1:"optional,explicit,tag:2"` + Elements AuthorizationData `asn1:"explicit,tag:3"` +} + +// ADAndOr implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.3 +type ADAndOr struct { + ConditionCount int32 `asn1:"explicit,tag:0"` + Elements AuthorizationData `asn1:"explicit,tag:1"` +} + +// ADMandatoryForKDC implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.4 +type ADMandatoryForKDC AuthorizationData + +// Unmarshal bytes into the ADKDCIssued. +func (a *ADKDCIssued) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} + +// Unmarshal bytes into the AuthorizationData. +func (a *AuthorizationData) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} + +// Unmarshal bytes into the AuthorizationDataEntry. 
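A short sketch of building and serialising an Authenticator with the functions above; the SPN, realm and encryption-type values are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/jcmturner/gokrb5/v8/types"
)

func main() {
	// Derive the client principal and realm from an SPN-style string
	// (ParseSPNString is part of this vendored types package).
	pn, realm := types.ParseSPNString("HTTP/host.example.com@EXAMPLE.COM")

	a, err := types.NewAuthenticator(realm, pn)
	if err != nil {
		log.Fatal(err)
	}
	// 18 / 32 are the etype ID and key size of aes256-cts-hmac-sha1-96.
	if err := a.GenerateSeqNumberAndSubKey(18, 32); err != nil {
		log.Fatal(err)
	}
	b, err := a.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("DER-encoded authenticator: %d bytes\n", len(b))
}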
+func (a *AuthorizationDataEntry) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/Cryptosystem.go b/vendor/github.com/jcmturner/gokrb5/v8/types/Cryptosystem.go new file mode 100644 index 00000000000..2f354ea7a6e --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/Cryptosystem.go @@ -0,0 +1,72 @@ +package types + +import ( + "crypto/rand" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.2.9 + +// EncryptedData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9 +type EncryptedData struct { + EType int32 `asn1:"explicit,tag:0"` + KVNO int `asn1:"explicit,optional,tag:1"` + Cipher []byte `asn1:"explicit,tag:2"` +} + +// EncryptionKey implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9 +// AKA KeyBlock +type EncryptionKey struct { + KeyType int32 `asn1:"explicit,tag:0"` + KeyValue []byte `asn1:"explicit,tag:1" json:"-"` +} + +// Checksum implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9 +type Checksum struct { + CksumType int32 `asn1:"explicit,tag:0"` + Checksum []byte `asn1:"explicit,tag:1"` +} + +// Unmarshal bytes into the EncryptedData. +func (a *EncryptedData) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} + +// Marshal the EncryptedData. +func (a *EncryptedData) Marshal() ([]byte, error) { + edb, err := asn1.Marshal(*a) + if err != nil { + return edb, err + } + return edb, nil +} + +// Unmarshal bytes into the EncryptionKey. +func (a *EncryptionKey) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} + +// Unmarshal bytes into the Checksum. +func (a *Checksum) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} + +// GenerateEncryptionKey creates a new EncryptionKey with a random key value. 
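GenerateEncryptionKey pairs naturally with the library's crypto package (vendored as part of this dependency); a minimal sketch, assuming crypto.GetEtype and using etype ID 18 for aes256-cts-hmac-sha1-96.

package main

import (
	"fmt"
	"log"

	"github.com/jcmturner/gokrb5/v8/crypto"
	"github.com/jcmturner/gokrb5/v8/types"
)

func main() {
	// Look up the encryption type implementation (18 = aes256-cts-hmac-sha1-96).
	et, err := crypto.GetEtype(18)
	if err != nil {
		log.Fatal(err)
	}
	// Generate a random session key of the size that etype requires.
	key, err := types.GenerateEncryptionKey(et)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("key type %d, %d bytes\n", key.KeyType, len(key.KeyValue))
}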
+func GenerateEncryptionKey(etype etype.EType) (EncryptionKey, error) { + k := EncryptionKey{ + KeyType: etype.GetETypeID(), + } + b := make([]byte, etype.GetKeyByteSize(), etype.GetKeyByteSize()) + _, err := rand.Read(b) + if err != nil { + return k, err + } + k.KeyValue = b + return k, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/HostAddress.go b/vendor/github.com/jcmturner/gokrb5/v8/types/HostAddress.go new file mode 100644 index 00000000000..895fe805391 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/HostAddress.go @@ -0,0 +1,180 @@ +package types + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.2.5 + +import ( + "bytes" + "fmt" + "net" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/iana/addrtype" +) + +// HostAddresses implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.5 +type HostAddresses []HostAddress + +// HostAddress implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.5 +type HostAddress struct { + AddrType int32 `asn1:"explicit,tag:0"` + Address []byte `asn1:"explicit,tag:1"` +} + +// GetHostAddress returns a HostAddress struct from a string in the format : +func GetHostAddress(s string) (HostAddress, error) { + var h HostAddress + cAddr, _, err := net.SplitHostPort(s) + if err != nil { + return h, fmt.Errorf("invalid format of client address: %v", err) + } + ip := net.ParseIP(cAddr) + var ht int32 + if ip.To4() != nil { + ht = addrtype.IPv4 + ip = ip.To4() + } else if ip.To16() != nil { + ht = addrtype.IPv6 + ip = ip.To16() + } else { + return h, fmt.Errorf("could not determine client's address types: %v", err) + } + h = HostAddress{ + AddrType: ht, + Address: ip, + } + return h, nil +} + +// GetAddress returns a string representation of the HostAddress. +func (h *HostAddress) GetAddress() (string, error) { + var b []byte + _, err := asn1.Unmarshal(h.Address, &b) + return string(b), err +} + +// LocalHostAddresses returns a HostAddresses struct for the local machines interface IP addresses. +func LocalHostAddresses() (ha HostAddresses, err error) { + ifs, err := net.Interfaces() + if err != nil { + return + } + for _, iface := range ifs { + if iface.Flags&net.FlagLoopback != 0 || iface.Flags&net.FlagUp == 0 { + // Interface is either loopback of not up + continue + } + addrs, err := iface.Addrs() + if err != nil { + continue + } + for _, addr := range addrs { + var ip net.IP + switch v := addr.(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + } + var a HostAddress + if ip.To16() == nil { + //neither IPv4 or IPv6 + continue + } + if ip.To4() != nil { + //Is IPv4 + a.AddrType = addrtype.IPv4 + a.Address = ip.To4() + } else { + a.AddrType = addrtype.IPv6 + a.Address = ip.To16() + } + ha = append(ha, a) + } + } + return ha, nil +} + +// HostAddressesFromNetIPs returns a HostAddresses type from a slice of net.IP +func HostAddressesFromNetIPs(ips []net.IP) (ha HostAddresses) { + for _, ip := range ips { + ha = append(ha, HostAddressFromNetIP(ip)) + } + return ha +} + +// HostAddressFromNetIP returns a HostAddress type from a net.IP +func HostAddressFromNetIP(ip net.IP) HostAddress { + if ip.To4() != nil { + //Is IPv4 + return HostAddress{ + AddrType: addrtype.IPv4, + Address: ip.To4(), + } + } + return HostAddress{ + AddrType: addrtype.IPv6, + Address: ip.To16(), + } +} + +// HostAddressesEqual tests if two HostAddress slices are equal. 
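A small sketch of the address helpers; the addresses are documentation ranges and the printed address-type codes are the RFC 4120 values (2 for IPv4, 24 for IPv6).

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/jcmturner/gokrb5/v8/types"
)

func main() {
	// Parse a "host:port" string into an RFC 4120 HostAddress.
	ha, err := types.GetHostAddress("192.0.2.10:88")
	if err != nil {
		log.Fatal(err)
	}
	// Wrap a net.IP directly.
	hb := types.HostAddressFromNetIP(net.ParseIP("2001:db8::1"))

	fmt.Println(ha.AddrType, hb.AddrType)
	fmt.Println(types.HostAddressesContains([]types.HostAddress{ha, hb}, ha)) // true
}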
+func HostAddressesEqual(h, a []HostAddress) bool { + if len(h) != len(a) { + return false + } + for _, e := range a { + var found bool + for _, i := range h { + if e.Equal(i) { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// HostAddressesContains tests if a HostAddress is contained in a HostAddress slice. +func HostAddressesContains(h []HostAddress, a HostAddress) bool { + for _, e := range h { + if e.Equal(a) { + return true + } + } + return false +} + +// Equal tests if the HostAddress is equal to another HostAddress provided. +func (h *HostAddress) Equal(a HostAddress) bool { + if h.AddrType != a.AddrType { + return false + } + return bytes.Equal(h.Address, a.Address) +} + +// Contains tests if a HostAddress is contained within the HostAddresses struct. +func (h *HostAddresses) Contains(a HostAddress) bool { + for _, e := range *h { + if e.Equal(a) { + return true + } + } + return false +} + +// Equal tests if a HostAddress slice is equal to the HostAddresses struct. +func (h *HostAddresses) Equal(a []HostAddress) bool { + if len(*h) != len(a) { + return false + } + for _, e := range a { + if !h.Contains(e) { + return false + } + } + return true +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go b/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go new file mode 100644 index 00000000000..0f2038340dd --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go @@ -0,0 +1,68 @@ +package types + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.2.8 + +import ( + "github.com/jcmturner/gofork/encoding/asn1" +) + +// NewKrbFlags returns an ASN1 BitString struct of the right size for KrbFlags. +func NewKrbFlags() asn1.BitString { + f := asn1.BitString{} + f.Bytes = make([]byte, 4) + f.BitLength = len(f.Bytes) * 8 + return f +} + +// SetFlags sets the flags of an ASN1 BitString. +func SetFlags(f *asn1.BitString, j []int) { + for _, i := range j { + SetFlag(f, i) + } +} + +// SetFlag sets a flag in an ASN1 BitString. +func SetFlag(f *asn1.BitString, i int) { + for l := len(f.Bytes); l < 4; l++ { + (*f).Bytes = append((*f).Bytes, byte(0)) + (*f).BitLength = len((*f).Bytes) * 8 + } + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + (*f).Bytes[b] = (*f).Bytes[b] | (1 << p) +} + +// UnsetFlags unsets flags in an ASN1 BitString. +func UnsetFlags(f *asn1.BitString, j []int) { + for _, i := range j { + UnsetFlag(f, i) + } +} + +// UnsetFlag unsets a flag in an ASN1 BitString. +func UnsetFlag(f *asn1.BitString, i int) { + for l := len(f.Bytes); l < 4; l++ { + (*f).Bytes = append((*f).Bytes, byte(0)) + (*f).BitLength = len((*f).Bytes) * 8 + } + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + (*f).Bytes[b] = (*f).Bytes[b] &^ (1 << p) +} + +// IsFlagSet tests if a flag is set in the ASN1 BitString. +func IsFlagSet(f *asn1.BitString, i int) bool { + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + if (*f).Bytes[b]&(1<<p) != 0 { + return true + } + return false +} + +// ParseSPNString will parse a string in the format <service>/<name>@<realm> +// a PrincipalName type will be returned with the name type set to KRB_NT_PRINCIPAL(1) +// and the realm will be returned as a string.
If the "@" suffix +// is not included in the SPN then the value of realm string returned will be "" +func ParseSPNString(spn string) (pn PrincipalName, realm string) { + if strings.Contains(spn, "@") { + s := strings.Split(spn, "@") + realm = s[len(s)-1] + spn = strings.TrimSuffix(spn, "@"+realm) + } + pn = NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn) + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/TypedData.go b/vendor/github.com/jcmturner/gokrb5/v8/types/TypedData.go new file mode 100644 index 00000000000..19e9f4961f4 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/TypedData.go @@ -0,0 +1,18 @@ +package types + +import "github.com/jcmturner/gofork/encoding/asn1" + +// TypedData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1 +type TypedData struct { + DataType int32 `asn1:"explicit,tag:0"` + DataValue []byte `asn1:"optional,explicit,tag:1"` +} + +// TypedDataSequence implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1 +type TypedDataSequence []TypedData + +// Unmarshal bytes into the TypedDataSequence. +func (a *TypedDataSequence) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} diff --git a/vendor/github.com/jcmturner/rpc/v2/LICENSE b/vendor/github.com/jcmturner/rpc/v2/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/claims.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/claims.go new file mode 100644 index 00000000000..b9f535f5b97 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/claims.go @@ -0,0 +1,152 @@ +package mstypes + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + + "github.com/jcmturner/rpc/v2/ndr" + "golang.org/x/net/http2/hpack" +) + +// Compression format assigned numbers. https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-xca/a8b7cb0a-92a6-4187-a23b-5e14273b96f8 +const ( + CompressionFormatNone uint16 = 0 + CompressionFormatLZNT1 uint16 = 2 // LZNT1 aka ntfs compression + CompressionFormatXPress uint16 = 3 // plain LZ77 + CompressionFormatXPressHuff uint16 = 4 // LZ77+Huffman - The Huffman variant of the XPRESS compression format uses LZ77-style dictionary compression combined with Huffman coding. 
+) + +// ClaimsSourceTypeAD https://msdn.microsoft.com/en-us/library/hh553809.aspx +const ClaimsSourceTypeAD uint16 = 1 + +// Claim Type assigned numbers +const ( + ClaimTypeIDInt64 uint16 = 1 + ClaimTypeIDUInt64 uint16 = 2 + ClaimTypeIDString uint16 = 3 + ClaimsTypeIDBoolean uint16 = 6 +) + +// ClaimsBlob implements https://msdn.microsoft.com/en-us/library/hh554119.aspx +type ClaimsBlob struct { + Size uint32 + EncodedBlob EncodedBlob +} + +// EncodedBlob are the bytes of the encoded Claims +type EncodedBlob []byte + +// Size returns the size of the bytes of the encoded Claims +func (b EncodedBlob) Size(c interface{}) int { + cb := c.(ClaimsBlob) + return int(cb.Size) +} + +// ClaimsSetMetadata implements https://msdn.microsoft.com/en-us/library/hh554073.aspx +type ClaimsSetMetadata struct { + ClaimsSetSize uint32 + ClaimsSetBytes []byte `ndr:"pointer,conformant"` + CompressionFormat uint16 // Enum see constants for options + UncompressedClaimsSetSize uint32 + ReservedType uint16 + ReservedFieldSize uint32 + ReservedField []byte `ndr:"pointer,conformant"` +} + +// ClaimsSet reads the ClaimsSet type from the NDR encoded ClaimsSetBytes in the ClaimsSetMetadata +func (m *ClaimsSetMetadata) ClaimsSet() (c ClaimsSet, err error) { + if len(m.ClaimsSetBytes) < 1 { + err = errors.New("no bytes available for ClaimsSet") + return + } + // TODO switch statement to decompress ClaimsSetBytes + switch m.CompressionFormat { + case CompressionFormatLZNT1: + s := hex.EncodeToString(m.ClaimsSetBytes) + err = fmt.Errorf("ClaimsSet compressed, format LZNT1 not currently supported: %s", s) + return + case CompressionFormatXPress: + s := hex.EncodeToString(m.ClaimsSetBytes) + err = fmt.Errorf("ClaimsSet compressed, format XPress not currently supported: %s", s) + return + case CompressionFormatXPressHuff: + var b []byte + buff := bytes.NewBuffer(b) + _, e := hpack.HuffmanDecode(buff, m.ClaimsSetBytes) + if e != nil { + err = fmt.Errorf("error deflating: %v", e) + return + } + m.ClaimsSetBytes = buff.Bytes() + } + dec := ndr.NewDecoder(bytes.NewReader(m.ClaimsSetBytes)) + err = dec.Decode(&c) + return +} + +// ClaimsSet implements https://msdn.microsoft.com/en-us/library/hh554122.aspx +type ClaimsSet struct { + ClaimsArrayCount uint32 + ClaimsArrays []ClaimsArray `ndr:"pointer,conformant"` + ReservedType uint16 + ReservedFieldSize uint32 + ReservedField []byte `ndr:"pointer,conformant"` +} + +// ClaimsArray implements https://msdn.microsoft.com/en-us/library/hh536458.aspx +type ClaimsArray struct { + ClaimsSourceType uint16 + ClaimsCount uint32 + ClaimEntries []ClaimEntry `ndr:"pointer,conformant"` +} + +// ClaimEntry is a NDR union that implements https://msdn.microsoft.com/en-us/library/hh536374.aspx +type ClaimEntry struct { + ID string `ndr:"pointer,conformant,varying"` + Type uint16 `ndr:"unionTag"` + TypeInt64 ClaimTypeInt64 `ndr:"unionField"` + TypeUInt64 ClaimTypeUInt64 `ndr:"unionField"` + TypeString ClaimTypeString `ndr:"unionField"` + TypeBool ClaimTypeBoolean `ndr:"unionField"` +} + +// SwitchFunc is the ClaimEntry union field selection function +func (u ClaimEntry) SwitchFunc(_ interface{}) string { + switch u.Type { + case ClaimTypeIDInt64: + return "TypeInt64" + case ClaimTypeIDUInt64: + return "TypeUInt64" + case ClaimTypeIDString: + return "TypeString" + case ClaimsTypeIDBoolean: + return "TypeBool" + } + return "" +} + +// ClaimTypeInt64 is a claim of type int64 +type ClaimTypeInt64 struct { + ValueCount uint32 + Value []int64 `ndr:"pointer,conformant"` +} + +// ClaimTypeUInt64 is a claim of 
type uint64 +type ClaimTypeUInt64 struct { + ValueCount uint32 + Value []uint64 `ndr:"pointer,conformant"` +} + +// ClaimTypeString is a claim of type string +type ClaimTypeString struct { + ValueCount uint32 + Value []LPWSTR `ndr:"pointer,conformant"` +} + +// ClaimTypeBoolean is a claim of type bool +type ClaimTypeBoolean struct { + ValueCount uint32 + Value []bool `ndr:"pointer,conformant"` +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/common.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/common.go new file mode 100644 index 00000000000..fb6510d1cbc --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/common.go @@ -0,0 +1,12 @@ +// Package mstypes provides implemnations of some Microsoft data types [MS-DTYP] https://msdn.microsoft.com/en-us/library/cc230283.aspx +package mstypes + +// LPWSTR implements https://msdn.microsoft.com/en-us/library/cc230355.aspx +type LPWSTR struct { + Value string `ndr:"pointer,conformant,varying"` +} + +// String returns the string representation of LPWSTR data type. +func (s *LPWSTR) String() string { + return s.Value +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/filetime.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/filetime.go new file mode 100644 index 00000000000..5cc952fa9d1 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/filetime.go @@ -0,0 +1,52 @@ +// Package mstypes implements representations of Microsoft types +package mstypes + +import ( + "time" +) + +/* +FILETIME is a windows data structure. +Ref: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724284%28v=vs.85%29.aspx +It contains two parts that are 32bit integers: + dwLowDateTime + dwHighDateTime +We need to combine these two into one 64bit integer. +This gives the number of 100 nano second period from January 1, 1601, Coordinated Universal Time (UTC) +*/ + +const unixEpochDiff = 116444736000000000 + +// FileTime implements the Microsoft FILETIME type https://msdn.microsoft.com/en-us/library/cc230324.aspx +type FileTime struct { + LowDateTime uint32 + HighDateTime uint32 +} + +// Time return a golang Time type from the FileTime +func (ft FileTime) Time() time.Time { + ns := (ft.MSEpoch() - unixEpochDiff) * 100 + return time.Unix(0, int64(ns)).UTC() +} + +// MSEpoch returns the FileTime as a Microsoft epoch, the number of 100 nano second periods elapsed from January 1, 1601 UTC. +func (ft FileTime) MSEpoch() int64 { + return (int64(ft.HighDateTime) << 32) + int64(ft.LowDateTime) +} + +// Unix returns the FileTime as a Unix time, the number of seconds elapsed since January 1, 1970 UTC. +func (ft FileTime) Unix() int64 { + return (ft.MSEpoch() - unixEpochDiff) / 10000000 +} + +// GetFileTime returns a FileTime type from the provided Golang Time type. +func GetFileTime(t time.Time) FileTime { + ns := t.UnixNano() + fp := (ns / 100) + unixEpochDiff + hd := fp >> 32 + ld := fp - (hd << 32) + return FileTime{ + LowDateTime: uint32(ld), + HighDateTime: uint32(hd), + } +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/group_membership.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/group_membership.go new file mode 100644 index 00000000000..79151378fcb --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/group_membership.go @@ -0,0 +1,19 @@ +package mstypes + +// GroupMembership implements https://msdn.microsoft.com/en-us/library/cc237945.aspx +// RelativeID : A 32-bit unsigned integer that contains the RID of a particular group. 
+// The possible values for the Attributes flags are identical to those specified in KERB_SID_AND_ATTRIBUTES +type GroupMembership struct { + RelativeID uint32 + Attributes uint32 +} + +// DomainGroupMembership implements https://msdn.microsoft.com/en-us/library/hh536344.aspx +// DomainId: A SID structure that contains the SID for the domain.This member is used in conjunction with the GroupIds members to create group SIDs for the device. +// GroupCount: A 32-bit unsigned integer that contains the number of groups within the domain to which the account belongs. +// GroupIds: A pointer to a list of GROUP_MEMBERSHIP structures that contain the groups to which the account belongs in the domain. The number of groups in this list MUST be equal to GroupCount. +type DomainGroupMembership struct { + DomainID RPCSID `ndr:"pointer"` + GroupCount uint32 + GroupIDs []GroupMembership `ndr:"pointer,conformant"` // Size is value of GroupCount +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/kerb_sid_and_attributes.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/kerb_sid_and_attributes.go new file mode 100644 index 00000000000..61ac39bbf36 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/kerb_sid_and_attributes.go @@ -0,0 +1,23 @@ +package mstypes + +// Attributes of a security group membership and can be combined by using the bitwise OR operation. +// They are used by an access check mechanism to specify whether the membership is to be used in an access check decision. +const ( + SEGroupMandatory = 31 + SEGroupEnabledByDefault = 30 + SEGroupEnabled = 29 + SEGroupOwner = 28 + SEGroupResource = 2 + //All other bits MUST be set to zero and MUST be ignored on receipt. +) + +// KerbSidAndAttributes implements https://msdn.microsoft.com/en-us/library/cc237947.aspx +type KerbSidAndAttributes struct { + SID RPCSID `ndr:"pointer"` // A pointer to an RPC_SID structure. + Attributes uint32 +} + +// SetFlag sets a flag in a uint32 attribute value. +func SetFlag(a *uint32, i uint) { + *a = *a | (1 << (31 - i)) +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/reader.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/reader.go new file mode 100644 index 00000000000..24495bca0a9 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/reader.go @@ -0,0 +1,109 @@ +package mstypes + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" +) + +// Byte sizes of primitive types +const ( + SizeBool = 1 + SizeChar = 1 + SizeUint8 = 1 + SizeUint16 = 2 + SizeUint32 = 4 + SizeUint64 = 8 + SizeEnum = 2 + SizeSingle = 4 + SizeDouble = 8 + SizePtr = 4 +) + +// Reader reads simple byte stream data into a Go representations +type Reader struct { + r *bufio.Reader // source of the data +} + +// NewReader creates a new instance of a simple Reader. 
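A round-trip sketch tying the byte-stream Reader to the FILETIME helpers above: serialise a timestamp as the two little-endian dwords and read it back.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"log"
	"time"

	"github.com/jcmturner/rpc/v2/mstypes"
)

func main() {
	// Encode the current time as a little-endian FILETIME (low dword, then high dword).
	ft := mstypes.GetFileTime(time.Now())
	b := make([]byte, 8)
	binary.LittleEndian.PutUint32(b[:4], ft.LowDateTime)
	binary.LittleEndian.PutUint32(b[4:], ft.HighDateTime)

	// Read it back through the simple byte-stream Reader.
	r := mstypes.NewReader(bytes.NewReader(b))
	got, err := r.FileTime()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(got.Time()) // the original time in UTC, at 100ns resolution
}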
+func NewReader(r io.Reader) *Reader { + reader := new(Reader) + reader.r = bufio.NewReader(r) + return reader +} + +func (r *Reader) Read(p []byte) (n int, err error) { + return r.r.Read(p) +} + +func (r *Reader) Uint8() (uint8, error) { + b, err := r.r.ReadByte() + if err != nil { + return uint8(0), err + } + return uint8(b), nil +} + +func (r *Reader) Uint16() (uint16, error) { + b, err := r.ReadBytes(SizeUint16) + if err != nil { + return uint16(0), err + } + return binary.LittleEndian.Uint16(b), nil +} + +func (r *Reader) Uint32() (uint32, error) { + b, err := r.ReadBytes(SizeUint32) + if err != nil { + return uint32(0), err + } + return binary.LittleEndian.Uint32(b), nil +} + +func (r *Reader) Uint64() (uint64, error) { + b, err := r.ReadBytes(SizeUint64) + if err != nil { + return uint64(0), err + } + return binary.LittleEndian.Uint64(b), nil +} + +func (r *Reader) FileTime() (f FileTime, err error) { + f.LowDateTime, err = r.Uint32() + if err != nil { + return + } + f.HighDateTime, err = r.Uint32() + if err != nil { + return + } + return +} + +// UTF16String returns a string that is UTF16 encoded in a byte slice. n is the number of bytes representing the string +func (r *Reader) UTF16String(n int) (str string, err error) { + //Length divided by 2 as each run is 16bits = 2bytes + s := make([]rune, n/2, n/2) + for i := 0; i < len(s); i++ { + var u uint16 + u, err = r.Uint16() + if err != nil { + return + } + s[i] = rune(u) + } + str = string(s) + return +} + +// readBytes returns a number of bytes from the NDR byte stream. +func (r *Reader) ReadBytes(n int) ([]byte, error) { + //TODO make this take an int64 as input to allow for larger values on all systems? + b := make([]byte, n, n) + m, err := r.r.Read(b) + if err != nil || m != n { + return b, fmt.Errorf("error reading bytes from stream: %v", err) + } + return b, nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/rpc_unicode_string.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/rpc_unicode_string.go new file mode 100644 index 00000000000..4bf02e0ed68 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/rpc_unicode_string.go @@ -0,0 +1,13 @@ +package mstypes + +// RPCUnicodeString implements https://msdn.microsoft.com/en-us/library/cc230365.aspx +type RPCUnicodeString struct { + Length uint16 // The length, in bytes, of the string pointed to by the Buffer member, not including the terminating null character if any. The length MUST be a multiple of 2. The length SHOULD equal the entire size of the Buffer, in which case there is no terminating null character. Any method that accesses this structure MUST use the Length specified instead of relying on the presence or absence of a null character. + MaximumLength uint16 // The maximum size, in bytes, of the string pointed to by Buffer. The size MUST be a multiple of 2. If not, the size MUST be decremented by 1 prior to use. This value MUST not be less than Length. 
+ Value string `ndr:"pointer,conformant,varying"` +} + +// String returns the RPCUnicodeString string value +func (r *RPCUnicodeString) String() string { + return r.Value +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/sid.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/sid.go new file mode 100644 index 00000000000..8e347058c00 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/sid.go @@ -0,0 +1,36 @@ +package mstypes + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "math" + "strings" +) + +// RPCSID implements https://msdn.microsoft.com/en-us/library/cc230364.aspx +type RPCSID struct { + Revision uint8 // An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01. + SubAuthorityCount uint8 // An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15. + IdentifierAuthority [6]byte // An RPC_SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority. + SubAuthority []uint32 `ndr:"conformant"` // A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount. +} + +// String returns the string representation of the RPC_SID. +func (s *RPCSID) String() string { + var strb strings.Builder + strb.WriteString("S-1-") + + b := append(make([]byte, 2, 2), s.IdentifierAuthority[:]...) + // For a strange reason this is read big endian: https://msdn.microsoft.com/en-us/library/dd302645.aspx + i := binary.BigEndian.Uint64(b) + if i > math.MaxUint32 { + fmt.Fprintf(&strb, "0x%s", hex.EncodeToString(s.IdentifierAuthority[:])) + } else { + fmt.Fprintf(&strb, "%d", i) + } + for _, sub := range s.SubAuthority { + fmt.Fprintf(&strb, "-%d", sub) + } + return strb.String() +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go new file mode 100644 index 00000000000..fcf0a5d9a66 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go @@ -0,0 +1,11 @@ +package mstypes + +// CypherBlock implements https://msdn.microsoft.com/en-us/library/cc237040.aspx +type CypherBlock struct { + Data [8]byte // size = 8 +} + +// UserSessionKey implements https://msdn.microsoft.com/en-us/library/cc237080.aspx +type UserSessionKey struct { + CypherBlock [2]CypherBlock // size = 2 +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go b/vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go new file mode 100644 index 00000000000..5e2def2a829 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go @@ -0,0 +1,413 @@ +package ndr + +import ( + "errors" + "fmt" + "reflect" + "strconv" +) + +// intFromTag returns an int that is a value in a struct tag key/value pair +func intFromTag(tag reflect.StructTag, key string) (int, error) { + ndrTag := parseTags(tag) + d := 1 + if n, ok := ndrTag.Map[key]; ok { + i, err := strconv.Atoi(n) + if err != nil { + return d, fmt.Errorf("invalid dimensions tag [%s]: %v", n, err) + } + d = i + } + return d, nil +} + +// parseDimensions returns the a slice of the size of each dimension and type of the member at the deepest level. 
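To illustrate the String form: a SID built by hand under the NT authority {0,0,0,0,0,5} renders in the familiar S-1-5-... notation (the sub-authority values below are made up).

package main

import (
	"fmt"

	"github.com/jcmturner/rpc/v2/mstypes"
)

func main() {
	sid := mstypes.RPCSID{
		Revision:            1,
		SubAuthorityCount:   5,
		IdentifierAuthority: [6]byte{0, 0, 0, 0, 0, 5},
		SubAuthority:        []uint32{21, 3623811015, 3361044348, 30300, 1013},
	}
	fmt.Println(sid.String()) // S-1-5-21-3623811015-3361044348-30300-1013
}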
+func parseDimensions(v reflect.Value) (l []int, tb reflect.Type) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + t := v.Type() + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Array && t.Kind() != reflect.Slice { + return + } + l = append(l, v.Len()) + if t.Elem().Kind() == reflect.Array || t.Elem().Kind() == reflect.Slice { + // contains array or slice + var m []int + m, tb = parseDimensions(v.Index(0)) + l = append(l, m...) + } else { + tb = t.Elem() + } + return +} + +// sliceDimensions returns the count of dimensions a slice has. +func sliceDimensions(t reflect.Type) (d int, tb reflect.Type) { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Slice { + d++ + var n int + n, tb = sliceDimensions(t.Elem()) + d += n + } else { + tb = t + } + return +} + +// makeSubSlices is a deep recursive creation/initialisation of multi-dimensional slices. +// Takes the reflect.Value of the 1st dimension and a slice of the lengths of the sub dimensions +func makeSubSlices(v reflect.Value, l []int) { + ty := v.Type().Elem() + if ty.Kind() != reflect.Slice { + return + } + for i := 0; i < v.Len(); i++ { + s := reflect.MakeSlice(ty, l[0], l[0]) + v.Index(i).Set(s) + // Are there more sub dimensions? + if len(l) > 1 { + makeSubSlices(v.Index(i), l[1:]) + } + } + return +} + +// multiDimensionalIndexPermutations returns all the permutations of the indexes of a multi-dimensional slice. +// The input is a slice of integers that indicates the max size/length of each dimension +func multiDimensionalIndexPermutations(l []int) (ps [][]int) { + z := make([]int, len(l), len(l)) // The zeros permutation + ps = append(ps, z) + // for each dimension, in reverse + for i := len(l) - 1; i >= 0; i-- { + ws := make([][]int, len(ps)) + copy(ws, ps) + //create a permutation for each of the iterations of the current dimension + for j := 1; j <= l[i]-1; j++ { + // For each existing permutation + for _, p := range ws { + np := make([]int, len(p), len(p)) + copy(np, p) + np[i] = j + ps = append(ps, np) + } + } + } + return +} + +// precedingMax reads off the next conformant max value +func (dec *Decoder) precedingMax() uint32 { + m := dec.conformantMax[0] + dec.conformantMax = dec.conformantMax[1:] + return m +} + +// fillFixedArray establishes if the fixed array is uni or multi dimensional and then fills it. +func (dec *Decoder) fillFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + l, t := parseDimensions(v) + if t.Kind() == reflect.String { + tag = reflect.StructTag(subStringArrayTag) + } + if len(l) < 1 { + return errors.New("could not establish dimensions of fixed array") + } + if len(l) == 1 { + err := dec.fillUniDimensionalFixedArray(v, tag, def) + if err != nil { + return fmt.Errorf("could not fill uni-dimensional fixed array: %v", err) + } + return nil + } + // Fixed array is multidimensional + ps := multiDimensionalIndexPermutations(l[:len(l)-1]) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + for _, i := range p { + a = a.Index(i) + } + // fill with the last dimension array + err := dec.fillUniDimensionalFixedArray(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill dimension %v of multi-dimensional fixed array: %v", p, err) + } + } + return nil +} + +// readUniDimensionalFixedArray reads an array (not slice) from the byte stream. 
+func (dec *Decoder) fillUniDimensionalFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + for i := 0; i < v.Len(); i++ { + err := dec.fill(v.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of fixed array: %v", i, err) + } + } + return nil +} + +// fillConformantArray establishes if the conformant array is uni or multi dimensional and then fills the slice. +func (dec *Decoder) fillConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, _ := sliceDimensions(v.Type()) + if d > 1 { + err := dec.fillMultiDimensionalConformantArray(v, d, tag, def) + if err != nil { + return err + } + } else { + err := dec.fillUniDimensionalConformantArray(v, tag, def) + if err != nil { + return err + } + } + return nil +} + +// fillUniDimensionalConformantArray fills the uni-dimensional slice value. +func (dec *Decoder) fillUniDimensionalConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + m := dec.precedingMax() + n := int(m) + a := reflect.MakeSlice(v.Type(), n, n) + for i := 0; i < n; i++ { + err := dec.fill(a.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of uni-dimensional conformant array: %v", i, err) + } + } + v.Set(a) + return nil +} + +// fillMultiDimensionalConformantArray fills the multi-dimensional slice value provided from conformant array data. +// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this +// method not to panic. +func (dec *Decoder) fillMultiDimensionalConformantArray(v reflect.Value, d int, tag reflect.StructTag, def *[]deferedPtr) error { + // Read the max size of each dimensions from the ndr stream + l := make([]int, d, d) + for i := range l { + l[i] = int(dec.precedingMax()) + } + // Initialise size of slices + // Initialise the size of the 1st dimension + ty := v.Type() + v.Set(reflect.MakeSlice(ty, l[0], l[0])) + // Initialise the size of the other dimensions recursively + makeSubSlices(v, l[1:]) + + // Get all permutations of the indexes and go through each and fill + ps := multiDimensionalIndexPermutations(l) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + for _, i := range p { + a = a.Index(i) + } + err := dec.fill(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill index %v of slice: %v", p, err) + } + } + return nil +} + +// fillVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice. +func (dec *Decoder) fillVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, t := sliceDimensions(v.Type()) + if d > 1 { + err := dec.fillMultiDimensionalVaryingArray(v, t, d, tag, def) + if err != nil { + return err + } + } else { + err := dec.fillUniDimensionalVaryingArray(v, tag, def) + if err != nil { + return err + } + } + return nil +} + +// fillUniDimensionalVaryingArray fills the uni-dimensional slice value. +func (dec *Decoder) fillUniDimensionalVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + o, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of uni-dimensional varying array: %v", err) + } + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not establish actual count of uni-dimensional varying array: %v", err) + } + t := v.Type() + // Total size of the array is the offset in the index being passed plus the actual count of elements being passed. 
+ n := int(s + o) + a := reflect.MakeSlice(t, n, n) + // Populate the array starting at the offset specified + for i := int(o); i < n; i++ { + err := dec.fill(a.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of uni-dimensional varying array: %v", i, err) + } + } + v.Set(a) + return nil +} + +// fillMultiDimensionalVaryingArray fills the multi-dimensional slice value provided from varying array data. +// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this +// method not to panic. +func (dec *Decoder) fillMultiDimensionalVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error { + // Read the offset and actual count of each dimensions from the ndr stream + o := make([]int, d, d) + l := make([]int, d, d) + for i := range l { + off, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err) + } + o[i] = int(off) + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read size of dimension %d: %v", i+1, err) + } + l[i] = int(s) + int(off) + } + // Initialise size of slices + // Initialise the size of the 1st dimension + ty := v.Type() + v.Set(reflect.MakeSlice(ty, l[0], l[0])) + // Initialise the size of the other dimensions recursively + makeSubSlices(v, l[1:]) + + // Get all permutations of the indexes and go through each and fill + ps := multiDimensionalIndexPermutations(l) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + var os bool // should this permutation be skipped due to the offset of any of the dimensions? + for i, j := range p { + if j < o[i] { + os = true + break + } + a = a.Index(j) + } + if os { + // This permutation should be skipped as it is less than the offset for one of the dimensions. + continue + } + err := dec.fill(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill index %v of slice: %v", p, err) + } + } + return nil +} + +// fillConformantVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice. +func (dec *Decoder) fillConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, t := sliceDimensions(v.Type()) + if d > 1 { + err := dec.fillMultiDimensionalConformantVaryingArray(v, t, d, tag, def) + if err != nil { + return err + } + } else { + err := dec.fillUniDimensionalConformantVaryingArray(v, tag, def) + if err != nil { + return err + } + } + return nil +} + +// fillUniDimensionalConformantVaryingArray fills the uni-dimensional slice value. 
+func (dec *Decoder) fillUniDimensionalConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + m := dec.precedingMax() + o, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of uni-dimensional conformant varying array: %v", err) + } + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not establish actual count of uni-dimensional conformant varying array: %v", err) + } + if m < o+s { + return errors.New("max count is less than the offset plus actual count") + } + t := v.Type() + n := int(s) + a := reflect.MakeSlice(t, n, n) + for i := int(o); i < n; i++ { + err := dec.fill(a.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of uni-dimensional conformant varying array: %v", i, err) + } + } + v.Set(a) + return nil +} + +// fillMultiDimensionalConformantVaryingArray fills the multi-dimensional slice value provided from conformant varying array data. +// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this +// method not to panic. +func (dec *Decoder) fillMultiDimensionalConformantVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error { + // Read the offset and actual count of each dimensions from the ndr stream + m := make([]int, d, d) + for i := range m { + m[i] = int(dec.precedingMax()) + } + o := make([]int, d, d) + l := make([]int, d, d) + for i := range l { + off, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err) + } + o[i] = int(off) + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read actual count of dimension %d: %v", i+1, err) + } + if m[i] < int(s)+int(off) { + m[i] = int(s) + int(off) + } + l[i] = int(s) + } + // Initialise size of slices + // Initialise the size of the 1st dimension + ty := v.Type() + v.Set(reflect.MakeSlice(ty, m[0], m[0])) + // Initialise the size of the other dimensions recursively + makeSubSlices(v, m[1:]) + + // Get all permutations of the indexes and go through each and fill + ps := multiDimensionalIndexPermutations(m) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + var os bool // should this permutation be skipped due to the offset of any of the dimensions or max is higher than the actual count being passed + for i, j := range p { + if j < o[i] || j >= l[i] { + os = true + break + } + a = a.Index(j) + } + if os { + // This permutation should be skipped as it is less than the offset for one of the dimensions. 
+ continue + } + err := dec.fill(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill index %v of slice: %v", p, err) + } + } + return nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go b/vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go new file mode 100644 index 00000000000..6157b4ef9e2 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go @@ -0,0 +1,393 @@ +// Package ndr provides the ability to unmarshal NDR encoded byte steams into Go data structures +package ndr + +import ( + "bufio" + "fmt" + "io" + "reflect" + "strings" +) + +// Struct tag values +const ( + TagConformant = "conformant" + TagVarying = "varying" + TagPointer = "pointer" + TagPipe = "pipe" +) + +// Decoder unmarshals NDR byte stream data into a Go struct representation +type Decoder struct { + r *bufio.Reader // source of the data + size int // initial size of bytes in buffer + ch CommonHeader // NDR common header + ph PrivateHeader // NDR private header + conformantMax []uint32 // conformant max values that were moved to the beginning of the structure + s interface{} // pointer to the structure being populated + current []string // keeps track of the current field being populated +} + +type deferedPtr struct { + v reflect.Value + tag reflect.StructTag +} + +// NewDecoder creates a new instance of a NDR Decoder. +func NewDecoder(r io.Reader) *Decoder { + dec := new(Decoder) + dec.r = bufio.NewReader(r) + dec.r.Peek(int(commonHeaderBytes)) // For some reason an operation is needed on the buffer to initialise it so Buffered() != 0 + dec.size = dec.r.Buffered() + return dec +} + +// Decode unmarshals the NDR encoded bytes into the pointer of a struct provided. +func (dec *Decoder) Decode(s interface{}) error { + dec.s = s + err := dec.readCommonHeader() + if err != nil { + return err + } + err = dec.readPrivateHeader() + if err != nil { + return err + } + _, err = dec.r.Discard(4) //The next 4 bytes are an RPC unique pointer referent. We just skip these. + if err != nil { + return Errorf("unable to process byte stream: %v", err) + } + + return dec.process(s, reflect.StructTag("")) +} + +func (dec *Decoder) process(s interface{}, tag reflect.StructTag) error { + // Scan for conformant fields as their max counts are moved to the beginning + // http://pubs.opengroup.org/onlinepubs/9629399/chap14.htm#tagfcjh_37 + err := dec.scanConformantArrays(s, tag) + if err != nil { + return err + } + // Recursively fill the struct fields + var localDef []deferedPtr + err = dec.fill(s, tag, &localDef) + if err != nil { + return Errorf("could not decode: %v", err) + } + // Read any deferred referents associated with pointers + for _, p := range localDef { + err = dec.process(p.v, p.tag) + if err != nil { + return fmt.Errorf("could not decode deferred referent: %v", err) + } + } + return nil +} + +// scanConformantArrays scans the structure for embedded conformant fields and captures the maximum element counts for +// dimensions of the array that are moved to the beginning of the structure. 
+func (dec *Decoder) scanConformantArrays(s interface{}, tag reflect.StructTag) error { + err := dec.conformantScan(s, tag) + if err != nil { + return fmt.Errorf("failed to scan for embedded conformant arrays: %v", err) + } + for i := range dec.conformantMax { + dec.conformantMax[i], err = dec.readUint32() + if err != nil { + return fmt.Errorf("could not read preceding conformant max count index %d: %v", i, err) + } + } + return nil +} + +// conformantScan inspects the structure's fields for whether they are conformant. +func (dec *Decoder) conformantScan(s interface{}, tag reflect.StructTag) error { + ndrTag := parseTags(tag) + if ndrTag.HasValue(TagPointer) { + return nil + } + v := getReflectValue(s) + switch v.Kind() { + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + err := dec.conformantScan(v.Field(i), v.Type().Field(i).Tag) + if err != nil { + return err + } + } + case reflect.String: + if !ndrTag.HasValue(TagConformant) { + break + } + dec.conformantMax = append(dec.conformantMax, uint32(0)) + case reflect.Slice: + if !ndrTag.HasValue(TagConformant) { + break + } + d, t := sliceDimensions(v.Type()) + for i := 0; i < d; i++ { + dec.conformantMax = append(dec.conformantMax, uint32(0)) + } + // For string arrays there is a common max for the strings within the array. + if t.Kind() == reflect.String { + dec.conformantMax = append(dec.conformantMax, uint32(0)) + } + } + return nil +} + +func (dec *Decoder) isPointer(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) (bool, error) { + // Pointer so defer filling the referent + ndrTag := parseTags(tag) + if ndrTag.HasValue(TagPointer) { + p, err := dec.readUint32() + if err != nil { + return true, fmt.Errorf("could not read pointer: %v", err) + } + ndrTag.delete(TagPointer) + if p != 0 { + // if pointer is not zero add to the deferred items at end of stream + *def = append(*def, deferedPtr{v, ndrTag.StructTag()}) + } + return true, nil + } + return false, nil +} + +func getReflectValue(s interface{}) (v reflect.Value) { + if r, ok := s.(reflect.Value); ok { + v = r + } else { + if reflect.ValueOf(s).Kind() == reflect.Ptr { + v = reflect.ValueOf(s).Elem() + } + } + return +} + +// fill populates fields with values from the NDR byte stream. +func (dec *Decoder) fill(s interface{}, tag reflect.StructTag, localDef *[]deferedPtr) error { + v := getReflectValue(s) + + //// Pointer so defer filling the referent + ptr, err := dec.isPointer(v, tag, localDef) + if err != nil { + return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + if ptr { + return nil + } + + // Populate the value from the byte stream + switch v.Kind() { + case reflect.Struct: + dec.current = append(dec.current, v.Type().Name()) //Track the current field being filled + // in case struct is a union, track this and the selected union field for efficiency + var unionTag reflect.Value + var unionField string // field to fill if struct is a union + // Go through each field in the struct and recursively fill + for i := 0; i < v.NumField(); i++ { + fieldName := v.Type().Field(i).Name + dec.current = append(dec.current, fieldName) //Track the current field being filled + //fmt.Fprintf(os.Stderr, "DEBUG Decoding: %s\n", strings.Join(dec.current, "/")) + structTag := v.Type().Field(i).Tag + ndrTag := parseTags(structTag) + + // Union handling + if !unionTag.IsValid() { + // Is this field a union tag? 
+ unionTag = dec.isUnion(v.Field(i), structTag) + } else { + // What is the selected field value of the union if we don't already know + if unionField == "" { + unionField, err = unionSelectedField(v, unionTag) + if err != nil { + return fmt.Errorf("could not determine selected union value field for %s with discriminat"+ + " tag %s: %v", v.Type().Name(), unionTag, err) + } + } + if ndrTag.HasValue(TagUnionField) && fieldName != unionField { + // is a union and this field has not been selected so will skip it. + dec.current = dec.current[:len(dec.current)-1] //This field has been skipped so remove it from the current field tracker + continue + } + } + + // Check if field is a pointer + if v.Field(i).Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) && + v.Field(i).Type().Kind() == reflect.Slice && v.Field(i).Type().Elem().Kind() == reflect.Uint8 { + //field is for rawbytes + structTag, err = addSizeToTag(v, v.Field(i), structTag) + if err != nil { + return fmt.Errorf("could not get rawbytes field(%s) size: %v", strings.Join(dec.current, "/"), err) + } + ptr, err := dec.isPointer(v.Field(i), structTag, localDef) + if err != nil { + return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + if !ptr { + err := dec.readRawBytes(v.Field(i), structTag) + if err != nil { + return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + } + } else { + err := dec.fill(v.Field(i), structTag, localDef) + if err != nil { + return fmt.Errorf("could not fill struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + } + dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker + } + dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker + case reflect.Bool: + i, err := dec.readBool() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint8: + i, err := dec.readUint8() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint16: + i, err := dec.readUint16() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint32: + i, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint64: + i, err := dec.readUint64() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int8: + i, err := dec.readInt8() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int16: + i, err := dec.readInt16() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int32: + i, err := dec.readInt32() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int64: + i, err := dec.readInt64() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.String: + ndrTag := parseTags(tag) + conformant := ndrTag.HasValue(TagConformant) + // strings are always varying so this is assumed without an explicit 
tag + var s string + var err error + if conformant { + s, err = dec.readConformantVaryingString(localDef) + if err != nil { + return fmt.Errorf("could not fill with conformant varying string: %v", err) + } + } else { + s, err = dec.readVaryingString(localDef) + if err != nil { + return fmt.Errorf("could not fill with varying string: %v", err) + } + } + v.Set(reflect.ValueOf(s)) + case reflect.Float32: + i, err := dec.readFloat32() + if err != nil { + return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Float64: + i, err := dec.readFloat64() + if err != nil { + return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Array: + err := dec.fillFixedArray(v, tag, localDef) + if err != nil { + return err + } + case reflect.Slice: + if v.Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) && v.Type().Elem().Kind() == reflect.Uint8 { + //field is for rawbytes + err := dec.readRawBytes(v, tag) + if err != nil { + return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + break + } + ndrTag := parseTags(tag) + conformant := ndrTag.HasValue(TagConformant) + varying := ndrTag.HasValue(TagVarying) + if ndrTag.HasValue(TagPipe) { + err := dec.fillPipe(v, tag) + if err != nil { + return err + } + break + } + _, t := sliceDimensions(v.Type()) + if t.Kind() == reflect.String && !ndrTag.HasValue(subStringArrayValue) { + // String array + err := dec.readStringsArray(v, tag, localDef) + if err != nil { + return err + } + break + } + // varying is assumed as fixed arrays use the Go array type rather than slice + if conformant && varying { + err := dec.fillConformantVaryingArray(v, tag, localDef) + if err != nil { + return err + } + } else if !conformant && varying { + err := dec.fillVaryingArray(v, tag, localDef) + if err != nil { + return err + } + } else { + //default to conformant and not varying + err := dec.fillConformantArray(v, tag, localDef) + if err != nil { + return err + } + } + default: + return fmt.Errorf("unsupported type") + } + return nil +} + +// readBytes returns a number of bytes from the NDR byte stream. +func (dec *Decoder) readBytes(n int) ([]byte, error) { + //TODO make this take an int64 as input to allow for larger values on all systems? + b := make([]byte, n, n) + m, err := dec.r.Read(b) + if err != nil || m != n { + return b, fmt.Errorf("error reading bytes from stream: %v", err) + } + return b, nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/error.go b/vendor/github.com/jcmturner/rpc/v2/ndr/error.go new file mode 100644 index 00000000000..9971194d0ec --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/error.go @@ -0,0 +1,18 @@ +package ndr + +import "fmt" + +// Malformed implements the error interface for malformed NDR encoding errors. +type Malformed struct { + EText string +} + +// Error implements the error interface on the Malformed struct. +func (e Malformed) Error() string { + return fmt.Sprintf("malformed NDR stream: %s", e.EText) +} + +// Errorf formats an error message into a malformed NDR error. 
+func Errorf(format string, a ...interface{}) Malformed { + return Malformed{EText: fmt.Sprintf(format, a...)} +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/header.go b/vendor/github.com/jcmturner/rpc/v2/ndr/header.go new file mode 100644 index 00000000000..1970ddb600e --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/header.go @@ -0,0 +1,116 @@ +package ndr + +import ( + "encoding/binary" + "fmt" +) + +/* +Serialization Version 1 +https://msdn.microsoft.com/en-us/library/cc243563.aspx + +Common Header - https://msdn.microsoft.com/en-us/library/cc243890.aspx +8 bytes in total: +- First byte - Version: Must equal 1 +- Second byte - 1st 4 bits: Endianess (0=Big; 1=Little); 2nd 4 bits: Character Encoding (0=ASCII; 1=EBCDIC) +- 3rd - Floating point representation (This does not seem to be the case in examples for Microsoft test sources) +- 4th - Common Header Length: Must equal 8 +- 5th - 8th - Filler: MUST be set to 0xcccccccc on marshaling, and SHOULD be ignored during unmarshaling. + +Private Header - https://msdn.microsoft.com/en-us/library/cc243919.aspx +8 bytes in total: +- First 4 bytes - Indicates the length of a serialized top-level type in the octet stream. It MUST include the padding length and exclude the header itself. +- Second 4 bytes - Filler: MUST be set to 0 (zero) during marshaling, and SHOULD be ignored during unmarshaling. +*/ + +const ( + protocolVersion uint8 = 1 + commonHeaderBytes uint16 = 8 + bigEndian = 0 + littleEndian = 1 + ascii uint8 = 0 + ebcdic uint8 = 1 + ieee uint8 = 0 + vax uint8 = 1 + cray uint8 = 2 + ibm uint8 = 3 +) + +// CommonHeader implements the NDR common header: https://msdn.microsoft.com/en-us/library/cc243889.aspx +type CommonHeader struct { + Version uint8 + Endianness binary.ByteOrder + CharacterEncoding uint8 + FloatRepresentation uint8 + HeaderLength uint16 + Filler []byte +} + +// PrivateHeader implements the NDR private header: https://msdn.microsoft.com/en-us/library/cc243919.aspx +type PrivateHeader struct { + ObjectBufferLength uint32 + Filler []byte +} + +func (dec *Decoder) readCommonHeader() error { + // Version + vb, err := dec.r.ReadByte() + if err != nil { + return Malformed{EText: "could not read first byte of common header for version"} + } + dec.ch.Version = uint8(vb) + if dec.ch.Version != protocolVersion { + return Malformed{EText: fmt.Sprintf("byte stream does not indicate a RPC Type serialization of version %v", protocolVersion)} + } + // Read Endianness & Character Encoding + eb, err := dec.r.ReadByte() + if err != nil { + return Malformed{EText: "could not read second byte of common header for endianness"} + } + endian := int(eb >> 4 & 0xF) + if endian != 0 && endian != 1 { + return Malformed{EText: "common header does not indicate a valid endianness"} + } + dec.ch.CharacterEncoding = uint8(vb & 0xF) + if dec.ch.CharacterEncoding != 0 && dec.ch.CharacterEncoding != 1 { + return Malformed{EText: "common header does not indicate a valid character encoding"} + } + switch endian { + case littleEndian: + dec.ch.Endianness = binary.LittleEndian + case bigEndian: + dec.ch.Endianness = binary.BigEndian + } + // Common header length + lb, err := dec.readBytes(2) + if err != nil { + return Malformed{EText: fmt.Sprintf("could not read common header length: %v", err)} + } + dec.ch.HeaderLength = dec.ch.Endianness.Uint16(lb) + if dec.ch.HeaderLength != commonHeaderBytes { + return Malformed{EText: "common header does not indicate a valid length"} + } + // Filler bytes + dec.ch.Filler, err = dec.readBytes(4) + if err != 
nil { + return Malformed{EText: fmt.Sprintf("could not read common header filler: %v", err)} + } + return nil +} + +func (dec *Decoder) readPrivateHeader() error { + // The next 8 bytes after the common header comprise the RPC type marshalling private header for constructed types. + err := binary.Read(dec.r, dec.ch.Endianness, &dec.ph.ObjectBufferLength) + if err != nil { + return Malformed{EText: "could not read private header object buffer length"} + } + if dec.ph.ObjectBufferLength%8 != 0 { + return Malformed{EText: "object buffer length not a multiple of 8"} + } + // Filler bytes + dec.ph.Filler, err = dec.readBytes(4) + if err != nil { + return Malformed{EText: fmt.Sprintf("could not read private header filler: %v", err)} + } + return nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go b/vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go new file mode 100644 index 00000000000..5fd27da0192 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go @@ -0,0 +1,31 @@ +package ndr + +import ( + "fmt" + "reflect" +) + +func (dec *Decoder) fillPipe(v reflect.Value, tag reflect.StructTag) error { + s, err := dec.readUint32() // read element count of first chunk + if err != nil { + return err + } + a := reflect.MakeSlice(v.Type(), 0, 0) + c := reflect.MakeSlice(v.Type(), int(s), int(s)) + for s != 0 { + for i := 0; i < int(s); i++ { + err := dec.fill(c.Index(i), tag, &[]deferedPtr{}) + if err != nil { + return fmt.Errorf("could not fill element %d of pipe: %v", i, err) + } + } + s, err = dec.readUint32() // read element count of first chunk + if err != nil { + return err + } + a = reflect.AppendSlice(a, c) + c = reflect.MakeSlice(v.Type(), int(s), int(s)) + } + v.Set(a) + return nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go b/vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go new file mode 100644 index 00000000000..7eb1d1afbf0 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go @@ -0,0 +1,211 @@ +package ndr + +import ( + "bytes" + "encoding/binary" + "math" +) + +// Byte sizes of primitive types +const ( + SizeBool = 1 + SizeChar = 1 + SizeUint8 = 1 + SizeUint16 = 2 + SizeUint32 = 4 + SizeUint64 = 8 + SizeEnum = 2 + SizeSingle = 4 + SizeDouble = 8 + SizePtr = 4 +) + +// Bool is an NDR Boolean which is a logical quantity that assumes one of two values: TRUE or FALSE. +// NDR represents a Boolean as one octet. +// It represents a value of FALSE as a zero octet, an octet in which every bit is reset. +// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set. + +// Char is an NDR character. +// NDR represents a character as one octet. +// Characters have two representation formats: ASCII and EBCDIC. + +// USmall is an unsigned 8 bit integer + +// UShort is an unsigned 16 bit integer + +// ULong is an unsigned 32 bit integer + +// UHyper is an unsigned 64 bit integer + +// Small is an signed 8 bit integer + +// Short is an signed 16 bit integer + +// Long is an signed 32 bit integer + +// Hyper is an signed 64 bit integer + +// Enum is the NDR representation of enumerated types as signed short integers (2 octets) + +// Single is an NDR defined single-precision floating-point data type + +// Double is an NDR defined double-precision floating-point data type + +// readBool reads a byte representing a boolean. +// NDR represents a Boolean as one octet. +// It represents a value of FALSE as a zero octet, an octet in which every bit is reset. 
+// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set. +func (dec *Decoder) readBool() (bool, error) { + i, err := dec.readUint8() + if err != nil { + return false, err + } + if i != 0 { + return true, nil + } + return false, nil +} + +// readChar reads bytes representing a 8bit ASCII integer cast to a rune. +func (dec *Decoder) readChar() (rune, error) { + var r rune + a, err := dec.readUint8() + if err != nil { + return r, err + } + return rune(a), nil +} + +// readUint8 reads bytes representing a 8bit unsigned integer. +func (dec *Decoder) readUint8() (uint8, error) { + b, err := dec.r.ReadByte() + if err != nil { + return uint8(0), err + } + return uint8(b), nil +} + +// readUint16 reads bytes representing a 16bit unsigned integer. +func (dec *Decoder) readUint16() (uint16, error) { + dec.ensureAlignment(SizeUint16) + b, err := dec.readBytes(SizeUint16) + if err != nil { + return uint16(0), err + } + return dec.ch.Endianness.Uint16(b), nil +} + +// readUint32 reads bytes representing a 32bit unsigned integer. +func (dec *Decoder) readUint32() (uint32, error) { + dec.ensureAlignment(SizeUint32) + b, err := dec.readBytes(SizeUint32) + if err != nil { + return uint32(0), err + } + return dec.ch.Endianness.Uint32(b), nil +} + +// readUint32 reads bytes representing a 32bit unsigned integer. +func (dec *Decoder) readUint64() (uint64, error) { + dec.ensureAlignment(SizeUint64) + b, err := dec.readBytes(SizeUint64) + if err != nil { + return uint64(0), err + } + return dec.ch.Endianness.Uint64(b), nil +} + +func (dec *Decoder) readInt8() (int8, error) { + dec.ensureAlignment(SizeUint8) + b, err := dec.readBytes(SizeUint8) + if err != nil { + return 0, err + } + var i int8 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +func (dec *Decoder) readInt16() (int16, error) { + dec.ensureAlignment(SizeUint16) + b, err := dec.readBytes(SizeUint16) + if err != nil { + return 0, err + } + var i int16 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +func (dec *Decoder) readInt32() (int32, error) { + dec.ensureAlignment(SizeUint32) + b, err := dec.readBytes(SizeUint32) + if err != nil { + return 0, err + } + var i int32 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +func (dec *Decoder) readInt64() (int64, error) { + dec.ensureAlignment(SizeUint64) + b, err := dec.readBytes(SizeUint64) + if err != nil { + return 0, err + } + var i int64 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +// https://en.wikipedia.org/wiki/IEEE_754-1985 +func (dec *Decoder) readFloat32() (f float32, err error) { + dec.ensureAlignment(SizeSingle) + b, err := dec.readBytes(SizeSingle) + if err != nil { + return + } + bits := dec.ch.Endianness.Uint32(b) + f = math.Float32frombits(bits) + return +} + +func (dec *Decoder) readFloat64() (f float64, err error) { + dec.ensureAlignment(SizeDouble) + b, err := dec.readBytes(SizeDouble) + if err != nil { + return + } + bits := dec.ch.Endianness.Uint64(b) + f = math.Float64frombits(bits) + return +} + +// NDR enforces NDR alignment of primitive data; that is, any primitive of size n octets is aligned at a octet stream +// index that is a multiple of n. (In this version of NDR, n is one of {1, 2, 4, 8}.) 
An octet stream index indicates +// the number of an octet in an octet stream when octets are numbered, beginning with 0, from the first octet in the +// stream. Where necessary, an alignment gap, consisting of octets of unspecified value, precedes the representation +// of a primitive. The gap is of the smallest size sufficient to align the primitive. +func (dec *Decoder) ensureAlignment(n int) { + p := dec.size - dec.r.Buffered() + if s := p % n; s != 0 { + dec.r.Discard(n - s) + } +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go b/vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go new file mode 100644 index 00000000000..9ee59fb10e4 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go @@ -0,0 +1,61 @@ +package ndr + +import ( + "errors" + "fmt" + "reflect" + "strconv" +) + +// type MyBytes []byte +// implement RawBytes interface + +const ( + sizeMethod = "Size" +) + +// RawBytes interface should be implemented if reading just a number of bytes from the NDR stream +type RawBytes interface { + Size(interface{}) int +} + +func rawBytesSize(parent reflect.Value, v reflect.Value) (int, error) { + sf := v.MethodByName(sizeMethod) + if !sf.IsValid() { + return 0, fmt.Errorf("could not find a method called %s on the implementation of RawBytes", sizeMethod) + } + in := []reflect.Value{parent} + f := sf.Call(in) + if f[0].Kind() != reflect.Int { + return 0, errors.New("the RawBytes size function did not return an integer") + } + return int(f[0].Int()), nil +} + +func addSizeToTag(parent reflect.Value, v reflect.Value, tag reflect.StructTag) (reflect.StructTag, error) { + size, err := rawBytesSize(parent, v) + if err != nil { + return tag, err + } + ndrTag := parseTags(tag) + ndrTag.Map["size"] = strconv.Itoa(size) + return ndrTag.StructTag(), nil +} + +func (dec *Decoder) readRawBytes(v reflect.Value, tag reflect.StructTag) error { + ndrTag := parseTags(tag) + sizeStr, ok := ndrTag.Map["size"] + if !ok { + return errors.New("size tag not available") + } + size, err := strconv.Atoi(sizeStr) + if err != nil { + return fmt.Errorf("size not valid: %v", err) + } + b, err := dec.readBytes(size) + if err != nil { + return err + } + v.Set(reflect.ValueOf(b).Convert(v.Type())) + return nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/strings.go b/vendor/github.com/jcmturner/rpc/v2/ndr/strings.go new file mode 100644 index 00000000000..b7a910b3d1b --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/strings.go @@ -0,0 +1,70 @@ +package ndr + +import ( + "fmt" + "reflect" +) + +const ( + subStringArrayTag = `ndr:"varying,X-subStringArray"` + subStringArrayValue = "X-subStringArray" +) + +func uint16SliceToString(a []uint16) string { + s := make([]rune, len(a), len(a)) + for i := range s { + s[i] = rune(a[i]) + } + if len(s) > 0 { + // Remove any null terminator + if s[len(s)-1] == rune(0) { + s = s[:len(s)-1] + } + } + return string(s) +} + +func (dec *Decoder) readVaryingString(def *[]deferedPtr) (string, error) { + a := new([]uint16) + v := reflect.ValueOf(a) + var t reflect.StructTag + err := dec.fillUniDimensionalVaryingArray(v.Elem(), t, def) + if err != nil { + return "", err + } + s := uint16SliceToString(*a) + return s, nil +} + +func (dec *Decoder) readConformantVaryingString(def *[]deferedPtr) (string, error) { + a := new([]uint16) + v := reflect.ValueOf(a) + var t reflect.StructTag + err := dec.fillUniDimensionalConformantVaryingArray(v.Elem(), t, def) + if err != nil { + return "", err + } + s := uint16SliceToString(*a) + return s, nil +} + 
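
The decoder in this vendored package is driven entirely by reflection plus the `ndr` struct tags defined above (`conformant`, `varying`, `pointer`, `unionTag`, ...): a caller annotates the fields of a Go struct and passes a pointer to `Decode`. The sketch below is only illustrative of that call shape under assumptions — the `Info` type, its `Count`/`Name`/`Data` fields, and the empty `payload` buffer are made up for the example and are not part of this patch.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/jcmturner/rpc/v2/ndr"
)

// Info is a hypothetical struct annotated with the ndr tags the decoder
// recognises: "conformant,varying" on a string selects the conformant
// varying string path, and "pointer" defers filling the referent until
// after the enclosing structure has been processed.
type Info struct {
	Count uint32
	Name  string   `ndr:"conformant,varying"`
	Data  []uint32 `ndr:"pointer,conformant"`
}

func main() {
	// payload would normally hold RPC type-serialization v1 (NDR) encoded
	// bytes; an empty buffer is used here only to show the call shape and
	// error handling.
	payload := []byte{}

	var info Info
	dec := ndr.NewDecoder(bytes.NewReader(payload))
	if err := dec.Decode(&info); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("decoded: %+v\n", info)
}
```

With genuine NDR bytes (for example a buffer from an RPC response), `Decode` would populate the exported fields and return nil; the empty buffer above simply exercises the `Malformed` error returned when the common header cannot be read.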
+func (dec *Decoder) readStringsArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, _ := sliceDimensions(v.Type()) + ndrTag := parseTags(tag) + var m []int + //var ms int + if ndrTag.HasValue(TagConformant) { + for i := 0; i < d; i++ { + m = append(m, int(dec.precedingMax())) + } + //common max size + _ = dec.precedingMax() + //ms = int(n) + } + tag = reflect.StructTag(subStringArrayTag) + err := dec.fillVaryingArray(v, tag, def) + if err != nil { + return fmt.Errorf("could not read string array: %v", err) + } + return nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/tags.go b/vendor/github.com/jcmturner/rpc/v2/ndr/tags.go new file mode 100644 index 00000000000..01657e0a1d7 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/tags.go @@ -0,0 +1,69 @@ +package ndr + +import ( + "fmt" + "reflect" + "strings" +) + +const ndrNameSpace = "ndr" + +type tags struct { + Values []string + Map map[string]string +} + +// parse the struct field tags and extract the ndr related ones. +// format of tag ndr:"value,key:value1,value2" +func parseTags(st reflect.StructTag) tags { + s := st.Get(ndrNameSpace) + t := tags{ + Values: []string{}, + Map: make(map[string]string), + } + if s != "" { + ndrTags := strings.Trim(s, `"`) + for _, tag := range strings.Split(ndrTags, ",") { + if strings.Contains(tag, ":") { + m := strings.SplitN(tag, ":", 2) + t.Map[m[0]] = m[1] + } else { + t.Values = append(t.Values, tag) + } + } + } + return t +} + +func appendTag(t reflect.StructTag, s string) reflect.StructTag { + ts := t.Get(ndrNameSpace) + ts = fmt.Sprintf(`%s"%s,%s"`, ndrNameSpace, ts, s) + return reflect.StructTag(ts) +} + +func (t *tags) StructTag() reflect.StructTag { + mv := t.Values + for key, val := range t.Map { + mv = append(mv, key+":"+val) + } + s := ndrNameSpace + ":" + `"` + strings.Join(mv, ",") + `"` + return reflect.StructTag(s) +} + +func (t *tags) delete(s string) { + for i, x := range t.Values { + if x == s { + t.Values = append(t.Values[:i], t.Values[i+1:]...) + } + } + delete(t.Map, s) +} + +func (t *tags) HasValue(s string) bool { + for _, v := range t.Values { + if v == s { + return true + } + } + return false +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/union.go b/vendor/github.com/jcmturner/rpc/v2/ndr/union.go new file mode 100644 index 00000000000..6a657fa6f66 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/union.go @@ -0,0 +1,57 @@ +package ndr + +import ( + "errors" + "fmt" + "reflect" +) + +// Union interface must be implemented by structs that will be unmarshaled into from the NDR byte stream union representation. +// The union's discriminating tag will be passed to the SwitchFunc method. 
+// The discriminating tag field must have the struct tag: `ndr:"unionTag"` +// If the union is encapsulated the discriminating tag field must have the struct tag: `ndr:"encapsulated"` +// The possible value fields that can be selected from must have the struct tag: `ndr:"unionField"` +type Union interface { + SwitchFunc(t interface{}) string +} + +// Union related constants such as struct tag values +const ( + unionSelectionFuncName = "SwitchFunc" + TagEncapsulated = "encapsulated" + TagUnionTag = "unionTag" + TagUnionField = "unionField" +) + +func (dec *Decoder) isUnion(field reflect.Value, tag reflect.StructTag) (r reflect.Value) { + ndrTag := parseTags(tag) + if !ndrTag.HasValue(TagUnionTag) { + return + } + r = field + // For a non-encapsulated union, the discriminant is marshalled into the transmitted data stream twice: once as the + // field or parameter, which is referenced by the switch_is construct, in the procedure argument list; and once as + // the first part of the union representation. + if !ndrTag.HasValue(TagEncapsulated) { + dec.r.Discard(int(r.Type().Size())) + } + return +} + +// unionSelectedField returns the field name of which of the union values to fill +func unionSelectedField(union, discriminant reflect.Value) (string, error) { + if !union.Type().Implements(reflect.TypeOf(new(Union)).Elem()) { + return "", errors.New("struct does not implement union interface") + } + args := []reflect.Value{discriminant} + // Call the SelectFunc of the union struct to find the name of the field to fill with the value selected. + sf := union.MethodByName(unionSelectionFuncName) + if !sf.IsValid() { + return "", fmt.Errorf("could not find a selection function called %s in the unions struct representation", unionSelectionFuncName) + } + f := sf.Call(args) + if f[0].Kind() != reflect.String || f[0].String() == "" { + return "", fmt.Errorf("the union select function did not return a string for the name of the field to fill") + } + return f[0].String(), nil +} diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 52b111d5f36..c589addf98c 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -8,8 +8,6 @@ A high-performance 100% compatible drop-in replacement of "encoding/json" -You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) - # Benchmark ![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod index e05c42ff58b..e817cccbf6f 100644 --- a/vendor/github.com/json-iterator/go/go.mod +++ b/vendor/github.com/json-iterator/go/go.mod @@ -6,6 +6,6 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/google/gofuzz v1.0.0 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 - github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 + github.com/modern-go/reflect2 v1.0.2 github.com/stretchr/testify v1.3.0 ) diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum index be00a6df969..4b7bb8a2952 100644 --- a/vendor/github.com/json-iterator/go/go.sum +++ b/vendor/github.com/json-iterator/go/go.sum @@ -5,11 +5,10 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 
h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/k0sproject/dig/LICENSE b/vendor/github.com/k0sproject/dig/LICENSE new file mode 100644 index 00000000000..f36581594ee --- /dev/null +++ b/vendor/github.com/k0sproject/dig/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021, Mirantis Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/k0sproject/dig/README.md b/vendor/github.com/k0sproject/dig/README.md new file mode 100644 index 00000000000..c063fa68b1e --- /dev/null +++ b/vendor/github.com/k0sproject/dig/README.md @@ -0,0 +1,61 @@ +# Dig + +A go package that provides a Ruby-like `Hash.dig` mechanism for `map[string]interface{}`, which in YAML terminology is refered to as "Mapping". + +## Usage + +Dig's Mapping is useful for example to use as the Unmarshal target of arbitrary YAML/JSON documents. + +### Example +```go +package main + +import ( + "fmt" + + "github.com/k0sproject/dig" + "gopkg.in/yaml.v2" +) + +var yamlDoc = []byte(`--- +i18n: + hello: + se: Hejsan + fi: Morjens + world: + se: Värld + fi: Maailma +`) + +func main() { + m := dig.Mapping{} + if err := yaml.Unmarshal(yamlDoc, &m); err != nil { + panic(err) + } + + // You can use DigMapping to access a deeply nested map and set values. + // Any missing Mapping level in between will be created. 
+ m.DigMapping("i18n", "hello")["es"] = "Hola" + m.DigMapping("i18n", "world")["es"] = "Mundo" + + langs := []string{"fi", "se", "es"} + for _, l := range langs { + + // You can use Dig to access a deeply nested value + greeting := m.Dig("i18n", "hello", l).(string) + + // ..or DigString to avoid having to cast it to string. + target := m.DigString("i18n", "world", l) + + fmt.Printf("%s, %s!\n", greeting, target) + } +} +``` + +Output: + +``` +Morjens, Maailma! +Hejsan, Värld! +Hola, Mundo! +``` diff --git a/vendor/github.com/k0sproject/dig/go.mod b/vendor/github.com/k0sproject/dig/go.mod new file mode 100644 index 00000000000..7a6b738ab6a --- /dev/null +++ b/vendor/github.com/k0sproject/dig/go.mod @@ -0,0 +1,8 @@ +module github.com/k0sproject/dig + +go 1.15 + +require ( + github.com/stretchr/testify v1.7.0 + gopkg.in/yaml.v2 v2.4.0 +) diff --git a/vendor/github.com/k0sproject/dig/go.sum b/vendor/github.com/k0sproject/dig/go.sum new file mode 100644 index 00000000000..8fc3599bc6e --- /dev/null +++ b/vendor/github.com/k0sproject/dig/go.sum @@ -0,0 +1,13 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/k0sproject/dig/mapping.go b/vendor/github.com/k0sproject/dig/mapping.go new file mode 100644 index 00000000000..84297d160a5 --- /dev/null +++ b/vendor/github.com/k0sproject/dig/mapping.go @@ -0,0 +1,143 @@ +// Package dig provides a map[string]interface{} Mapping type that has ruby-like "dig" functionality. +// +// It can be used for example to access and manipulate arbitary nested YAML/JSON structures. +package dig + +import "fmt" + +// Mapping is a nested key-value map where the keys are strings and values are interface{}. In Ruby it is called a Hash (with string keys), in YAML it's called a "mapping". +type Mapping map[string]interface{} + +// UnmarshalYAML for supporting yaml.Unmarshal +func (m *Mapping) UnmarshalYAML(unmarshal func(interface{}) error) error { + var result map[interface{}]interface{} + if err := unmarshal(&result); err != nil { + return err + } + *m = cleanUpInterfaceMap(result) + return nil +} + +// Dig is a simplistic implementation of a Ruby-like Hash.dig functionality. +// +// It returns a value from a (deeply) nested tree structure. 
+func (m *Mapping) Dig(keys ...string) interface{} { + v, ok := (*m)[keys[0]] + if !ok { + return nil + } + switch v := v.(type) { + case Mapping: + if len(keys) == 1 { + return v + } + return v.Dig(keys[1:]...) + default: + if len(keys) > 1 { + return nil + } + return v + } +} + +// DigString is like Dig but returns the value as string +func (m *Mapping) DigString(keys ...string) string { + v := m.Dig(keys...) + val, ok := v.(string) + if !ok { + return "" + } + return val +} + +// DigMapping always returns a mapping, creating missing or overwriting non-mapping branches in between +func (m *Mapping) DigMapping(keys ...string) Mapping { + k := keys[0] + cur := (*m)[k] + switch v := cur.(type) { + case Mapping: + if len(keys) > 1 { + return v.DigMapping(keys[1:]...) + } + return v + default: + n := Mapping{} + (*m)[k] = n + if len(keys) > 1 { + return n.DigMapping(keys[1:]...) + } + return n + } +} + +// Dup creates a dereferenced copy of the Mapping +func (m *Mapping) Dup() Mapping { + new := make(Mapping, len(*m)) + for k, v := range *m { + switch vt := v.(type) { + case Mapping: + new[k] = vt.Dup() + case *Mapping: + new[k] = vt.Dup() + case []Mapping: + var ns []Mapping + for _, sv := range vt { + ns = append(ns, sv.Dup()) + } + new[k] = ns + case []*Mapping: + var ns []Mapping + for _, sv := range vt { + ns = append(ns, sv.Dup()) + } + new[k] = ns + case []string: + var ns []string + ns = append(ns, vt...) + new[k] = ns + case []int: + var ns []int + ns = append(ns, vt...) + new[k] = ns + case []bool: + var ns []bool + ns = append(ns, vt...) + new[k] = ns + default: + new[k] = vt + } + } + return new +} + +// Cleans up a slice of interfaces into slice of actual values +func cleanUpInterfaceArray(in []interface{}) []interface{} { + result := make([]interface{}, len(in)) + for i, v := range in { + result[i] = cleanUpMapValue(v) + } + return result +} + +// Cleans up the map keys to be strings +func cleanUpInterfaceMap(in map[interface{}]interface{}) Mapping { + result := make(Mapping) + for k, v := range in { + result[fmt.Sprintf("%v", k)] = cleanUpMapValue(v) + } + return result +} + +// Cleans up the value in the map, recurses in case of arrays and maps +func cleanUpMapValue(v interface{}) interface{} { + switch v := v.(type) { + case []interface{}: + return cleanUpInterfaceArray(v) + case map[interface{}]interface{}: + return cleanUpInterfaceMap(v) + case string, int, bool, nil: + return v + default: + return fmt.Sprintf("%v", v) + } +} diff --git a/vendor/github.com/k0sproject/k0sctl/LICENSE b/vendor/github.com/k0sproject/k0sctl/LICENSE new file mode 100644 index 00000000000..e3988493c97 --- /dev/null +++ b/vendor/github.com/k0sproject/k0sctl/LICENSE @@ -0,0 +1,209 @@ +Portions of this software are licensed as follows: + +* All content residing under the "docs/" directory of this repository is licensed under "Creative Commons Attribution Share Alike 4.0 International" (CC-BY-SA-4.0). See docs/LICENCE for details. +* Content outside of the above mentioned directories or restrictions above is available under the "Apache License 2.0" as defined below. + +The k0sctl binary will send anonymous telemetry when used. + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021, k0sctl authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
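Editor's note: the k0sctl configuration types vendored below (for example `K0s.Config` in k0s.go and `Spec.KubeAPIURL` in spec.go) lean heavily on the `dig.Mapping` helpers vendored above (`Dig`, `DigString`, `DigMapping`, `Dup`). A minimal, self-contained sketch of how those helpers behave is shown here; the `spec`/`api`/`port` keys are illustrative only and not taken from this patch:

```go
package main

import (
	"fmt"

	"github.com/k0sproject/dig"
)

func main() {
	m := dig.Mapping{}

	// DigMapping creates intermediate maps on demand, so a deep value can
	// be set without checking each level first.
	m.DigMapping("spec", "api")["port"] = 6443

	// Dig walks the same path and returns nil when any key is missing.
	fmt.Println(m.Dig("spec", "api", "port")) // 6443
	fmt.Println(m.Dig("spec", "storage"))     // <nil>

	// DigString returns "" for anything that is not a string.
	fmt.Println(m.DigString("spec", "api", "port")) // prints an empty string

	// Dup is a deep copy: mutating the copy leaves the original intact.
	c := m.Dup()
	c.DigMapping("spec", "api")["port"] = 9443
	fmt.Println(m.Dig("spec", "api", "port")) // still 6443
}
```

This mirrors how `KubeAPIURL` reads `spec.api.port` via `Dig` and how `NodeConfig` pulls whole sub-trees via `DigMapping` in the vendored code that follows.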
diff --git a/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags.go b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags.go new file mode 100644 index 00000000000..db515ed142e --- /dev/null +++ b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags.go @@ -0,0 +1,124 @@ +package cluster + +import ( + "strconv" + "strings" +) + +// Flags is a slice of strings with added functions to ease manipulating lists of command-line flags +type Flags []string + +// Add adds a flag regardless if it exists already or not +func (f *Flags) Add(s string) { + *f = append(*f, s) +} + +// Add a flag with a value +func (f *Flags) AddWithValue(key, value string) { + *f = append(*f, key+" "+value) +} + +// AddUnlessExist adds a flag unless one with the same prefix exists +func (f *Flags) AddUnlessExist(s string) { + if f.Include(s) { + return + } + *f = append(*f, s) +} + +// AddOrReplace replaces a flag with the same prefix or adds a new one if one does not exist +func (f *Flags) AddOrReplace(s string) { + idx := f.Index(s) + if idx > -1 { + (*f)[idx] = s + return + } + *f = append(*f, s) +} + +// Include returns true if a flag with a matching prefix can be found +func (f Flags) Include(s string) bool { + return f.Index(s) > -1 +} + +// Index returns an index to a flag with a matching prefix +func (f Flags) Index(s string) int { + var flag string + sepidx := strings.IndexAny(s, "= ") + if sepidx < 0 { + flag = s + } else { + flag = s[:sepidx] + } + for i, v := range f { + if v == s || strings.HasPrefix(v, flag+"=") || strings.HasPrefix(v, flag+" ") { + return i + } + } + return -1 +} + +// Get returns the full flag with the possible value such as "--san=10.0.0.1" or "" when not found +func (f Flags) Get(s string) string { + idx := f.Index(s) + if idx < 0 { + return "" + } + return f[idx] +} + +// GetValue returns the value part of a flag such as "10.0.0.1" for a flag like "--san=10.0.0.1" +func (f Flags) GetValue(s string) string { + fl := f.Get(s) + if fl == "" { + return "" + } + + idx := strings.IndexAny(fl, "= ") + if idx < 0 { + return "" + } + + val := fl[idx+1:] + s, err := strconv.Unquote(val) + if err == nil { + return s + } + + return val +} + +// Delete removes a matching flag from the list +func (f *Flags) Delete(s string) { + idx := f.Index(s) + if idx < 0 { + return + } + *f = append((*f)[:idx], (*f)[idx+1:]...) 
+} + +// Merge takes the flags from another Flags and adds them to this one unless this already has that flag set +func (f *Flags) Merge(b Flags) { + for _, flag := range b { + f.AddUnlessExist(flag) + } +} + +// MergeOverwrite takes the flags from another Flags and adds or replaces them into this one +func (f *Flags) MergeOverwrite(b Flags) { + for _, flag := range b { + f.AddOrReplace(flag) + } +} + +// MergeAdd takes the flags from another Flags and adds them into this one even if they exist +func (f *Flags) MergeAdd(b Flags) { + for _, flag := range b { + f.Add(flag) + } +} + +// Join creates a string separated by spaces +func (f *Flags) Join() string { + return strings.Join(*f, " ") +} diff --git a/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/hook.go b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/hook.go new file mode 100644 index 00000000000..6fcfc066b08 --- /dev/null +++ b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/hook.go @@ -0,0 +1,12 @@ +package cluster + +// Hooks define a list of hooks such as hooks["apply"]["before"] = ["ls -al", "rm foo.txt"] +type Hooks map[string]map[string][]string + +// ForActionAndStage return hooks for given action and stage +func (h Hooks) ForActionAndStage(action, stage string) []string { + if len(h[action]) > 0 { + return h[action][stage] + } + return nil +} diff --git a/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go new file mode 100644 index 00000000000..cec9c7c3bdc --- /dev/null +++ b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go @@ -0,0 +1,541 @@ +package cluster + +import ( + "encoding/json" + "fmt" + gos "os" + "regexp" + "strconv" + "strings" + "time" + + "github.com/avast/retry-go" + "github.com/creasty/defaults" + validation "github.com/go-ozzo/ozzo-validation/v4" + "github.com/go-ozzo/ozzo-validation/v4/is" + "github.com/go-playground/validator/v10" + "github.com/k0sproject/rig" + "github.com/k0sproject/rig/exec" + "github.com/k0sproject/rig/os" + "github.com/k0sproject/rig/os/registry" + "github.com/k0sproject/version" + log "github.com/sirupsen/logrus" +) + +// Host contains all the needed details to work with hosts +type Host struct { + rig.Connection `yaml:",inline"` + + Role string `yaml:"role"` + PrivateInterface string `yaml:"privateInterface,omitempty"` + PrivateAddress string `yaml:"privateAddress,omitempty"` + Environment map[string]string `yaml:"environment,flow,omitempty"` + UploadBinary bool `yaml:"uploadBinary,omitempty"` + K0sBinaryPath string `yaml:"k0sBinaryPath,omitempty"` + InstallFlags Flags `yaml:"installFlags,omitempty"` + Files []*UploadFile `yaml:"files,omitempty"` + OSIDOverride string `yaml:"os,omitempty"` + HostnameOverride string `yaml:"hostname,omitempty"` + NoTaints bool `yaml:"noTaints,omitempty"` + Hooks Hooks `yaml:"hooks,omitempty"` + + UploadBinaryPath string `yaml:"-"` + Metadata HostMetadata `yaml:"-"` + Configurer configurer `yaml:"-"` +} + +func (h *Host) SetDefaults() { + if h.OSIDOverride != "" { + h.OSVersion = &rig.OSVersion{ID: h.OSIDOverride} + } + + _ = defaults.Set(h.Connection) + + if h.InstallFlags.Get("--single") != "" && h.InstallFlags.GetValue("--single") != "false" && h.Role != "single" { + log.Debugf("%s: changed role from '%s' to 'single' because of --single installFlag", h, h.Role) + h.Role = 
"single" + } + if h.InstallFlags.Get("--enable-worker") != "" && h.InstallFlags.GetValue("--enable-worker") != "false" && h.Role != "controller+worker" { + log.Debugf("%s: changed role from '%s' to 'controller+worker' because of --enable-worker installFlag", h, h.Role) + h.Role = "controller+worker" + } + + if h.InstallFlags.Get("--no-taints") != "" && h.InstallFlags.GetValue("--no-taints") != "false" { + h.NoTaints = true + } +} + +func (h *Host) Validate() error { + // For rig validation + v := validator.New() + if err := v.Struct(h); err != nil { + return err + } + + return validation.ValidateStruct(h, + validation.Field(&h.Role, validation.In("controller", "worker", "controller+worker", "single").Error("unknown role "+h.Role)), + validation.Field(&h.PrivateAddress, is.IP), + validation.Field(&h.Files), + validation.Field(&h.NoTaints, validation.When(h.Role != "controller+worker", validation.NotIn(true).Error("noTaints can only be true for controller+worker role"))), + ) +} + +type configurer interface { + Kind() string + CheckPrivilege(os.Host) error + StartService(os.Host, string) error + StopService(os.Host, string) error + RestartService(os.Host, string) error + ServiceIsRunning(os.Host, string) bool + Arch(os.Host) (string, error) + K0sCmdf(string, ...interface{}) string + K0sBinaryPath() string + K0sBinaryVersion(os.Host) (*version.Version, error) + K0sConfigPath() string + K0sJoinTokenPath() string + WriteFile(os.Host, string, string, string) error + UpdateEnvironment(os.Host, map[string]string) error + DaemonReload(os.Host) error + ReplaceK0sTokenPath(os.Host, string) error + ServiceScriptPath(os.Host, string) (string, error) + ReadFile(os.Host, string) (string, error) + FileExist(os.Host, string) bool + Chmod(os.Host, string, string, ...exec.Option) error + DownloadK0s(os.Host, *version.Version, string) error + DownloadURL(os.Host, string, string, ...exec.Option) error + InstallPackage(os.Host, ...string) error + FileContains(os.Host, string, string) bool + MoveFile(os.Host, string, string) error + MkDir(os.Host, string, ...exec.Option) error + DeleteFile(os.Host, string) error + CommandExist(os.Host, string) bool + Hostname(os.Host) string + KubectlCmdf(string, ...interface{}) string + KubeconfigPath() string + IsContainer(os.Host) bool + FixContainer(os.Host) error + HTTPStatus(os.Host, string) (int, error) + PrivateInterface(os.Host) (string, error) + PrivateAddress(os.Host, string, string) (string, error) + TempDir(os.Host) (string, error) + TempFile(os.Host) (string, error) + UpdateServiceEnvironment(os.Host, string, map[string]string) error + CleanupServiceEnvironment(os.Host, string) error + Stat(os.Host, string, ...exec.Option) (*os.FileInfo, error) + Touch(os.Host, string, time.Time, ...exec.Option) error + DeleteDir(os.Host, string, ...exec.Option) error + K0sctlLockFilePath(os.Host) string + UpsertFile(os.Host, string, string) error +} + +// HostMetadata resolved metadata for host +type HostMetadata struct { + K0sBinaryVersion string + K0sRunningVersion string + Arch string + IsK0sLeader bool + Hostname string + Ready bool + NeedsUpgrade bool +} + +// UnmarshalYAML sets in some sane defaults when unmarshaling the data from yaml +func (h *Host) UnmarshalYAML(unmarshal func(interface{}) error) error { + type host Host + yh := (*host)(h) + + yh.Environment = make(map[string]string) + + if err := unmarshal(yh); err != nil { + return err + } + + return defaults.Set(h) +} + +// Address returns an address for the host +func (h *Host) Address() string { + if h.SSH != nil { + 
return h.SSH.Address + } + + if h.WinRM != nil { + return h.WinRM.Address + } + + return "127.0.0.1" +} + +// Protocol returns host communication protocol +func (h *Host) Protocol() string { + if h.SSH != nil { + return "ssh" + } + + if h.WinRM != nil { + return "winrm" + } + + if h.Localhost != nil { + return "local" + } + + return "nil" +} + +// ResolveConfigurer assigns a rig-style configurer to the Host (see configurer/) +func (h *Host) ResolveConfigurer() error { + bf, err := registry.GetOSModuleBuilder(*h.OSVersion) + if err != nil { + return err + } + + if c, ok := bf().(configurer); ok { + h.Configurer = c + + return nil + } + + return fmt.Errorf("unsupported OS") +} + +// K0sJoinTokenPath returns the token file path from install flags or configurer +func (h *Host) K0sJoinTokenPath() string { + if path := h.InstallFlags.GetValue("--token-file"); path != "" { + return path + } + + return h.Configurer.K0sJoinTokenPath() +} + +// K0sConfigPath returns the config file path from install flags or configurer +func (h *Host) K0sConfigPath() string { + if path := h.InstallFlags.GetValue("--config"); path != "" { + return path + } + + if path := h.InstallFlags.GetValue("-c"); path != "" { + return path + } + + return h.Configurer.K0sConfigPath() +} + +// unquote + unescape a string +func unQE(s string) string { + unq, err := strconv.Unquote(s) + if err != nil { + return s + } + + c := string(s[0]) // string was quoted, c now has the quote char + re := regexp.MustCompile(fmt.Sprintf(`(?:^|[^\\])\\%s`, c)) // replace \" with " (remove escaped quotes inside quoted string) + return string(re.ReplaceAllString(unq, c)) +} + +// K0sInstallCommand returns a full command that will install k0s service with necessary flags +func (h *Host) K0sInstallCommand() string { + role := h.Role + flags := h.InstallFlags + + switch role { + case "controller+worker": + role = "controller" + flags.AddUnlessExist("--enable-worker") + if h.NoTaints { + flags.AddUnlessExist("--no-taints") + } + case "single": + role = "controller" + flags.AddUnlessExist("--single") + } + + if !h.Metadata.IsK0sLeader { + flags.AddUnlessExist(fmt.Sprintf(`--token-file "%s"`, h.K0sJoinTokenPath())) + } + + if h.IsController() { + flags.AddUnlessExist(fmt.Sprintf(`--config "%s"`, h.K0sConfigPath())) + } + + if strings.HasSuffix(h.Role, "worker") { + var extra Flags + if old := flags.GetValue("--kubelet-extra-args"); old != "" { + extra = Flags{unQE(old)} + } + // set worker's private address to --node-ip in --extra-kubelet-args + if h.PrivateAddress != "" { + extra.AddUnlessExist(fmt.Sprintf("--node-ip=%s", h.PrivateAddress)) + } + if h.HostnameOverride != "" { + extra.AddOrReplace(fmt.Sprintf("--hostname-override=%s", h.HostnameOverride)) + } + if extra != nil { + flags.AddOrReplace(fmt.Sprintf("--kubelet-extra-args=%s", strconv.Quote(extra.Join()))) + } + } + + cmd := h.Configurer.K0sCmdf("install %s %s", role, flags.Join()) + sudocmd, err := h.Sudo(cmd) + if err != nil { + log.Warnf("%s: %s", h, err.Error()) + return cmd + } + return sudocmd +} + +// K0sBackupCommand returns a full command to be used as run k0s backup +func (h *Host) K0sBackupCommand(targetDir string) string { + return h.Configurer.K0sCmdf("backup --save-path %s", targetDir) +} + +// K0sRestoreCommand returns a full command to restore cluster state from a backup +func (h *Host) K0sRestoreCommand(backupfile string) string { + return h.Configurer.K0sCmdf("restore %s", backupfile) +} + +// IsController returns true for controller and controller+worker roles +func (h *Host) 
IsController() bool { + return h.Role == "controller" || h.Role == "controller+worker" || h.Role == "single" +} + +// K0sServiceName returns correct service name +func (h *Host) K0sServiceName() string { + switch h.Role { + case "controller", "controller+worker", "single": + return "k0scontroller" + default: + return "k0sworker" + } +} + +// UpdateK0sBinary updates the binary on the host either by downloading or uploading, based on the config +func (h *Host) UpdateK0sBinary(version *version.Version) error { + if h.UploadBinaryPath != "" { + if err := h.Upload(h.UploadBinaryPath, h.Configurer.K0sBinaryPath(), exec.Sudo(h)); err != nil { + return err + } + if err := h.Configurer.Chmod(h, h.Configurer.K0sBinaryPath(), "0700", exec.Sudo(h)); err != nil { + return err + } + } else { + if err := h.Configurer.DownloadK0s(h, version, h.Metadata.Arch); err != nil { + return err + } + } + + updatedVersion, err := h.Configurer.K0sBinaryVersion(h) + if err != nil { + return fmt.Errorf("failed to get updated k0s binary version: %w", err) + } + if !version.Equal(updatedVersion) { + return fmt.Errorf("updated k0s binary version is %s not %s", updatedVersion, version) + } + + h.Metadata.K0sBinaryVersion = version.String() + + return nil +} + +type kubeNodeStatus struct { + Items []struct { + Status struct { + Conditions []struct { + Status string `json:"status"` + Type string `json:"type"` + } `json:"conditions"` + } `json:"status"` + } `json:"items"` +} + +// KubeNodeReady runs kubectl on the host and returns true if the given node is marked as ready +func (h *Host) KubeNodeReady(node *Host) (bool, error) { + output, err := h.ExecOutput(h.Configurer.KubectlCmdf("get node -l kubernetes.io/hostname=%s -o json", node.Metadata.Hostname), exec.HideOutput(), exec.Sudo(h)) + if err != nil { + return false, err + } + log.Tracef("node status output:\n%s\n", output) + status := kubeNodeStatus{} + if err := json.Unmarshal([]byte(output), &status); err != nil { + return false, fmt.Errorf("failed to decode kubectl output: %s", err.Error()) + } + for _, i := range status.Items { + for _, c := range i.Status.Conditions { + log.Debugf("%s: node status condition %s = %s", node, c.Type, c.Status) + if c.Type == "Ready" { + return c.Status == "True", nil + } + } + } + + log.Debugf("%s: failed to find Ready=True state in kubectl output", node) + return false, nil +} + +// WaitKubeNodeReady blocks until node becomes ready. 
TODO should probably use Context +func (h *Host) WaitKubeNodeReady(node *Host) error { + return retry.Do( + func() error { + status, err := h.KubeNodeReady(node) + if err != nil { + return err + } + if !status { + return fmt.Errorf("%s: node %s status not reported as ready", h, node.Metadata.Hostname) + } + return nil + }, + retry.DelayType(retry.CombineDelay(retry.FixedDelay, retry.RandomDelay)), + retry.MaxJitter(time.Second*2), + retry.Delay(time.Second*3), + retry.Attempts(120), + retry.LastErrorOnly(true), + ) +} + +// DrainNode drains the given node +func (h *Host) DrainNode(node *Host) error { + return h.Exec(h.Configurer.KubectlCmdf("drain --grace-period=120 --force --timeout=5m --ignore-daemonsets --delete-local-data %s", node.Metadata.Hostname), exec.Sudo(h)) +} + +// UncordonNode marks the node schedulable again +func (h *Host) UncordonNode(node *Host) error { + return h.Exec(h.Configurer.KubectlCmdf("uncordon %s", node.Metadata.Hostname), exec.Sudo(h)) +} + +// CheckHTTPStatus will perform a web request to the url and return an error if the http status is not the expected +func (h *Host) CheckHTTPStatus(url string, expected ...int) error { + status, err := h.Configurer.HTTPStatus(h, url) + if err != nil { + return err + } + + for _, e := range expected { + if status == e { + return nil + } + } + + return fmt.Errorf("expected response code %d but received %d", expected, status) + +} + +// WaitHTTPStatus waits until http status received for a GET from the URL is the expected one +func (h *Host) WaitHTTPStatus(url string, expected ...int) error { + return retry.Do( + func() error { + return h.CheckHTTPStatus(url, expected...) + }, + retry.DelayType(retry.CombineDelay(retry.FixedDelay, retry.RandomDelay)), + retry.MaxJitter(time.Second*2), + retry.Delay(time.Second*3), + retry.Attempts(60), + retry.LastErrorOnly(true), + ) +} + +// WaitK0sServiceRunning blocks until the k0s service is running on the host +func (h *Host) WaitK0sServiceRunning() error { + return retry.Do( + func() error { + if !h.Configurer.ServiceIsRunning(h, h.K0sServiceName()) { + return fmt.Errorf("not running") + } + return h.Exec(h.Configurer.K0sCmdf("status"), exec.Sudo(h)) + }, + retry.DelayType(retry.CombineDelay(retry.FixedDelay, retry.RandomDelay)), + retry.MaxJitter(time.Second*2), + retry.Delay(time.Second*3), + retry.Attempts(60), + retry.LastErrorOnly(true), + ) +} + +// WaitK0sServiceStopped blocks until the k0s service is no longer running on the host +func (h *Host) WaitK0sServiceStopped() error { + return retry.Do( + func() error { + if h.Configurer.ServiceIsRunning(h, h.K0sServiceName()) { + return fmt.Errorf("k0s still running") + } + if h.Exec(h.Configurer.K0sCmdf("status"), exec.Sudo(h)) == nil { + return fmt.Errorf("k0s still running") + } + return nil + }, + retry.DelayType(retry.CombineDelay(retry.FixedDelay, retry.RandomDelay)), + retry.MaxJitter(time.Second*2), + retry.Delay(time.Second*3), + retry.Attempts(60), + retry.LastErrorOnly(true), + ) +} + +// NeedCurl returns true when the curl package is needed on the host +func (h *Host) NeedCurl() bool { + // Windows does not need any packages for web requests + if h.Configurer.Kind() == "windows" { + return false + } + + return !h.Configurer.CommandExist(h, "curl") +} + +// NeedIPTables returns true when the iptables package is needed on the host +func (h *Host) NeedIPTables() bool { + // Windows does not need iptables + if h.Configurer.Kind() == "windows" { + return false + } + + // Controllers do not need iptables + if h.IsController() { + 
return false + } + + return !h.Configurer.CommandExist(h, "iptables") +} + +// NeedInetUtils returns true when the inetutils package is needed on the host to run `hostname`. +func (h *Host) NeedInetUtils() bool { + // Windows does not need inetutils + if h.Configurer.Kind() == "windows" { + return false + } + + return !h.Configurer.CommandExist(h, "hostname") +} + +// WaitKubeAPIReady blocks until the local kube api responds to /version +func (h *Host) WaitKubeAPIReady(port int) error { + // If the anon-auth is disabled on kube api the version endpoint will give 401 + // thus we need to accept both 200 and 401 as valid statuses when checking kube api + return h.WaitHTTPStatus(fmt.Sprintf("https://localhost:%d/version", port), 200, 401) +} + +// FileChanged returns true when a remote file has different size or mtime compared to local +// or if an error occurs +func (h *Host) FileChanged(lpath, rpath string) bool { + lstat, err := gos.Stat(lpath) + if err != nil { + log.Debugf("%s: local stat failed: %s", h, err) + return true + } + rstat, err := h.Configurer.Stat(h, rpath, exec.Sudo(h)) + if err != nil { + log.Debugf("%s: remote stat failed: %s", h, err) + return true + } + + if lstat.Size() != rstat.Size() { + log.Debugf("%s: file sizes for %s differ (%d vs %d)", h, lpath, lstat.Size(), rstat.Size()) + return true + } + + if !lstat.ModTime().Equal(rstat.ModTime()) { + log.Debugf("%s: file modtimes for %s differ (%s vs %s)", h, lpath, lstat.ModTime(), rstat.ModTime()) + return true + } + + return false +} diff --git a/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/hosts.go b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/hosts.go new file mode 100644 index 00000000000..8f515a7ddd5 --- /dev/null +++ b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/hosts.go @@ -0,0 +1,136 @@ +package cluster + +import ( + "fmt" + "strings" + "sync" +) + +//Hosts are destnation hosts +type Hosts []*Host + +func (hosts Hosts) Validate() error { + if len(hosts) == 0 { + return fmt.Errorf("at least one host required") + } + + if len(hosts) > 1 { + hostmap := make(map[string]struct{}, len(hosts)) + for idx, h := range hosts { + if err := h.Validate(); err != nil { + return fmt.Errorf("host #%d: %v", idx+1, err) + } + if h.Role == "single" { + return fmt.Errorf("%d hosts defined but includes a host with role 'single': %s", len(hosts), h) + } + if _, ok := hostmap[h.String()]; ok { + return fmt.Errorf("%s: is not unique", h) + } + hostmap[h.String()] = struct{}{} + } + } + + if len(hosts.Controllers()) < 1 { + return fmt.Errorf("no hosts with a controller role defined") + } + + return nil +} + +// First returns the first host +func (hosts Hosts) First() *Host { + if len(hosts) == 0 { + return nil + } + return (hosts)[0] +} + +// Last returns the last host +func (hosts Hosts) Last() *Host { + c := len(hosts) - 1 + + if c < 0 { + return nil + } + + return hosts[c] +} + +// Find returns the first matching Host. The finder function should return true for a Host matching the criteria. +func (hosts Hosts) Find(filter func(h *Host) bool) *Host { + for _, h := range hosts { + if filter(h) { + return (h) + } + } + return nil +} + +// Filter returns a filtered list of Hosts. The filter function should return true for hosts matching the criteria. 
+func (hosts Hosts) Filter(filter func(h *Host) bool) Hosts { + result := make(Hosts, 0, len(hosts)) + + for _, h := range hosts { + if filter(h) { + result = append(result, h) + } + } + + return result +} + +// WithRole returns a ltered list of Hosts that have the given role +func (hosts Hosts) WithRole(s string) Hosts { + return hosts.Filter(func(h *Host) bool { + return h.Role == s + }) +} + +// Controllers returns hosts with the role "controller" +func (hosts Hosts) Controllers() Hosts { + return hosts.Filter(func(h *Host) bool { return h.IsController() }) +} + +// Workers returns hosts with the role "worker" +func (hosts Hosts) Workers() Hosts { + return hosts.WithRole("worker") +} + +// ParallelEach runs a function (or multiple functions chained) on every Host parallelly. +// Any errors will be concatenated and returned. +func (hosts Hosts) ParallelEach(filter ...func(h *Host) error) error { + var wg sync.WaitGroup + var errors []string + type erritem struct { + address string + err error + } + ec := make(chan erritem, 1) + + for _, f := range filter { + wg.Add(len(hosts)) + + for _, h := range hosts { + go func(h *Host) { + ec <- erritem{h.String(), f(h)} + }(h) + } + + go func() { + for e := range ec { + if e.err != nil { + errors = append(errors, fmt.Sprintf("%s: %s", e.address, e.err.Error())) + } + wg.Done() + } + }() + + wg.Wait() + } + + if len(errors) > 0 { + return fmt.Errorf("failed on %d hosts:\n - %s", len(errors), strings.Join(errors, "\n - ")) + } + + return nil +} diff --git a/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go new file mode 100644 index 00000000000..a65705f4894 --- /dev/null +++ b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go @@ -0,0 +1,216 @@ +package cluster + +import ( + "compress/gzip" + "encoding/base64" + "fmt" + "io" + "strings" + "time" + + "github.com/Masterminds/semver" + "github.com/alessio/shellescape" + "github.com/avast/retry-go" + "github.com/creasty/defaults" + validation "github.com/go-ozzo/ozzo-validation/v4" + "github.com/k0sproject/dig" + k0sctl "github.com/k0sproject/k0sctl/version" + "github.com/k0sproject/rig/exec" + "github.com/k0sproject/version" + "gopkg.in/yaml.v2" +) + +// K0sMinVersion is the minimum k0s version supported +const K0sMinVersion = "0.11.0-rc1" + +// K0s holds configuration for bootstraping a k0s cluster +type K0s struct { + Version string `yaml:"version"` + DynamicConfig bool `yaml:"dynamicConfig"` + Config dig.Mapping `yaml:"config,omitempty"` + Metadata K0sMetadata `yaml:"-"` +} + +// K0sMetadata contains gathered information about k0s cluster +type K0sMetadata struct { + ClusterID string + VersionDefaulted bool +} + +// UnmarshalYAML sets in some sane defaults when unmarshaling the data from yaml +func (k *K0s) UnmarshalYAML(unmarshal func(interface{}) error) error { + type k0s K0s + yk := (*k0s)(k) + + if err := unmarshal(yk); err != nil { + return err + } + + return defaults.Set(k) +} + +const k0sDynamicSince = "1.22.2+k0s.2" + +func validateVersion(value interface{}) error { + vs, ok := value.(string) + if !ok { + return fmt.Errorf("not a string") + } + + v, err := version.NewVersion(vs) + if err != nil { + return err + } + + min, err := version.NewVersion(K0sMinVersion) + if err != nil { + return fmt.Errorf("internal error: k0sminversion can't be parsed: %s", err) + } + + if v.LessThan(min) { + return fmt.Errorf("version: minimum 
supported k0s version is %s", K0sMinVersion) + } + + return nil +} + +func (k *K0s) Validate() error { + return validation.ValidateStruct(k, + validation.Field(&k.Version, validation.Required), + validation.Field(&k.Version, validation.By(validateVersion)), + validation.Field(&k.DynamicConfig, validation.By(k.validateMinDynamic())), + ) +} + +func (k *K0s) validateMinDynamic() func(interface{}) error { + return func(value interface{}) error { + dc, ok := value.(bool) + if !ok { + return fmt.Errorf("not a boolean") + } + if !dc { + return nil + } + v, err := semver.NewVersion(k.Version) + if err != nil { + return fmt.Errorf("failed to parse k0s version: %w", err) + } + dynamicSince, _ := semver.NewVersion(k0sDynamicSince) + if v.LessThan(dynamicSince) { + return fmt.Errorf("dynamic config only available since k0s version %s", k0sDynamicSince) + } + return nil + } +} + +// SetDefaults (implements defaults Setter interface) defaults the version to latest k0s version +func (k *K0s) SetDefaults() { + if k.Version != "" { + return + } + + latest, err := version.LatestByPrerelease(k0sctl.IsPre() || k0sctl.Version == "0.0.0") + if err == nil { + k.Version = latest.String() + k.Metadata.VersionDefaulted = true + } + + k.Version = strings.TrimPrefix(k.Version, "v") +} + +func (k *K0s) NodeConfig() dig.Mapping { + return dig.Mapping{ + "apiVersion": k.Config.DigString("apiVersion"), + "kind": k.Config.DigString("kind"), + "Metadata": dig.Mapping{ + "name": k.Config.DigMapping("metadata")["name"], + }, + "spec": dig.Mapping{ + "api": k.Config.DigMapping("spec", "api"), + "storage": k.Config.DigMapping("spec", "storage"), + }, + } +} + +// GenerateToken runs the k0s token create command +func (k K0s) GenerateToken(h *Host, role string, expiry time.Duration) (string, error) { + var k0sFlags Flags + k0sFlags.Add(fmt.Sprintf("--role %s", role)) + k0sFlags.Add(fmt.Sprintf("--expiry %s", expiry)) + + out, err := h.ExecOutput(h.Configurer.K0sCmdf("token create --help"), exec.Sudo(h)) + if err == nil && strings.Contains(out, "--config") { + k0sFlags.Add(fmt.Sprintf("--config %s", shellescape.Quote(h.K0sConfigPath()))) + } + + var token string + err = retry.Do( + func() error { + output, err := h.ExecOutput(h.Configurer.K0sCmdf("token create %s", k0sFlags.Join()), exec.HideOutput(), exec.Sudo(h)) + if err != nil { + return err + } + token = output + return nil + }, + retry.DelayType(retry.CombineDelay(retry.FixedDelay, retry.RandomDelay)), + retry.MaxJitter(time.Second*2), + retry.Delay(time.Second*3), + retry.Attempts(60), + retry.LastErrorOnly(true), + ) + return token, err +} + +// GetClusterID uses kubectl to fetch the kube-system namespace uid +func (k K0s) GetClusterID(h *Host) (string, error) { + return h.ExecOutput(h.Configurer.KubectlCmdf("get -n kube-system namespace kube-system -o template={{.metadata.uid}}"), exec.Sudo(h)) +} + +// TokenID returns a token id from a token string that can be used to invalidate the token +func TokenID(s string) (string, error) { + b64 := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + _, err := base64.StdEncoding.Decode(b64, []byte(s)) + if err != nil { + return "", fmt.Errorf("failed to decode token: %w", err) + } + + sr := strings.NewReader(s) + b64r := base64.NewDecoder(base64.StdEncoding, sr) + gzr, err := gzip.NewReader(b64r) + if err != nil { + return "", fmt.Errorf("failed to create a reader for token: %w", err) + } + defer gzr.Close() + + c, err := io.ReadAll(gzr) + if err != nil { + return "", fmt.Errorf("failed to uncompress token: %w", err) + } + cfg 
:= dig.Mapping{} + err = yaml.Unmarshal(c, &cfg) + if err != nil { + return "", fmt.Errorf("failed to unmarshal token: %w", err) + } + + users, ok := cfg.Dig("users").([]interface{}) + if !ok || len(users) < 1 { + return "", fmt.Errorf("failed to find users in token") + } + + user, ok := users[0].(dig.Mapping) + if !ok { + return "", fmt.Errorf("failed to find user in token") + } + + token, ok := user.Dig("user", "token").(string) + if !ok { + return "", fmt.Errorf("failed to find user token in token") + } + + idx := strings.IndexRune(token, '.') + if idx < 0 { + return "", fmt.Errorf("failed to find separator in token") + } + return token[0:idx], nil +} diff --git a/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/spec.go b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/spec.go new file mode 100644 index 00000000000..19f19f67c1c --- /dev/null +++ b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/spec.go @@ -0,0 +1,82 @@ +package cluster + +import ( + "fmt" + + "github.com/creasty/defaults" + validation "github.com/go-ozzo/ozzo-validation/v4" +) + +// Spec defines cluster config spec section +type Spec struct { + Hosts Hosts `yaml:"hosts"` + K0s *K0s `yaml:"k0s"` + + k0sLeader *Host +} + +// UnmarshalYAML sets in some sane defaults when unmarshaling the data from yaml +func (s *Spec) UnmarshalYAML(unmarshal func(interface{}) error) error { + type spec Spec + ys := (*spec)(s) + ys.K0s = &K0s{} + + if err := unmarshal(ys); err != nil { + return err + } + + return defaults.Set(s) +} + +// K0sLeader returns a controller host that is selected to be a "leader", +// or an initial node, a node that creates join tokens for other controllers. +func (s *Spec) K0sLeader() *Host { + if s.k0sLeader == nil { + controllers := s.Hosts.Controllers() + + // Pick the first controller that reports to be running and persist the choice + for _, h := range controllers { + if h.Metadata.K0sBinaryVersion != "" && h.Metadata.K0sRunningVersion != "" { + s.k0sLeader = h + break + } + } + + // Still nil? Fall back to first "controller" host, do not persist selection. 
+ if s.k0sLeader == nil { + return controllers.First() + } + } + + return s.k0sLeader +} + +func (s *Spec) Validate() error { + return validation.ValidateStruct(s, + validation.Field(&s.Hosts, validation.Required), + validation.Field(&s.Hosts), + validation.Field(&s.K0s), + ) +} + +// KubeAPIURL returns an url to the cluster's kube api +func (s *Spec) KubeAPIURL() string { + var caddr string + if a := s.K0s.Config.DigString("spec", "api", "externalAddress"); a != "" { + caddr = a + } else { + leader := s.K0sLeader() + if leader.PrivateAddress != "" { + caddr = leader.PrivateAddress + } else { + caddr = leader.Address() + } + } + + cport := 6443 + if p, ok := s.K0s.Config.Dig("spec", "api", "port").(int); ok { + cport = p + } + + return fmt.Sprintf("https://%s:%d", caddr, cport) +} diff --git a/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/uploadfile.go b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/uploadfile.go new file mode 100644 index 00000000000..bae6e2e0511 --- /dev/null +++ b/vendor/github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/uploadfile.go @@ -0,0 +1,213 @@ +package cluster + +import ( + "fmt" + "os" + "path" + "strconv" + "strings" + + "github.com/bmatcuk/doublestar/v4" + validation "github.com/go-ozzo/ozzo-validation/v4" + log "github.com/sirupsen/logrus" +) + +type LocalFile struct { + Path string + PermMode string +} + +// UploadFile describes a file to be uploaded for the host +type UploadFile struct { + Name string `yaml:"name,omitempty"` + Source string `yaml:"src"` + DestinationDir string `yaml:"dstDir"` + DestinationFile string `yaml:"dst"` + PermMode interface{} `yaml:"perm"` + DirPermMode interface{} `yaml:"dirPerm"` + User string `yaml:"user"` + Group string `yaml:"group"` + PermString string `yaml:"-"` + DirPermString string `yaml:"-"` + Sources []*LocalFile `yaml:"-"` + Base string `yaml:"-"` +} + +func (u UploadFile) Validate() error { + return validation.ValidateStruct(&u, + validation.Field(&u.Source, validation.Required), + validation.Field(&u.DestinationFile, validation.Required.When(u.DestinationDir == "").Error("dst or dstdir required")), + validation.Field(&u.DestinationDir, validation.Required.When(u.DestinationFile == "").Error("dst or dstdir required")), + ) +} + +// converts string or integer value to octal string for chmod +func permToString(val interface{}) (string, error) { + var s string + switch t := val.(type) { + case int, float64: + var num int + if n, ok := t.(float64); ok { + num = int(n) + } else { + num = t.(int) + } + + if num < 0 { + return s, fmt.Errorf("invalid permission: %d: must be a positive value", num) + } + if num == 0 { + return s, fmt.Errorf("invalid nil permission") + } + s = fmt.Sprintf("%#o", num) + case string: + s = t + default: + return "", nil + } + + for i, c := range s { + n, err := strconv.Atoi(string(c)) + if err != nil { + return s, fmt.Errorf("failed to parse permission %s: %w", s, err) + } + + // These could catch some weird octal conversion mistakes + if i == 1 && n < 4 { + return s, fmt.Errorf("invalid permission %s: owner would have unconventional access", s) + } + if n > 7 { + return s, fmt.Errorf("invalid permission %s: octal value can't have numbers over 7", s) + } + } + + return s, nil +} + +// UnmarshalYAML sets in some sane defaults when unmarshaling the data from yaml +func (u *UploadFile) UnmarshalYAML(unmarshal func(interface{}) error) error { + type uploadFile UploadFile + yu := (*uploadFile)(u) + + if 
err := unmarshal(yu); err != nil { + return err + } + + fp, err := permToString(u.PermMode) + if err != nil { + return err + } + u.PermString = fp + + dp, err := permToString(u.DirPermMode) + if err != nil { + return err + } + u.DirPermString = dp + + return u.resolve() +} + +// String returns the file bundle name or if it is empty, the source. +func (u *UploadFile) String() string { + if u.Name == "" { + return u.Source + } + return u.Name +} + +// Owner returns a chown compatible user:group string from User and Group, or empty when neither are set. +func (u *UploadFile) Owner() string { + return strings.TrimSuffix(fmt.Sprintf("%s:%s", u.User, u.Group), ":") +} + +// returns true if the string contains any glob characters +func isGlob(s string) bool { + return strings.ContainsAny(s, "*%?[]{}") +} + +// sets the destination and resolves any globs/local paths into u.Sources +func (u *UploadFile) resolve() error { + if u.IsURL() { + if u.DestinationFile == "" { + if u.DestinationDir != "" { + u.DestinationFile = path.Join(u.DestinationDir, path.Base(u.Source)) + } else { + u.DestinationFile = path.Base(u.Source) + } + } + return nil + } + + if isGlob(u.Source) { + return u.glob(u.Source) + } + + stat, err := os.Stat(u.Source) + if err != nil { + return fmt.Errorf("failed to stat local path for %s: %w", u, err) + } + + if stat.IsDir() { + log.Tracef("source %s is a directory, assuming %s/**/*", u.Source, u.Source) + return u.glob(path.Join(u.Source, "**/*")) + } + + perm := u.PermString + if perm == "" { + perm = fmt.Sprintf("%o", stat.Mode()) + } + u.Base = path.Dir(u.Source) + u.Sources = []*LocalFile{ + {Path: path.Base(u.Source), PermMode: perm}, + } + + return nil +} + +// finds files based on a glob pattern +func (u *UploadFile) glob(src string) error { + base, pattern := doublestar.SplitPattern(src) + u.Base = base + fsys := os.DirFS(base) + sources, err := doublestar.Glob(fsys, pattern) + if err != nil { + return err + } + + for _, s := range sources { + abs := path.Join(base, s) + log.Tracef("glob %s found: %s", abs, s) + stat, err := os.Stat(abs) + if err != nil { + return fmt.Errorf("failed to stat file %s: %w", u, err) + } + + if stat.IsDir() { + log.Tracef("%s is a directory", abs) + continue + } + + perm := u.PermString + if perm == "" { + perm = fmt.Sprintf("%o", stat.Mode()) + } + + u.Sources = append(u.Sources, &LocalFile{Path: s, PermMode: perm}) + } + + if len(u.Sources) == 0 { + return fmt.Errorf("no files found for %s", u) + } + + if u.DestinationFile != "" && len(u.Sources) > 1 { + return fmt.Errorf("found multiple files for %s but single file dst %s defined", u, u.DestinationFile) + } + + return nil +} + +// IsURL returns true if the source is a URL +func (u *UploadFile) IsURL() bool { + return strings.Contains(u.Source, "://") +} diff --git a/vendor/github.com/k0sproject/k0sctl/version/version.go b/vendor/github.com/k0sproject/k0sctl/version/version.go new file mode 100644 index 00000000000..b39d965553a --- /dev/null +++ b/vendor/github.com/k0sproject/k0sctl/version/version.go @@ -0,0 +1,17 @@ +package version + +import "strings" + +var ( + // Version of the product, is set during the build + Version = "0.0.0" + // GitCommit is set during the build + GitCommit = "HEAD" + // Environment of the product, is set during the build + Environment = "development" +) + +// IsPre is true when the current version is a prerelease +func IsPre() bool { + return strings.Contains(Version, "-") +} diff --git a/vendor/github.com/k0sproject/rig/LICENSE 
b/vendor/github.com/k0sproject/rig/LICENSE new file mode 100644 index 00000000000..ed0039e2454 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2021 Mirantis, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/k0sproject/rig/README.md b/vendor/github.com/k0sproject/rig/README.md new file mode 100644 index 00000000000..d368793c27e --- /dev/null +++ b/vendor/github.com/k0sproject/rig/README.md @@ -0,0 +1,76 @@ +### Rig + +A golang package for adding multi-protocol connectivity and multi-os operation functionality to your application's Host objects. + +#### Design goals + +Rig's intention is to be easy to use and extend. + +It should be easy to add support for new operating systems and to add new commands to the multi-os support mechanism without breaking go's type checking. + +All of the relevant structs have YAML tags and default values to make unmarshaling from YAML configurations as easy as possible. + +#### Protocols + +Currently rig comes with the most common ways to connect to hosts: +- SSH for connecting to hosts that accept SSH connections +- WinRM as an alternative to SSH for windows hosts (SSH works too) +- Local for treating the localhost as it was one of the remote hosts + +#### Usage + +The intended way to use rig is to embed the `rig.Connection` struct into your own. + +Example: + +```go +package main + +import "github.com/k0sproject/rig" + +type host struct { + rig.Connection +} + +func main() { + h := host{ + connection: rig.Connection{ + SSH: &rig.SSH{ + Address: 10.0.0.1 + } + } + } + + if err := h.Connect(); err != nil { + panic(err) + } + + output, err := h.ExecOutput("ls -al") + if err != nil { + panic(err) + } + println(output) +} +``` + +But of course you can use it directly on its own too: + +```go +package main + +import "github.com/k0sproject/rig" + +func main() { + h := rig.Connection{ + SSH: &rig.SSH{ + Address: 10.0.0.1 + } + } + + if err := h.Connect(); err != nil { + panic(err) + } +} +``` + +See more usage examples in the [examples/](examples/) directory. diff --git a/vendor/github.com/k0sproject/rig/connection.go b/vendor/github.com/k0sproject/rig/connection.go new file mode 100644 index 00000000000..55c7c10fdfa --- /dev/null +++ b/vendor/github.com/k0sproject/rig/connection.go @@ -0,0 +1,334 @@ +// Package rig provides an easy way to add multi-protocol connectivity and +// multi-os operation support to your application's Host objects +package rig + +import ( + "fmt" + "strings" + + "github.com/alessio/shellescape" + "github.com/creasty/defaults" + "github.com/google/shlex" + "github.com/k0sproject/rig/exec" +) + +type rigError struct { + Connection *Connection +} + +// NotConnectedError is returned when attempting to perform remote operations +// on Host when it is not connected +type NotConnectedError rigError + +// Error returns the error message +func (e *NotConnectedError) Error() string { return e.Connection.String() + ": not connected" } + +type client interface { + Connect() error + Disconnect() + Upload(source, destination string, opts ...exec.Option) error + IsWindows() bool + Exec(string, ...exec.Option) error + ExecInteractive(string) error + String() string + Protocol() string + IPAddress() string + IsConnected() bool +} + +// Connection is a Struct you can embed into your application's "Host" types +// to give them multi-protocol connectivity. +// +// All of the important fields have YAML tags. 
+// +// If you have a host like this: +// +// type Host struct { +// rig.Connection `yaml:"connection"` +// } +// +// and a YAML like this: +// +// hosts: +// - connection: +// ssh: +// address: 10.0.0.1 +// port: 8022 +// +// you can then simply do this: +// +// var hosts []*Host +// if err := yaml.Unmarshal(data, &hosts); err != nil { +// panic(err) +// } +// for _, h := range hosts { +// err := h.Connect() +// if err != nil { +// panic(err) +// } +// output, err := h.ExecOutput("echo hello") +// } +type Connection struct { + WinRM *WinRM `yaml:"winRM,omitempty"` + SSH *SSH `yaml:"ssh,omitempty"` + Localhost *Localhost `yaml:"localhost,omitempty"` + + OSVersion *OSVersion `yaml:"-"` + + client client `yaml:"-"` + sudofunc func(string) string +} + +// SetDefaults sets a connection +func (c *Connection) SetDefaults() { + if c.client == nil { + c.client = c.configuredClient() + if c.client == nil { + c.client = defaultClient() + } + } + + _ = defaults.Set(c.client) +} + +// Protocol returns the connection protocol name +func (c *Connection) Protocol() string { + if c.client != nil { + return c.client.Protocol() + } + + if client := c.configuredClient(); client != nil { + return client.Protocol() + } + + return "" +} + +// Address returns the connection address +func (c *Connection) Address() string { + if c.client != nil { + return c.client.IPAddress() + } + + if client := c.configuredClient(); client != nil { + return client.IPAddress() + } + + return "" +} + +// IsConnected returns true if the client is assumed to be connected. +// "Assumed" - as in `Connect()` has been called and no error was returned. +// The underlying client may actually have disconnected and has become +// inoperable, but rig won't know that until you try to execute commands on +// the connection. +func (c *Connection) IsConnected() bool { + if c.client == nil { + return false + } + + return c.client.IsConnected() +} + +// String returns a printable representation of the connection, which will look +// like: `[ssh] address:port` +func (c Connection) String() string { + client := c.client + if client == nil { + client = c.configuredClient() + _ = defaults.Set(c) + } + if client == nil { + client = defaultClient() + } + + return client.String() +} + +// IsWindows returns true on windows hosts +func (c *Connection) IsWindows() bool { + if !c.IsConnected() { + if client := c.configuredClient(); client != nil { + return client.IsWindows() + } + } + return c.client.IsWindows() +} + +// Exec runs a command on the host +func (c Connection) Exec(cmd string, opts ...exec.Option) error { + if !c.IsConnected() { + return &NotConnectedError{&c} + } + + return c.client.Exec(cmd, opts...) +} + +// ExecOutput runs a command on the host and returns the output as a String +func (c Connection) ExecOutput(cmd string, opts ...exec.Option) (string, error) { + if !c.IsConnected() { + return "", &NotConnectedError{&c} + } + + var output string + opts = append(opts, exec.Output(&output)) + err := c.Exec(cmd, opts...) 
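+	// The exec.Output option above registered &output as the capture target, so
+	// the underlying client has appended the command's stdout to it during Exec.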
+ return strings.TrimSpace(output), err +} + +// Connect to the host and identify the operating system and sudo capability +func (c *Connection) Connect() error { + if c.client == nil { + _ = defaults.Set(c) + } + + if err := c.client.Connect(); err != nil { + c.client = nil + return err + } + + if c.OSVersion == nil { + o, err := GetOSVersion(c) + if err != nil { + return err + } + c.OSVersion = &o + } + + c.configureSudo() + + return nil +} + +func (c *Connection) configureSudo() { + switch c.OSVersion.ID { + case "windows": + c.sudofunc = func(cmd string) string { + return "runas /user:Administrator " + cmd + } + default: + if c.Exec(`[ "$(id -u)" = 0 ]`) == nil { + c.sudofunc = func(cmd string) string { + return cmd + } + } else if c.Exec("sudo -n true") == nil { + c.sudofunc = func(cmd string) string { + parts, err := shlex.Split(cmd) + if err != nil { + return "sudo -s -- " + cmd + } + + var idx int + for i, p := range parts { + if strings.Contains(p, "=") { + idx = i + 1 + continue + } + break + } + + if idx == 0 { + return "sudo -s -- " + cmd + } + + for i, p := range parts { + parts[i] = shellescape.Quote(p) + } + + return fmt.Sprintf("sudo -s %s -- %s", strings.Join(parts[0:idx], " "), strings.Join(parts[idx:], " ")) + } + } else if c.Exec("doas -n true") == nil { + c.sudofunc = func(cmd string) string { + return "doas -s -- " + cmd + } + } + } +} + +func (c Connection) Sudo(cmd string) (string, error) { + if c.sudofunc == nil { + return "", fmt.Errorf("user is not an administrator and passwordless access elevation has not been configured") + } + + return c.sudofunc(cmd), nil +} + +// Execf is just like `Exec` but you can use Sprintf templating for the command +func (c Connection) Execf(s string, params ...interface{}) error { + opts, args := GroupParams(params) + return c.Exec(fmt.Sprintf(s, args...), opts...) +} + +// ExecOutputf is like ExecOutput but you can use Sprintf +// templating for the command +func (c Connection) ExecOutputf(s string, params ...interface{}) (string, error) { + opts, args := GroupParams(params) + return c.ExecOutput(fmt.Sprintf(s, args...), opts...) +} + +// ExecInteractive executes a command on the host and passes control of +// local input to the remote command +func (c Connection) ExecInteractive(cmd string) error { + if !c.IsConnected() { + return &NotConnectedError{&c} + } + + return c.client.ExecInteractive(cmd) +} + +// Disconnect from the host +func (c *Connection) Disconnect() { + if c.client != nil { + c.client.Disconnect() + } + c.client = nil +} + +// Upload copies a file from a local path src to the remote host path dst. For +// smaller files you should probably use os.WriteFile +func (c Connection) Upload(src, dst string, opts ...exec.Option) error { + if !c.IsConnected() { + return &NotConnectedError{&c} + } + + return c.client.Upload(src, dst, opts...) +} + +func (c *Connection) configuredClient() client { + if c.WinRM != nil { + return c.WinRM + } + + if c.Localhost != nil { + return c.Localhost + } + + if c.SSH != nil { + return c.SSH + } + + return nil +} + +func defaultClient() client { + c := &Localhost{Enabled: true} + _ = defaults.Set(c) + return c +} + +// GroupParams separates exec.Options from other sprintf templating args +func GroupParams(params ...interface{}) (opts []exec.Option, args []interface{}) { + for _, v := range params { + switch vv := v.(type) { + case []interface{}: + o, a := GroupParams(vv...) + opts = append(opts, o...) + args = append(args, a...) 
+ case exec.Option: + opts = append(opts, vv) + default: + args = append(args, vv) + } + } + return +} diff --git a/vendor/github.com/k0sproject/rig/exec/exec.go b/vendor/github.com/k0sproject/rig/exec/exec.go new file mode 100644 index 00000000000..5661418a22e --- /dev/null +++ b/vendor/github.com/k0sproject/rig/exec/exec.go @@ -0,0 +1,285 @@ +package exec + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" + "sync" + + "github.com/k0sproject/rig/log" +) + +var ( + // DisableRedact will make redact not redact anything + DisableRedact = false + // Confirm will make all command execs ask for confirmation - this is a simplistic way for auditing what will be executed + Confirm = false + + // DebugFunc can be replaced to direct the output of exec logging into your own function (standard sprintf interface) + DebugFunc = func(s string, args ...interface{}) { + log.Debugf(s, args...) + } + + // InfoFunc can be replaced to direct the output of exec logging into your own function (standard sprintf interface) + InfoFunc = func(s string, args ...interface{}) { + log.Infof(s, args...) + } + + // ErrorFunc can be replaced to direct the output of exec logging into your own function (standard sprintf interface) + ErrorFunc = func(s string, args ...interface{}) { + log.Errorf(s, args...) + } + + // ConfirmFunc is called to ask for confirmation + ConfirmFunc = func(s string) bool { + fmt.Println(s) + fmt.Print("Allow? [Y/n]: ") + reader := bufio.NewReader(os.Stdin) + text, _ := reader.ReadString('\n') + text = strings.TrimSpace(text) + return text == "" || text == "Y" || text == "y" + } + + mutex sync.Mutex +) + +// Option is a functional option for the exec package +type Option func(*Options) + +// Options is a collection of exec options +type Options struct { + Stdin string + AllowWinStderr bool + LogInfo bool + LogDebug bool + LogError bool + LogCommand bool + LogOutput bool + StreamOutput bool + Sudo bool + RedactFunc func(string) string + Output *string + Writer io.Writer + + host host +} + +type host interface { + Sudo(string) (string, error) +} + +func (o *Options) Command(cmd string) (string, error) { + if !o.Sudo { + return cmd, nil + } + + return o.host.Sudo(cmd) +} + +// LogCmd is for logging the command to be executed +func (o *Options) LogCmd(prefix, cmd string) { + if Confirm { + + mutex.Lock() + if !ConfirmFunc(fmt.Sprintf("\nHost: %s\nCommand: %s", prefix, o.Redact(cmd))) { + os.Stderr.WriteString("aborted\n") + os.Exit(1) + } + mutex.Unlock() + } + + if o.LogCommand { + DebugFunc("%s: executing `%s`", prefix, o.Redact(cmd)) + } else { + DebugFunc("%s: executing [REDACTED]", prefix) + } +} + +// LogStdin is for logging information about command stdin input +func (o *Options) LogStdin(prefix string) { + if o.Stdin == "" || !o.LogDebug { + return + } + + if len(o.Stdin) > 256 { + o.LogDebugf("%s: writing %d bytes to command stdin", prefix, len(o.Stdin)) + } else { + o.LogDebugf("%s: writing %d bytes to command stdin: %s", prefix, len(o.Stdin), o.Redact(o.Stdin)) + } +} + +// LogDebugf is a conditional debug logger +func (o *Options) LogDebugf(s string, args ...interface{}) { + if o.LogDebug { + DebugFunc(s, args...) + } +} + +// LogInfof is a conditional info logger +func (o *Options) LogInfof(s string, args ...interface{}) { + if o.LogInfo { + InfoFunc(s, args...) + } +} + +// LogErrorf is a conditional error logger +func (o *Options) LogErrorf(s string, args ...interface{}) { + if o.LogError { + ErrorFunc(s, args...) 
+ } +} + +// AddOutput is for appending / displaying output of the command +func (o *Options) AddOutput(prefix, stdout, stderr string) { + mutex.Lock() + defer mutex.Unlock() + + if o.Output != nil && stdout != "" { + *o.Output += stdout + } + + if o.StreamOutput { + if stdout != "" { + InfoFunc("%s: %s", prefix, strings.TrimSpace(o.Redact(stdout))) + } else if stderr != "" { + ErrorFunc("%s: %s", prefix, strings.TrimSpace(o.Redact(stderr))) + } + } else if o.LogOutput { + if stdout != "" { + DebugFunc("%s: %s", prefix, strings.TrimSpace(o.Redact(stdout))) + } else if stderr != "" { + DebugFunc("%s: (stderr) %s", prefix, strings.TrimSpace(o.Redact(stderr))) + } + } +} + +// AllowWinStderr exec option allows command to output to stderr without failing +func AllowWinStderr() Option { + return func(o *Options) { + o.AllowWinStderr = true + } +} + +// Redact is for filtering out sensitive text using a regexp +func (o *Options) Redact(s string) string { + if DisableRedact || o.RedactFunc == nil { + return s + } + return o.RedactFunc(s) +} + +// Stdin exec option for sending data to the command through stdin +func Stdin(t string) Option { + return func(o *Options) { + o.Stdin = t + } +} + +// Output exec option for setting output string target +func Output(output *string) Option { + return func(o *Options) { + o.Output = output + } +} + +// StreamOutput exec option for sending the command output to info log +func StreamOutput() Option { + return func(o *Options) { + o.StreamOutput = true + } +} + +// HideCommand exec option for hiding the command-string and stdin contents from the logs +func HideCommand() Option { + return func(o *Options) { + o.LogCommand = false + } +} + +// HideOutput exec option for hiding the command output from logs +func HideOutput() Option { + return func(o *Options) { + o.LogOutput = false + } +} + +// Sensitive exec option for disabling all logging of the command +func Sensitive() Option { + return func(o *Options) { + o.LogDebug = false + o.LogInfo = false + o.LogError = false + o.LogCommand = false + } +} + +// Sudo exec option for running the command with elevated permissions +func Sudo(h host) Option { + return func(o *Options) { + o.host = h + o.Sudo = true + } +} + +// Redact exec option for defining a redact regexp pattern that will be replaced with [REDACTED] in the logs +func Redact(rexp string) Option { + return func(o *Options) { + re := regexp.MustCompile(rexp) + o.RedactFunc = func(s2 string) string { + return re.ReplaceAllString(s2, "[REDACTED]") + } + } +} + +// RedactString exec option for defining one or more strings to replace with [REDACTED] in the log output +func RedactString(s ...string) Option { + var newS []string + for _, str := range s { + if str != "" { + newS = append(newS, str) + } + } + + return func(o *Options) { + o.RedactFunc = func(s2 string) string { + newstr := s2 + for _, r := range newS { + newstr = strings.ReplaceAll(newstr, r, "[REDACTED]") + } + return newstr + } + } +} + +// Writer exec option for sending command stdout to an io.Writer +func Writer(w io.Writer) Option { + return func(o *Options) { + o.Writer = w + } +} + +// Build returns an instance of Options +func Build(opts ...Option) *Options { + options := &Options{ + Stdin: "", + LogInfo: false, + LogCommand: true, + LogDebug: true, + LogError: true, + LogOutput: true, + StreamOutput: false, + Sudo: false, + Output: nil, + Writer: nil, + host: nil, + } + + for _, o := range opts { + o(options) + } + + return options +} diff --git a/vendor/github.com/k0sproject/rig/go.mod 
b/vendor/github.com/k0sproject/rig/go.mod new file mode 100644 index 00000000000..04f31b625a2 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/go.mod @@ -0,0 +1,36 @@ +module github.com/k0sproject/rig + +go 1.18 + +require ( + github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d + github.com/alessio/shellescape v1.4.1 + github.com/creasty/defaults v1.5.2 + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 + github.com/masterzen/winrm v0.0.0-20211231115050-232efb40349e + github.com/mitchellh/go-homedir v1.1.0 + github.com/stretchr/testify v1.7.0 + golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 +) + +require ( + github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e // indirect + github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/gofrs/uuid v4.2.0+incompatible // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jcmturner/goidentity/v6 v6.0.1 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect + golang.org/x/text v0.3.7 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/vendor/github.com/k0sproject/rig/go.sum b/vendor/github.com/k0sproject/rig/go.sum new file mode 100644 index 00000000000..d74ea06eef5 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/go.sum @@ -0,0 +1,91 @@ +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e h1:ZU22z/2YRFLyf/P4ZwUYSdNCWsMEI0VeyrFoI2rAhJQ= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6 h1:w0E0fgc1YafGEh5cROhlROMWXiNoZqApk2PDN0M1+Ns= +github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= +github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/creasty/defaults v1.5.2 h1:/VfB6uxpyp6h0fr7SPp7n8WJBoV8jfxQXPCnkVSjyls= +github.com/creasty/defaults v1.5.2/go.mod h1:FPZ+Y0WNrbqOVw+c6av63eyHUAl6pMHZwqLPvXUZGfY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/google/go-cmp v0.5.6 
h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg= +github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= +github.com/masterzen/winrm v0.0.0-20211231115050-232efb40349e h1:au+BndCo30p6G49xKTj1ZigvPn/ekiO2Gt+V+pbujfQ= +github.com/masterzen/winrm v0.0.0-20211231115050-232efb40349e/go.mod h1:Iju3u6NzoTAvjuhsGCZc+7fReNnr/Bd6DsWj3WTokIU= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/k0sproject/rig/localhost.go b/vendor/github.com/k0sproject/rig/localhost.go new file mode 100644 index 00000000000..eae44af1196 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/localhost.go @@ -0,0 +1,194 @@ +package rig + +import ( + "bufio" + "fmt" + "io" + "os" + osexec "os/exec" + "runtime" + "strings" + "sync" + + "github.com/alessio/shellescape" + "github.com/k0sproject/rig/exec" + ps "github.com/k0sproject/rig/powershell" + "github.com/kballard/go-shellquote" +) + +const name = "[local] localhost" + +// Localhost is a direct localhost connection +type Localhost struct { + Enabled bool `yaml:"enabled" validate:"required,eq=true" default:"true"` +} + +// Protocol returns the protocol name, "Local" +func (c *Localhost) Protocol() string { + return "Local" +} + +// IPAddress returns the connection address +func (c *Localhost) IPAddress() string { + return "127.0.0.1" +} + +// String returns the connection's printable name +func (c *Localhost) String() string { + return name +} + +// IsConnected for local connections is always true +func (c *Localhost) IsConnected() bool { + return true +} + +// IsWindows is true when running on a windows host +func (c *Localhost) IsWindows() bool { + return runtime.GOOS == "windows" +} + +// Connect on local connection does nothing +func (c *Localhost) Connect() error { + return nil +} + +// Disconnect on local connection does nothing +func (c *Localhost) Disconnect() {} + +// Exec executes a command on the host +func (c *Localhost) Exec(cmd string, opts ...exec.Option) error { + o := exec.Build(opts...) + command, err := c.command(cmd, o) + if err != nil { + return err + } + + if o.Stdin != "" { + o.LogStdin(name) + + command.Stdin = strings.NewReader(o.Stdin) + } + + stdout, err := command.StdoutPipe() + if err != nil { + return err + } + stderr, err := command.StderrPipe() + if err != nil { + return err + } + + o.LogCmd(name, cmd) + + if err := command.Start(); err != nil { + return err + } + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + + if o.Writer == nil { + outputScanner := bufio.NewScanner(stdout) + + for outputScanner.Scan() { + o.AddOutput(name, outputScanner.Text()+"\n", "") + } + } else { + if _, err := io.Copy(o.Writer, stdout); err != nil { + o.LogErrorf("%s: failed to stream stdout", c, err.Error()) + } + } + }() + wg.Add(1) + go func() { + defer wg.Done() + + outputScanner := bufio.NewScanner(stderr) + + for outputScanner.Scan() { + o.AddOutput(name, "", outputScanner.Text()+"\n") + } + }() + + err = command.Wait() + wg.Wait() + return err +} + +func (c *Localhost) command(cmd string, o *exec.Options) (*osexec.Cmd, error) { + cmd, err := o.Command(cmd) + if err != nil { + return nil, err + } + + if c.IsWindows() { + return osexec.Command(cmd), nil + } + + return osexec.Command("bash", "-c", "--", cmd), nil +} + +// Upload copies a larger file to another path on the host. 
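+// For the local connection this is a plain filesystem copy: src is opened,
+// dst is created and the contents are copied over. If the copy itself fails,
+// the partially written destination file is removed. The exec options are
+// accepted for interface compatibility but are not used here.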
+func (c *Localhost) Upload(src, dst string, opts ...exec.Option) error { + var remoteErr error + defer func() { + if remoteErr != nil { + if c.IsWindows() { + _ = c.Exec(fmt.Sprintf(`del %s`, ps.DoubleQuote(dst))) + } else { + _ = c.Exec(fmt.Sprintf(`rm -f -- %s`, shellescape.Quote(dst))) + } + } + }() + + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + _, remoteErr = io.Copy(out, in) + return remoteErr +} + +// ExecInteractive executes a command on the host and copies stdin/stdout/stderr from local host +func (c *Localhost) ExecInteractive(cmd string) error { + if cmd == "" { + cmd = os.Getenv("SHELL") + " -l" + } + + if cmd == " -l" { + cmd = "cmd" + } + + cwd, err := os.Getwd() + if err != nil { + return err + } + + pa := os.ProcAttr{ + Files: []*os.File{os.Stdin, os.Stdout, os.Stderr}, + Dir: cwd, + } + + parts, err := shellquote.Split(cmd) + if err != nil { + return err + } + + proc, err := os.StartProcess(parts[0], parts[1:], &pa) + if err != nil { + return err + } + + _, err = proc.Wait() + println("shell exited") + return err +} diff --git a/vendor/github.com/k0sproject/rig/log.go b/vendor/github.com/k0sproject/rig/log.go new file mode 100644 index 00000000000..b72acc561fa --- /dev/null +++ b/vendor/github.com/k0sproject/rig/log.go @@ -0,0 +1,12 @@ +package rig + +import "github.com/k0sproject/rig/log" + +// SetLogger can be used to assign your own logger to rig +func SetLogger(logger log.Logger) { + log.Log = logger +} + +func init() { + SetLogger(&log.StdLog{}) +} diff --git a/vendor/github.com/k0sproject/rig/log/log.go b/vendor/github.com/k0sproject/rig/log/log.go new file mode 100644 index 00000000000..8b9f8706125 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/log/log.go @@ -0,0 +1,48 @@ +package log + +import "fmt" + +// Logger interface should be implemented by the logging library you wish to use +type Logger interface { + Debugf(string, ...interface{}) + Infof(string, ...interface{}) + Errorf(string, ...interface{}) +} + +// Log can be assigned a proper logger, such as logrus configured to your liking. +var Log Logger + +// Debugf logs a debug level log message +func Debugf(t string, args ...interface{}) { + Log.Debugf(t, args...) +} + +// Infof logs an info level log message +func Infof(t string, args ...interface{}) { + Log.Infof(t, args...) +} + +// Errorf logs an error level log message +func Errorf(t string, args ...interface{}) { + Log.Errorf(t, args...) 
+} + +// StdLog is a simplistic logger for rig +type StdLog struct { + Logger +} + +// Debugf prints a debug level log message +func (l *StdLog) Debugf(t string, args ...interface{}) { + fmt.Println("DEBUG", fmt.Sprintf(t, args...)) +} + +// Infof prints an info level log message +func (l *StdLog) Infof(t string, args ...interface{}) { + fmt.Println("INFO ", fmt.Sprintf(t, args...)) +} + +// Errorf prints an error level log message +func (l *StdLog) Errorf(t string, args ...interface{}) { + fmt.Println("ERROR", fmt.Sprintf(t, args...)) +} diff --git a/vendor/github.com/k0sproject/rig/os/fileinfo.go b/vendor/github.com/k0sproject/rig/os/fileinfo.go new file mode 100644 index 00000000000..9d9db85b573 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/os/fileinfo.go @@ -0,0 +1,35 @@ +package os + +import ( + "io/fs" + "time" +) + +// FileInfo implements fs.FileInfo +type FileInfo struct { + FName string + FSize int64 + FMode fs.FileMode + FModTime time.Time + FIsDir bool +} + +func (f *FileInfo) Name() string { + return f.FName +} + +func (f *FileInfo) Size() int64 { + return f.FSize +} + +func (f *FileInfo) Mode() fs.FileMode { + return f.FMode +} + +func (f *FileInfo) ModTime() time.Time { + return f.FModTime +} + +func (f *FileInfo) IsDir() bool { + return f.FIsDir +} diff --git a/vendor/github.com/k0sproject/rig/os/host.go b/vendor/github.com/k0sproject/rig/os/host.go new file mode 100644 index 00000000000..dd7834c780d --- /dev/null +++ b/vendor/github.com/k0sproject/rig/os/host.go @@ -0,0 +1,16 @@ +package os + +import ( + "github.com/k0sproject/rig/exec" +) + +// Host is an interface to a host object that has the functions needed by the various OS support packages +type Host interface { + Upload(source, destination string, opts ...exec.Option) error + Exec(string, ...exec.Option) error + ExecOutput(string, ...exec.Option) (string, error) + Execf(string, ...interface{}) error + ExecOutputf(string, ...interface{}) (string, error) + String() string + Sudo(string) (string, error) +} diff --git a/vendor/github.com/k0sproject/rig/os/initsystem/host.go b/vendor/github.com/k0sproject/rig/os/initsystem/host.go new file mode 100644 index 00000000000..1263894c386 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/os/initsystem/host.go @@ -0,0 +1,8 @@ +package initsystem + +// Host interface for init system +type Host interface { + Execf(string, ...interface{}) error + ExecOutputf(string, ...interface{}) (string, error) + Sudo(string) (string, error) +} diff --git a/vendor/github.com/k0sproject/rig/os/initsystem/openrc.go b/vendor/github.com/k0sproject/rig/os/initsystem/openrc.go new file mode 100644 index 00000000000..689c4fde719 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/os/initsystem/openrc.go @@ -0,0 +1,67 @@ +package initsystem + +import ( + "fmt" + "path" + "strings" + + "github.com/k0sproject/rig/exec" +) + +// OpenRC is found on some linux systems, often installed on Alpine for example. 
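+// The Linux OS support module (os/linux.go) also falls back to OpenRC when it
+// only detects an Upstart or SysV style init system.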
+type OpenRC struct{} + +// StartService starts a a service +func (i OpenRC) StartService(h Host, s string) error { + return h.Execf("rc-service %s start", s, exec.Sudo(h)) +} + +// StopService stops a a service +func (i OpenRC) StopService(h Host, s string) error { + return h.Execf("rc-service %s stop", s, exec.Sudo(h)) +} + +// ServiceScriptPath returns the path to a service configuration file +func (i OpenRC) ServiceScriptPath(h Host, s string) (string, error) { + return h.ExecOutputf("rc-service -r %s 2> /dev/null", s, exec.Sudo(h)) +} + +// RestartService restarts a a service +func (i OpenRC) RestartService(h Host, s string) error { + return h.Execf("rc-service %s restart", s, exec.Sudo(h)) +} + +// DaemonReload reloads init system configuration +func (i OpenRC) DaemonReload(_ Host) error { + return nil +} + +// EnableService enables a a service +func (i OpenRC) EnableService(h Host, s string) error { + return h.Execf("rc-update add %s", s, exec.Sudo(h)) +} + +// DisableService disables a a service +func (i OpenRC) DisableService(h Host, s string) error { + return h.Execf("rc-update del %s", s, exec.Sudo(h)) +} + +// ServiceIsRunning returns true if a service is running +func (i OpenRC) ServiceIsRunning(h Host, s string) bool { + return h.Execf(`rc-service %s status | grep -q "status: started"`, s, exec.Sudo(h)) == nil +} + +// ServiceEnvironmentPath returns a path to an environment override file path +func (i OpenRC) ServiceEnvironmentPath(h Host, s string) (string, error) { + return path.Join("/etc/conf.d", s), nil +} + +// ServiceEnvironmentContent returns a formatted string for a service environment override file +func (i OpenRC) ServiceEnvironmentContent(env map[string]string) string { + var b strings.Builder + for k, v := range env { + _, _ = fmt.Fprintf(&b, "export %s=%s\n", k, v) + } + + return b.String() +} diff --git a/vendor/github.com/k0sproject/rig/os/initsystem/systemd.go b/vendor/github.com/k0sproject/rig/os/initsystem/systemd.go new file mode 100644 index 00000000000..3dd12049f08 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/os/initsystem/systemd.go @@ -0,0 +1,73 @@ +package initsystem + +import ( + "fmt" + "path" + "strings" + + "github.com/k0sproject/rig/exec" +) + +// Systemd is found by default on most linux distributions today +type Systemd struct{} + +// StartService starts a a service +func (i Systemd) StartService(h Host, s string) error { + return h.Execf("systemctl start %s 2> /dev/null", s, exec.Sudo(h)) +} + +// EnableService enables a a service +func (i Systemd) EnableService(h Host, s string) error { + return h.Execf("systemctl enable %s 2> /dev/null", s, exec.Sudo(h)) +} + +// DisableService disables a a service +func (i Systemd) DisableService(h Host, s string) error { + return h.Execf("systemctl disable %s 2> /dev/null", s, exec.Sudo(h)) +} + +// StopService stops a a service +func (i Systemd) StopService(h Host, s string) error { + return h.Execf("systemctl stop %s 2> /dev/null", s, exec.Sudo(h)) +} + +// RestartService restarts a a service +func (i Systemd) RestartService(h Host, s string) error { + return h.Execf("systemctl restart %s 2> /dev/null", s, exec.Sudo(h)) +} + +// DaemonReload reloads init system configuration +func (i Systemd) DaemonReload(h Host) error { + return h.Execf("systemctl daemon-reload 2> /dev/null", exec.Sudo(h)) +} + +// ServiceIsRunning returns true if a service is running +func (i Systemd) ServiceIsRunning(h Host, s string) bool { + return h.Execf(`systemctl status %s 2> /dev/null | grep -q "(running)"`, s, 
exec.Sudo(h)) == nil +} + +// ServiceScriptPath returns the path to a service configuration file +func (i Systemd) ServiceScriptPath(h Host, s string) (string, error) { + return h.ExecOutputf(`systemctl show -p FragmentPath %s.service 2> /dev/null | cut -d"=" -f2`, s, exec.Sudo(h)) +} + +// ServiceEnvironmentPath returns a path to an environment override file path +func (i Systemd) ServiceEnvironmentPath(h Host, s string) (string, error) { + sp, err := i.ServiceScriptPath(h, s) + if err != nil { + return "", err + } + dn := path.Dir(sp) + return path.Join(dn, fmt.Sprintf("%s.service.d", s), "env.conf"), nil +} + +// ServiceEnvironmentContent returns a formatted string for a service environment override file +func (i Systemd) ServiceEnvironmentContent(env map[string]string) string { + var b strings.Builder + fmt.Fprintln(&b, "[Service]") + for k, v := range env { + _, _ = fmt.Fprintf(&b, "Environment=%s=%s\n", k, v) + } + + return b.String() +} diff --git a/vendor/github.com/k0sproject/rig/os/linux.go b/vendor/github.com/k0sproject/rig/os/linux.go new file mode 100644 index 00000000000..2957fe4177a --- /dev/null +++ b/vendor/github.com/k0sproject/rig/os/linux.go @@ -0,0 +1,394 @@ +package os + +import ( + "bufio" + "fmt" + "io/fs" + "strconv" + "strings" + "time" + + "github.com/alessio/shellescape" + escape "github.com/alessio/shellescape" + "github.com/k0sproject/rig/exec" + "github.com/k0sproject/rig/os/initsystem" +) + +// Linux is a base module for various linux OS support packages +type Linux struct{} + +// initSystem interface defines an init system - the OS's system to manage services (systemd, openrc for example) +type initSystem interface { + StartService(initsystem.Host, string) error + StopService(initsystem.Host, string) error + RestartService(initsystem.Host, string) error + DisableService(initsystem.Host, string) error + EnableService(initsystem.Host, string) error + ServiceIsRunning(initsystem.Host, string) bool + ServiceScriptPath(initsystem.Host, string) (string, error) + DaemonReload(initsystem.Host) error + ServiceEnvironmentPath(initsystem.Host, string) (string, error) + ServiceEnvironmentContent(map[string]string) string +} + +// Kind returns "linux" +func (c Linux) Kind() string { + return "linux" +} + +func (c Linux) hasSystemd(h Host) bool { + return h.Exec("stat /run/systemd/system", exec.Sudo(h)) == nil +} + +func (c Linux) hasUpstart(h Host) bool { + return h.Exec(`stat /sbin/upstart-udev-bridge > /dev/null 2>&1 || \ + (stat /sbin/initctl > /dev/null 2>&1 && \ + /sbin/initctl --version 2> /dev/null | grep -q "\(upstart" )`, exec.Sudo(h)) == nil +} + +func (c Linux) hasOpenRC(h Host) bool { + return h.Exec(`command -v openrc-init > /dev/null 2>&1 || \ + (stat /etc/inittab > /dev/null 2>&1 && \ + (grep ::sysinit: /etc/inittab | grep -q openrc) )`, exec.Sudo(h)) == nil +} + +func (c Linux) hasSysV(h Host) bool { + return h.Exec(`command -v service 2>&1 && stat /etc/init.d > /dev/null 2>&1`, exec.Sudo(h)) == nil +} + +func (c Linux) is(h Host) (initSystem, error) { + if c.hasSystemd(h) { + return &initsystem.Systemd{}, nil + } + + if c.hasOpenRC(h) || c.hasUpstart(h) || c.hasSysV(h) { + return &initsystem.OpenRC{}, nil + } + + return nil, fmt.Errorf("failed to detect OS init system") +} + +// StartService starts a service on the host +func (c Linux) StartService(h Host, s string) error { + is, err := c.is(h) + if err != nil { + return err + } + return is.StartService(h, s) +} + +// StopService stops a service on the host +func (c Linux) StopService(h Host, s string) 
error { + is, err := c.is(h) + if err != nil { + return err + } + return is.StopService(h, s) +} + +// RestartService restarts a service on the host +func (c Linux) RestartService(h Host, s string) error { + is, err := c.is(h) + if err != nil { + return err + } + return is.RestartService(h, s) +} + +// DisableService disables a service on the host +func (c Linux) DisableService(h Host, s string) error { + is, err := c.is(h) + if err != nil { + return err + } + return is.DisableService(h, s) +} + +// EnableService enables a service on the host +func (c Linux) EnableService(h Host, s string) error { + is, err := c.is(h) + if err != nil { + return err + } + return is.EnableService(h, s) +} + +// ServiceIsRunning returns true if the service is running on the host +func (c Linux) ServiceIsRunning(h Host, s string) bool { + is, err := c.is(h) + if err != nil { + return false + } + return is.ServiceIsRunning(h, s) +} + +// ServiceScriptPath returns the service definition file path on the host +func (c Linux) ServiceScriptPath(h Host, s string) (string, error) { + is, err := c.is(h) + if err != nil { + return "", err + } + return is.ServiceScriptPath(h, s) +} + +// DaemonReload performs an init system config reload +func (c Linux) DaemonReload(h Host) error { + is, err := c.is(h) + if err != nil { + return err + } + return is.DaemonReload(h) +} + +// Pwd returns the current working directory of the session +func (c Linux) Pwd(h Host) string { + pwd, err := h.ExecOutput("pwd 2> /dev/null") + if err != nil { + return "" + } + return pwd +} + +func (c Linux) CheckPrivilege(h Host) error { + return h.Exec("true", exec.Sudo(h)) +} + +// JoinPath joins a path +func (c Linux) JoinPath(parts ...string) string { + return strings.Join(parts, "/") +} + +// Hostname resolves the short hostname +func (c Linux) Hostname(h Host) string { + n, _ := h.ExecOutput("hostname 2> /dev/null") + + return n +} + +// LongHostname resolves the FQDN (long) hostname +func (c Linux) LongHostname(h Host) string { + n, _ := h.ExecOutput("hostname -f 2> /dev/null") + + return n +} + +// IsContainer returns true if the host is actually a container +func (c Linux) IsContainer(h Host) bool { + return h.Exec("grep 'container=docker' /proc/1/environ 2> /dev/null") == nil +} + +// FixContainer makes a container work like a real host +func (c Linux) FixContainer(h Host) error { + return h.Exec("mount --make-rshared / 2> /dev/null", exec.Sudo(h)) +} + +// SELinuxEnabled is true when SELinux is enabled +func (c Linux) SELinuxEnabled(h Host) bool { + return h.Exec("getenforce | grep -iq enforcing 2> /dev/null", exec.Sudo(h)) == nil +} + +// WriteFile writes file to host with given contents. Do not use for large files. +func (c Linux) WriteFile(h Host, path string, data string, permissions string) error { + if data == "" { + return fmt.Errorf("empty content in WriteFile to %s", path) + } + + if path == "" { + return fmt.Errorf("empty path in WriteFile") + } + + tempFile, err := h.ExecOutput("mktemp 2> /dev/null") + if err != nil { + return err + } + + if err := h.Execf(`cat > %s`, tempFile, exec.Stdin(data), exec.RedactString(data)); err != nil { + return err + } + + if err := c.InstallFile(h, tempFile, path, permissions); err != nil { + _ = c.DeleteFile(h, tempFile) + } + + return nil +} + +func (c Linux) InstallFile(h Host, src, dst, permissions string) error { + return h.Execf("install -D -m %s -- %s %s", permissions, src, dst, exec.Sudo(h)) +} + +// ReadFile reads a files contents from the host. 
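+// The read runs with sudo and the contents are kept out of the command logs
+// via exec.HideOutput.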
+func (c Linux) ReadFile(h Host, path string) (string, error) { + return h.ExecOutputf("cat -- %s 2> /dev/null", escape.Quote(path), exec.HideOutput(), exec.Sudo(h)) +} + +// DeleteFile deletes a file from the host. +func (c Linux) DeleteFile(h Host, path string) error { + return h.Execf(`rm -f -- %s 2> /dev/null`, escape.Quote(path), exec.Sudo(h)) +} + +// FileExist checks if a file exists on the host +func (c Linux) FileExist(h Host, path string) bool { + return h.Execf(`test -e %s 2> /dev/null`, escape.Quote(path), exec.Sudo(h)) == nil +} + +// LineIntoFile tries to find a line starting with the matcher and replace it with a new entry. If match isn't found, the string is appended to the file. +// TODO add exec.Opts (requires modifying readfile and writefile signatures) +func (c Linux) LineIntoFile(h Host, path, matcher, newLine string) error { + newLine = strings.TrimSuffix(newLine, "\n") + content, err := c.ReadFile(h, path) + if err != nil { + content = "" + } + + var found bool + writer := new(strings.Builder) + + scanner := bufio.NewScanner(strings.NewReader(content)) + for scanner.Scan() { + row := scanner.Text() + + if strings.HasPrefix(row, matcher) && !found { + row = newLine + found = true + } + + fmt.Fprintln(writer, row) + } + + if !found { + fmt.Fprintln(writer, newLine) + } + + return c.WriteFile(h, path, writer.String(), "0644") +} + +// UpdateEnvironment updates the hosts's environment variables +func (c Linux) UpdateEnvironment(h Host, env map[string]string) error { + for k, v := range env { + err := c.LineIntoFile(h, "/etc/environment", fmt.Sprintf("^%s=", k), fmt.Sprintf("%s=%s", k, v)) + if err != nil { + return err + } + } + + // Update current session environment from the /etc/environment + return h.Exec(`while read -r pair; do if [[ $pair == ?* && $pair != \#* ]]; then export "$pair" || exit 2; fi; done < /etc/environment`) +} + +// UpdateServiceEnvironment updates environment variables for a service +func (c Linux) UpdateServiceEnvironment(h Host, s string, env map[string]string) error { + is, err := c.is(h) + if err != nil { + return err + } + fp, err := is.ServiceEnvironmentPath(h, s) + if err != nil { + return err + } + err = c.WriteFile(h, fp, is.ServiceEnvironmentContent(env), "0660") + if err != nil { + return err + } + + return c.DaemonReload(h) +} + +// CleanupEnvironment removes environment variable configuration +func (c Linux) CleanupEnvironment(h Host, env map[string]string) error { + for k := range env { + err := c.LineIntoFile(h, "/etc/environment", fmt.Sprintf("^%s=", k), "") + if err != nil { + return err + } + } + // remove empty lines + return h.Exec(`sed -i '/^$/d' /etc/environment`, exec.Sudo(h)) +} + +// CleanupServiceEnvironment updates environment variables for a service +func (c Linux) CleanupServiceEnvironment(h Host, s string) error { + is, err := c.is(h) + if err != nil { + return err + } + fp, err := is.ServiceEnvironmentPath(h, s) + if err != nil { + return err + } + return c.DeleteFile(h, fp) +} + +// CommandExist returns true if the command exists +func (c Linux) CommandExist(h Host, cmd string) bool { + return h.Execf(`command -v -- "%s" 2> /dev/null`, cmd, exec.Sudo(h)) == nil +} + +// Reboot executes the reboot command +func (c Linux) Reboot(h Host) error { + cmd, err := h.Sudo("shutdown --reboot 0 2> /dev/null") + if err != nil { + return err + } + return h.Execf("%s && exit", cmd) +} + +// MkDir creates a directory (including intermediate directories) +func (c Linux) MkDir(h Host, s string, opts ...exec.Option) error { + return 
h.Exec(fmt.Sprintf("mkdir -p -- %s", escape.Quote(s)), opts...) +} + +// Chmod updates permissions of a path +func (c Linux) Chmod(h Host, s, perm string, opts ...exec.Option) error { + return h.Exec(fmt.Sprintf("chmod %s -- %s", perm, escape.Quote(s)), opts...) +} + +// gnuCoreutilsDateTimeLayout represents the date and time format employed by GNU +// coreutils. Note that this is different from BSD coreutils. +const gnuCoreutilsDateTimeLayout = "2006-01-02 15:04:05.999999999 -0700" + +// Stat gets file / directory information +func (c Linux) Stat(h Host, path string, opts ...exec.Option) (*FileInfo, error) { + cmd := `env -i LC_ALL=C stat --printf '%s\0%y\0%a\0%F' -- ` + shellescape.Quote(path) + + out, err := h.ExecOutput(cmd, opts...) + if err != nil { + return nil, err + } + + fields := strings.SplitN(out, "\x00", 4) + + size, err := strconv.ParseInt(fields[0], 10, 64) + if err != nil { + return nil, err + } + + modTime, err := time.Parse(gnuCoreutilsDateTimeLayout, fields[1]) + if err != nil { + return nil, err + } + + mode, err := strconv.ParseUint(fields[2], 8, 32) + if err != nil { + return nil, err + } + + return &FileInfo{ + FName: path, + FSize: size, + FModTime: modTime, + FMode: fs.FileMode(mode), + FIsDir: strings.Contains(fields[3], "directory"), + }, nil +} + +// Touch updates a file's last modified time. It creates a new empty file if it +// didn't exist prior to the call to Touch. +func (c Linux) Touch(h Host, path string, ts time.Time, opts ...exec.Option) error { + cmd := fmt.Sprintf("env -i LC_ALL=C touch -m -d %s -- %s", + shellescape.Quote(ts.Format(gnuCoreutilsDateTimeLayout)), + shellescape.Quote(path), + ) + + return h.Exec(cmd, opts...) +} diff --git a/vendor/github.com/k0sproject/rig/os/registry/registry.go b/vendor/github.com/k0sproject/rig/os/registry/registry.go new file mode 100644 index 00000000000..289d07d3949 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/os/registry/registry.go @@ -0,0 +1,34 @@ +package registry + +import ( + "fmt" + + "github.com/k0sproject/rig" +) + +type buildFunc = func() interface{} +type matchFunc = func(rig.OSVersion) bool + +type osFactory struct { + MatchFunc matchFunc + BuildFunc buildFunc +} + +var osModules []*osFactory + +// RegisterOSModule registers a OS support module into rig's registry +func RegisterOSModule(mf matchFunc, bf buildFunc) { + // Inserting to beginning to match the most latest added + osModules = append([]*osFactory{{MatchFunc: mf, BuildFunc: bf}}, osModules...) 
+} + +// GetOSModuleBuilder returns a suitable OS support module from rig's registry +func GetOSModuleBuilder(osv rig.OSVersion) (buildFunc, error) { + for _, of := range osModules { + if of.MatchFunc(osv) { + return of.BuildFunc, nil + } + } + + return nil, fmt.Errorf("os support module not found") +} diff --git a/vendor/github.com/k0sproject/rig/os/windows.go b/vendor/github.com/k0sproject/rig/os/windows.go new file mode 100644 index 00000000000..9adfe5dd61f --- /dev/null +++ b/vendor/github.com/k0sproject/rig/os/windows.go @@ -0,0 +1,320 @@ +package os + +import ( + "bufio" + "fmt" + "io/fs" + "strconv" + "strings" + "time" + + "github.com/k0sproject/rig/exec" + "github.com/k0sproject/rig/log" + ps "github.com/k0sproject/rig/powershell" +) + +// Windows is the base package for windows OS support +type Windows struct{} + +// Kind returns "windows" +func (c Windows) Kind() string { + return "windows" +} + +const privCheck = `"$currentPrincipal = New-Object Security.Principal.WindowsPrincipal([Security.Principal.WindowsIdentity]::GetCurrent()); if (!$currentPrincipal.IsInRole([Security.Principal.WindowsBuiltInRole]::Administrator)) { $host.SetShouldExit(1) }"` + +// CheckPrivilege returns an error if the user does not have admin access to the host +func (c Windows) CheckPrivilege(h Host) error { + if h.Exec(ps.Cmd(privCheck)) != nil { + return fmt.Errorf("user does not have administrator rights on the host") + } + + return nil +} + +// InstallPackage enables an optional windows feature +func (c Windows) InstallPackage(h Host, s ...string) error { + for _, n := range s { + err := h.Exec(ps.Cmd(fmt.Sprintf("Enable-WindowsOptionalFeature -Online -FeatureName %s -All", n))) + if err != nil { + return err + } + } + + return nil +} + +func (c Windows) InstallFile(h Host, src, dst, _ string) error { + return h.Execf("move /y %s %s", ps.DoubleQuote(src), ps.DoubleQuote(dst), exec.Sudo(h)) +} + +// Pwd returns the current working directory +func (c Windows) Pwd(h Host) string { + if pwd, err := h.ExecOutput("echo %cd%"); err == nil { + return pwd + } + + return "" +} + +// JoinPath joins a path +func (c Windows) JoinPath(parts ...string) string { + return strings.Join(parts, "\\") +} + +// Hostname resolves the short hostname +func (c Windows) Hostname(h Host) string { + output, err := h.ExecOutput("powershell $env:COMPUTERNAME") + if err != nil { + return "localhost" + } + + return strings.TrimSpace(output) +} + +// LongHostname resolves the FQDN (long) hostname +func (c Windows) LongHostname(h Host) string { + output, err := h.ExecOutput("powershell ([System.Net.Dns]::GetHostByName(($env:COMPUTERNAME))).Hostname") + if err != nil { + return "localhost.localdomain" + } + return strings.TrimSpace(output) +} + +// IsContainer returns true if the host is actually a container (always false on windows for now) +func (c Windows) IsContainer(_ Host) bool { + return false +} + +// FixContainer makes a container work like a real host (does nothing on windows for now) +func (c Windows) FixContainer(_ Host) error { + return nil +} + +// SELinuxEnabled is true when SELinux is enabled (always false on windows for now) +func (c Windows) SELinuxEnabled(_ Host) bool { + return false +} + +// WriteFile writes file to host with given contents. Do not use for large files. +// The permissions argument is ignored on windows. 
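+// The data is piped through stdin into a temporary file created with
+// New-TemporaryFile and then moved into place with Move-Item; a deferred
+// cleanup attempts to remove the temporary file in case the move did not happen.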
+func (c Windows) WriteFile(h Host, path string, data string, permissions string) error { + if data == "" { + return fmt.Errorf("empty content in WriteFile to %s", path) + } + + if path == "" { + return fmt.Errorf("empty path in WriteFile") + } + + tempFile, err := h.ExecOutput("powershell -Command \"New-TemporaryFile | Write-Host\"") + if err != nil { + return err + } + defer c.deleteTempFile(h, tempFile) + + err = h.Exec(fmt.Sprintf(`powershell -Command "$Input | Out-File -FilePath %s"`, ps.SingleQuote(tempFile)), exec.Stdin(data), exec.RedactString(data)) + if err != nil { + return err + } + + err = h.Exec(fmt.Sprintf(`powershell -Command "Move-Item -Force -Path %s -Destination %s"`, ps.SingleQuote(tempFile), ps.SingleQuote(path))) + if err != nil { + return err + } + + return nil +} + +// ReadFile reads a files contents from the host. +func (c Windows) ReadFile(h Host, path string) (string, error) { + return h.ExecOutput(fmt.Sprintf(`type %s`, ps.DoubleQuote(path)), exec.HideOutput()) +} + +// DeleteFile deletes a file from the host. +func (c Windows) DeleteFile(h Host, path string) error { + return h.Exec(fmt.Sprintf(`del /f %s`, ps.DoubleQuote(path))) +} + +func (c Windows) deleteTempFile(h Host, path string) { + if err := c.DeleteFile(h, path); err != nil { + log.Debugf("failed to delete temporary file %s: %s", path, err.Error()) + } +} + +// FileExist checks if a file exists on the host +func (c Windows) FileExist(h Host, path string) bool { + return h.Exec(fmt.Sprintf(`powershell -Command "if (!(Test-Path -Path \"%s\")) { exit 1 }"`, path)) == nil +} + +// UpdateEnvironment updates the hosts's environment variables +func (c Windows) UpdateEnvironment(h Host, env map[string]string) error { + for k, v := range env { + err := h.Exec(fmt.Sprintf(`setx %s %s`, ps.DoubleQuote(k), ps.DoubleQuote(v))) + if err != nil { + return err + } + } + return nil +} + +// UpdateServiceEnvironment does nothing on windows +func (c Windows) UpdateServiceEnvironment(_ Host, _ string, _ map[string]string) error { + return nil +} + +// CleanupEnvironment removes environment variable configuration +func (c Windows) CleanupEnvironment(h Host, env map[string]string) error { + for k := range env { + err := h.Exec(fmt.Sprintf(`powershell "[Environment]::SetEnvironmentVariable(%s, $null, 'User')"`, ps.SingleQuote(k))) + if err != nil { + return err + } + err = h.Exec(fmt.Sprintf(`powershell "[Environment]::SetEnvironmentVariable(%s, $null, 'Machine')"`, ps.SingleQuote(k))) + if err != nil { + return err + } + } + return nil +} + +// CleanupServiceEnvironment does nothing on windows +func (c Windows) CleanupServiceEnvironment(_ Host, _ string) error { + return nil +} + +// CommandExist returns true if the provided command exists +func (c Windows) CommandExist(h Host, cmd string) bool { + return h.Execf("where /q %s", cmd) == nil +} + +// Reboot executes the reboot command +func (c Windows) Reboot(h Host) error { + return h.Exec("shutdown /r /t 5") +} + +// StartService starts a a service +func (c Windows) StartService(h Host, s string) error { + return h.Execf(`sc start "%s"`, s) +} + +// StopService stops a a service +func (c Windows) StopService(h Host, s string) error { + return h.Execf(`sc stop "%s"`, s) +} + +// ServiceScriptPath returns the path to a service configuration file +func (c Windows) ServiceScriptPath(h Host, s string) (string, error) { + return "", fmt.Errorf("not available on windows") +} + +// RestartService restarts a a service +func (c Windows) RestartService(h Host, s string) error { + 
return h.Execf(ps.Cmd(fmt.Sprintf(`Restart-Service "%s"`, s))) +} + +// DaemonReload reloads init system configuration +func (c Windows) DaemonReload(_ Host) error { + return nil +} + +// EnableService enables a a service +func (c Windows) EnableService(h Host, s string) error { + return h.Execf(`sc.exe config "%s" start=disabled`, s) +} + +// DisableService disables a a service +func (c Windows) DisableService(h Host, s string) error { + return h.Execf(`sc.exe config "%s" start=enabled`, s) +} + +// ServiceIsRunning returns true if a service is running +func (c Windows) ServiceIsRunning(h Host, s string) bool { + return h.Execf(`sc.exe query "%s" | findstr "RUNNING"`, s) == nil +} + +// MkDir creates a directory (including intermediate directories) +func (c Windows) MkDir(h Host, s string, opts ...exec.Option) error { + // windows mkdir is "-p" by default + return h.Exec(fmt.Sprintf(`mkdir %s`, ps.DoubleQuote(s)), opts...) +} + +// Chmod on windows does nothing +func (c Windows) Chmod(h Host, s, perm string, opts ...exec.Option) error { + return nil +} + +// Stat gets file / directory information +func (c Windows) Stat(h Host, path string, opts ...exec.Option) (*FileInfo, error) { + f := &FileInfo{FName: path, FMode: fs.FileMode(0)} + + out, err := h.ExecOutput(fmt.Sprintf("[System.Math]::Truncate((Get-Date -Date ((Get-Item %s).LastWriteTime.ToUniversalTime()) -UFormat %%s))", ps.DoubleQuote(path)), opts...) + if err != nil { + return nil, err + } + ts, err := strconv.ParseInt(strings.TrimSpace(out), 10, 64) + if err != nil { + return nil, err + } + f.FModTime = time.Unix(ts, 0) + + out, err = h.ExecOutput(fmt.Sprintf("(Get-Item %s).Length", ps.DoubleQuote(path)), opts...) + if err != nil { + return nil, err + } + size, err := strconv.ParseInt(strings.TrimSpace(out), 10, 64) + if err != nil { + return nil, err + } + f.FSize = size + + out, err = h.ExecOutput(fmt.Sprintf("(Get-Item %s).GetType().Name", ps.DoubleQuote(path)), opts...) + if err != nil { + return nil, err + } + f.FIsDir = strings.Contains(out, "DirectoryInfo") + + return f, nil +} + +// Touch updates a file's last modified time or creates a new empty file +func (c Windows) Touch(h Host, path string, ts time.Time, opts ...exec.Option) error { + if !c.FileExist(h, path) { + if err := h.Exec(fmt.Sprintf("Set-Content -Path %s -value $null", ps.DoubleQuote(path)), opts...); err != nil { + return err + } + } + + return h.Exec(fmt.Sprintf("(Get-Item %s).LastWriteTime = (Get-Date %s)", ps.DoubleQuote(path), ps.DoubleQuote(ts.Format(time.RFC3339))), opts...) +} + +// LineIntoFile tries to find a line starting with the matcher and replace it with a new entry. If match isn't found, the string is appended to the file. 
+// TODO this is a straight copypaste from linux, figure out a way to share these +func (c Windows) LineIntoFile(h Host, path, matcher, newLine string) error { + newLine = strings.TrimSuffix(newLine, "\n") + content, err := c.ReadFile(h, path) + if err != nil { + content = "" + } + + var found bool + writer := new(strings.Builder) + + scanner := bufio.NewScanner(strings.NewReader(content)) + for scanner.Scan() { + row := scanner.Text() + + if strings.HasPrefix(row, matcher) && !found { + row = newLine + found = true + } + + fmt.Fprintln(writer, row) + } + + if !found { + fmt.Fprintln(writer, newLine) + } + + return c.WriteFile(h, path, writer.String(), "0644") +} diff --git a/vendor/github.com/k0sproject/rig/osversion.go b/vendor/github.com/k0sproject/rig/osversion.go new file mode 100644 index 00000000000..ae0008170fd --- /dev/null +++ b/vendor/github.com/k0sproject/rig/osversion.go @@ -0,0 +1,17 @@ +package rig + +// OSVersion host operating system version information +type OSVersion struct { + ID string + IDLike string + Name string + Version string +} + +// String returns a human readable representation of OSVersion +func (o *OSVersion) String() string { + if o.Name != "" { + return o.Name + } + return o.ID + " " + o.Version +} diff --git a/vendor/github.com/k0sproject/rig/powershell/powershell.go b/vendor/github.com/k0sproject/rig/powershell/powershell.go new file mode 100644 index 00000000000..320fb7912dd --- /dev/null +++ b/vendor/github.com/k0sproject/rig/powershell/powershell.go @@ -0,0 +1,123 @@ +package powershell + +import ( + "encoding/base64" + "fmt" + "strings" +) + +// PipeHasEnded string is used during the base64+sha265 upload process +const PipeHasEnded = "The pipe has been ended." + +// PipeIsBeingClosed string is used during the base64+sha265 upload process +const PipeIsBeingClosed = "The pipe is being closed." 
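The helpers that follow (UploadCmd, EncodeCmd, Cmd) build `powershell.exe -EncodedCommand` invocations: the script text is widened to two-byte characters and then base64-encoded. A minimal, self-contained sketch of that encoding, assuming ASCII-only script text (the same simplification EncodeCmd makes) and a hypothetical `encodedCommand` helper:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// encodedCommand mirrors the widening+base64 scheme used by EncodeCmd:
// PowerShell's -EncodedCommand expects base64 over UTF-16LE bytes, and for
// plain ASCII input appending a zero byte after each byte is sufficient.
func encodedCommand(psCmd string) string {
	wide := make([]byte, 0, len(psCmd)*2)
	for _, b := range []byte(psCmd) {
		wide = append(wide, b, 0x00)
	}
	return base64.StdEncoding.EncodeToString(wide)
}

func main() {
	enc := encodedCommand(`Write-Output "hello"`)
	fmt.Printf("powershell.exe -NonInteractive -ExecutionPolicy Bypass -NoProfile -EncodedCommand %s\n", enc)
}
```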
+ +// UploadCmd generates a powershell script that acts as a small "stdin daemon" for file upload +func UploadCmd(path string) string { + return EncodeCmd(` + begin { + $path = "` + path + `" + Remove-Item $path -ErrorAction Ignore + $DebugPreference = "Continue" + $ErrorActionPreference = "Stop" + Set-StrictMode -Version 2 + $fd = [System.IO.File]::Create($path) + $sha256 = [System.Security.Cryptography.SHA256CryptoServiceProvider]::Create() + $bytes = @() #initialize for empty file case + } + process { + $bytes = [System.Convert]::FromBase64String($input) + $sha256.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null + $fd.Write($bytes, 0, $bytes.Length) + } + end { + $sha256.TransformFinalBlock($bytes, 0, 0) | Out-Null + $hash = [System.BitConverter]::ToString($sha256.Hash).Replace("-", "").ToLowerInvariant() + $fd.Close() + Write-Output "{""sha256"":""$hash""}" + } + `) +} + +// EncodeCmd base64-encodes a string in a way that is accepted by PowerShell -EncodedCommand +func EncodeCmd(psCmd string) string { + if !strings.Contains(psCmd, "begin {") { + psCmd = "$ProgressPreference='SilentlyContinue'; " + psCmd + } + // 2 byte chars to make PowerShell happy + wideCmd := "" + for _, b := range []byte(psCmd) { + wideCmd += string(b) + "\x00" + } + + // Base64 encode the command + input := []uint8(wideCmd) + return base64.StdEncoding.EncodeToString(input) +} + +// Cmd builds a command-line for executing a complex command or script as an EncodedCommand through powershell +func Cmd(psCmd string) string { + encodedCmd := EncodeCmd(psCmd) + + // Create the powershell.exe command line to execute the script + return fmt.Sprintf("powershell.exe -NonInteractive -ExecutionPolicy Bypass -NoProfile -EncodedCommand %s", encodedCmd) +} + +// SingleQuote quotes and escapes a string in a format that is accepted by powershell scriptlets +// from jbrekelmans/go-winrm/util.go PowerShellSingleQuotedStringLiteral +func SingleQuote(v string) string { + var sb strings.Builder + _, _ = sb.WriteRune('\'') + for _, rune := range v { + var esc string + switch rune { + case '\n': + esc = "`n" + case '\r': + esc = "`r" + case '\t': + esc = "`t" + case '\a': + esc = "`a" + case '\b': + esc = "`b" + case '\f': + esc = "`f" + case '\v': + esc = "`v" + case '"': + esc = "`\"" + case '\'': + esc = "`'" + case '`': + esc = "``" + case '\x00': + esc = "`0" + default: + _, _ = sb.WriteRune(rune) + continue + } + _, _ = sb.WriteString(esc) + } + _, _ = sb.WriteRune('\'') + return sb.String() +} + +// DoubleQuote escapes a string in a way that can be used as a windows file path +func DoubleQuote(v string) string { + var sb strings.Builder + _, _ = sb.WriteRune('"') + for _, rune := range v { + var esc string + switch rune { + case '"': + esc = "`\"" + default: + _, _ = sb.WriteRune(rune) + continue + } + _, _ = sb.WriteString(esc) + } + _, _ = sb.WriteRune('"') + return sb.String() +} diff --git a/vendor/github.com/k0sproject/rig/resolver.go b/vendor/github.com/k0sproject/rig/resolver.go new file mode 100644 index 00000000000..5f82cdaa0f0 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/resolver.go @@ -0,0 +1,136 @@ +package rig + +import ( + "bufio" + "fmt" + "strconv" + "strings" + + ps "github.com/k0sproject/rig/powershell" +) + +type resolveFunc func(*Connection) (OSVersion, error) + +// Resolvers exposes an array of resolve functions where you can add your own if you need to detect some OS rig doesn't already know about +// (consider making a PR) +var Resolvers []resolveFunc + +// GetOSVersion runs through the 
Resolvers and tries to figure out the OS version information +func GetOSVersion(c *Connection) (OSVersion, error) { + for _, r := range Resolvers { + if os, err := r(c); err == nil { + return os, nil + } + } + return OSVersion{}, fmt.Errorf("unable to determine host os") +} + +func init() { + Resolvers = append(Resolvers, resolveLinux, resolveWindows, resolveDarwin) +} + +func resolveLinux(c *Connection) (os OSVersion, err error) { + if err = c.Exec("uname | grep -q Linux"); err != nil { + return + } + + output, err := c.ExecOutput("cat /etc/os-release || cat /usr/lib/os-release") + if err != nil { + return + } + + err = parseOSReleaseFile(output, &os) + + return +} + +func resolveWindows(c *Connection) (os OSVersion, err error) { + osName, err := c.ExecOutput(ps.Cmd(`(Get-ItemProperty "HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion").ProductName`)) + if err != nil { + return + } + + osMajor, err := c.ExecOutput(ps.Cmd(`(Get-ItemProperty "HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion").CurrentMajorVersionNumber`)) + if err != nil { + return + } + + osMinor, err := c.ExecOutput(ps.Cmd(`(Get-ItemProperty "HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion").CurrentMinorVersionNumber`)) + if err != nil { + return + } + + osBuild, err := c.ExecOutput(ps.Cmd(`(Get-ItemProperty "HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion").CurrentBuild`)) + if err != nil { + return + } + + os = OSVersion{ + ID: "windows", + IDLike: "windows", + Version: fmt.Sprintf("%s.%s.%s", osMajor, osMinor, osBuild), + Name: osName, + } + + return +} + +func resolveDarwin(c *Connection) (os OSVersion, err error) { + if err = c.Exec("uname | grep -q Darwin"); err != nil { + return + } + + version, err := c.ExecOutput("sw_vers -productVersion") + if err != nil { + return + } + + var name string + if n, err := c.ExecOutput(`grep "SOFTWARE LICENSE AGREEMENT FOR " "/System/Library/CoreServices/Setup Assistant.app/Contents/Resources/en.lproj/OSXSoftwareLicense.rtf" | sed -E "s/^.*SOFTWARE LICENSE AGREEMENT FOR (.+)\\\/\1/"`); err == nil { + name = fmt.Sprintf("%s %s", n, version) + } + + os = OSVersion{ + ID: "darwin", + IDLike: "darwin", + Version: version, + Name: name, + } + + return +} + +func unquote(s string) string { + if u, err := strconv.Unquote(s); err == nil { + return u + } + return s +} + +func parseOSReleaseFile(s string, os *OSVersion) error { + scanner := bufio.NewScanner(strings.NewReader(s)) + for scanner.Scan() { + fields := strings.SplitN(scanner.Text(), "=", 2) + switch fields[0] { + case "ID": + os.ID = unquote(fields[1]) + case "ID_LIKE": + os.IDLike = unquote(fields[1]) + case "VERSION_ID": + os.Version = unquote(fields[1]) + case "PRETTY_NAME": + os.Name = unquote(fields[1]) + } + } + + // ArchLinux has no versions + if os.ID == "arch" || os.IDLike == "arch" { + os.Version = "0.0.0" + } + + if os.ID == "" || os.Version == "" { + return fmt.Errorf("invalid or incomplete os-release file contents, at least ID and VERSION_ID required") + } + + return nil +} diff --git a/vendor/github.com/k0sproject/rig/signals.go b/vendor/github.com/k0sproject/rig/signals.go new file mode 100644 index 00000000000..4943de4ddc4 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/signals.go @@ -0,0 +1,52 @@ +//go:build !windows +// +build !windows + +package rig + +import ( + "encoding/binary" + "fmt" + "io" + "os" + "os/signal" + "syscall" + + ssh "golang.org/x/crypto/ssh" + "golang.org/x/term" +) + +func captureSignals(stdin io.WriteCloser, session *ssh.Session) { + ch := make(chan os.Signal, 1) + 
signal.Notify(ch, os.Interrupt, syscall.SIGTSTP, syscall.SIGWINCH) + + go func() { + for sig := range ch { + switch sig { + case os.Interrupt: + fmt.Fprintf(stdin, "\x03") + case syscall.SIGTSTP: + fmt.Fprintf(stdin, "\x1a") + case syscall.SIGWINCH: + _, err := session.SendRequest("window-change", false, termSizeWNCH()) + if err != nil { + println("failed to relay window-change event: " + err.Error()) + } + } + } + }() +} + +func termSizeWNCH() []byte { + size := make([]byte, 16) + fd := int(os.Stdin.Fd()) + rows, cols, err := term.GetSize(fd) + if err != nil { + binary.BigEndian.PutUint32(size, 40) + binary.BigEndian.PutUint32(size[4:], 80) + } else { + binary.BigEndian.PutUint32(size, uint32(cols)) + binary.BigEndian.PutUint32(size[4:], uint32(rows)) + } + + return size +} diff --git a/vendor/github.com/k0sproject/rig/signals_windows.go b/vendor/github.com/k0sproject/rig/signals_windows.go new file mode 100644 index 00000000000..06597396697 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/signals_windows.go @@ -0,0 +1,26 @@ +// +build windows + +package rig + +import ( + "fmt" + "io" + "os" + "os/signal" + + ssh "golang.org/x/crypto/ssh" +) + +func captureSignals(stdin io.WriteCloser, session *ssh.Session) { + ch := make(chan os.Signal) + signal.Notify(ch, os.Interrupt) + + go func() { + for sig := range ch { + switch sig { + case os.Interrupt: + fmt.Fprintf(stdin, "\x03") + } + } + }() +} diff --git a/vendor/github.com/k0sproject/rig/ssh.go b/vendor/github.com/k0sproject/rig/ssh.go new file mode 100644 index 00000000000..c3653478660 --- /dev/null +++ b/vendor/github.com/k0sproject/rig/ssh.go @@ -0,0 +1,591 @@ +package rig + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" + "sync" + + ssh "golang.org/x/crypto/ssh" + + "golang.org/x/crypto/ssh/agent" + "golang.org/x/term" + + "github.com/acarl005/stripansi" + "github.com/alessio/shellescape" + "github.com/k0sproject/rig/exec" + "github.com/k0sproject/rig/log" + ps "github.com/k0sproject/rig/powershell" + + "github.com/mitchellh/go-homedir" +) + +// SSH describes an SSH connection +type SSH struct { + Address string `yaml:"address" validate:"required,hostname|ip"` + User string `yaml:"user" validate:"required" default:"root"` + Port int `yaml:"port" default:"22" validate:"gt=0,lte=65535"` + KeyPath string `yaml:"keyPath" validate:"omitempty"` + HostKey string `yaml:"hostKey,omitempty"` + Bastion *SSH `yaml:"bastion,omitempty"` + + name string + + isWindows bool + knowOs bool + keypathDefault bool + + client *ssh.Client +} + +const DefaultKeypath = "~/.ssh/id_rsa" + +// SetDefaults sets various default values +func (c *SSH) SetDefaults() { + if c.KeyPath == "" { + c.KeyPath = DefaultKeypath + c.keypathDefault = true + } + if k, err := homedir.Expand(c.KeyPath); err == nil { + c.KeyPath = k + } +} + +// Protocol returns the protocol name, "SSH" +func (c *SSH) Protocol() string { + return "SSH" +} + +// IPAddress returns the connection address +func (c *SSH) IPAddress() string { + return c.Address +} + +// String returns the connection's printable name +func (c *SSH) String() string { + if c.name == "" { + c.name = fmt.Sprintf("[ssh] %s", net.JoinHostPort(c.Address, strconv.Itoa(c.Port))) + } + + return c.name +} + +// IsConnected returns true if the client is connected +func (c *SSH) IsConnected() bool { + return c.client != nil +} + +// Disconnect closes the SSH connection +func (c *SSH) Disconnect() { + 
c.client.Close() +} + +// IsWindows is true when the host is running windows +func (c *SSH) IsWindows() bool { + if !c.knowOs && c.client != nil { + log.Debugf("%s: checking if host is windows", c) + c.isWindows = c.Exec("cmd.exe /c exit 0") == nil + log.Debugf("%s: host is windows: %t", c, c.isWindows) + c.knowOs = true + + } + + return c.isWindows +} + +// create human-readable SSH-key strings +func keyString(k ssh.PublicKey) string { + return k.Type() + " " + base64.StdEncoding.EncodeToString(k.Marshal()) // e.g. "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTY...." +} + +func trustedHostKeyCallback(trustedKey string) ssh.HostKeyCallback { + return func(_ string, _ net.Addr, k ssh.PublicKey) error { + ks := keyString(k) + if trustedKey != ks { + return fmt.Errorf("SSH host key verification failed") + } + + return nil + } +} + +// Connect opens the SSH connection +func (c *SSH) Connect() error { + config := &ssh.ClientConfig{ + User: c.User, + } + + if c.HostKey == "" { + config.HostKeyCallback = ssh.InsecureIgnoreHostKey() + } else { + config.HostKeyCallback = trustedHostKeyCallback(c.HostKey) + } + + var pubkeySigners []ssh.Signer + + _, err := os.Stat(c.KeyPath) + if err != nil && !c.keypathDefault { + return err + } + if err == nil { + var key []byte + key, err = os.ReadFile(c.KeyPath) + if err != nil { + return err + } + signer, err := ssh.ParsePrivateKey(key) + if err != nil { + log.Infof("can't parse keyfile %s: %s", c.KeyPath, err.Error()) + } else { + pubkeySigners = append(pubkeySigners, signer) + } + } + + sshAgentSock := os.Getenv("SSH_AUTH_SOCK") + if sshAgentSock != "" { + sshAgent, err := net.Dial("unix", sshAgentSock) + if err != nil { + log.Errorf("can't connect to SSH agent auth socket %s: %s", sshAgentSock, err) + } else { + signers, err := agent.NewClient(sshAgent).Signers() + if err == nil { + pubkeySigners = append(pubkeySigners, signers...) + } + } + } + + if len(pubkeySigners) > 0 { + config.Auth = append(config.Auth, ssh.PublicKeys(pubkeySigners...)) + } + + dst := net.JoinHostPort(c.Address, strconv.Itoa(c.Port)) + + var client *ssh.Client + + if c.Bastion == nil { + client, err = ssh.Dial("tcp", dst, config) + if err != nil { + return err + } + } else { + if err := c.Bastion.Connect(); err != nil { + return err + } + bconn, err := c.Bastion.client.Dial("tcp", dst) + if err != nil { + return err + } + c, chans, reqs, err := ssh.NewClientConn(bconn, dst, config) + if err != nil { + return err + } + client = ssh.NewClient(c, chans, reqs) + } + + c.client = client + return nil +} + +// Exec executes a command on the host +func (c *SSH) Exec(cmd string, opts ...exec.Option) error { + o := exec.Build(opts...) 
+ session, err := c.client.NewSession() + if err != nil { + return err + } + defer session.Close() + + cmd, err = o.Command(cmd) + if err != nil { + return err + } + + if len(o.Stdin) == 0 && c.knowOs && !c.isWindows { + // Only request a PTY when there's no STDIN data, because + // then you would need to send a CTRL-D after input to signal + // the end of text + modes := ssh.TerminalModes{ssh.ECHO: 0} + err = session.RequestPty("xterm", 80, 40, modes) + if err != nil { + return err + } + } + + o.LogCmd(c.String(), cmd) + + stdin, _ := session.StdinPipe() + stdout, _ := session.StdoutPipe() + stderr, _ := session.StderrPipe() + + if err := session.Start(cmd); err != nil { + return err + } + + if len(o.Stdin) > 0 { + o.LogStdin(c.String()) + if _, err := io.WriteString(stdin, o.Stdin); err != nil { + return err + } + } + stdin.Close() + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + if o.Writer == nil { + outputScanner := bufio.NewScanner(stdout) + + for outputScanner.Scan() { + text := outputScanner.Text() + stripped := stripansi.Strip(text) + o.AddOutput(c.String(), stripped+"\n", "") + } + + if err := outputScanner.Err(); err != nil { + o.LogErrorf("%s: %s", c, err.Error()) + } + } else { + if _, err := io.Copy(o.Writer, stdout); err != nil { + o.LogErrorf("%s: failed to stream stdout", c, err.Error()) + } + } + }() + + gotErrors := false + + wg.Add(1) + go func() { + defer wg.Done() + outputScanner := bufio.NewScanner(stderr) + + for outputScanner.Scan() { + gotErrors = true + o.AddOutput(c.String(), "", outputScanner.Text()+"\n") + } + + if err := outputScanner.Err(); err != nil { + gotErrors = true + o.LogErrorf("%s: %s", c, err.Error()) + } + }() + + err = session.Wait() + wg.Wait() + + if err != nil { + return err + } + + if c.knowOs && c.isWindows && (!o.AllowWinStderr && gotErrors) { + return fmt.Errorf("command failed (received output to stderr on windows)") + } + + return nil +} + +// Upload uploads a file from local src path to remote dst +func (c *SSH) Upload(src, dst string, opts ...exec.Option) error { + if c.IsWindows() { + return c.uploadWindows(src, dst, opts...) + } + return c.uploadLinux(src, dst, opts...) +} + +// ExecInteractive executes a command on the host and copies stdin/stdout/stderr from local host +func (c *SSH) ExecInteractive(cmd string) error { + session, err := c.client.NewSession() + if err != nil { + return err + } + defer session.Close() + + session.Stdout = os.Stdout + session.Stderr = os.Stderr + + fd := int(os.Stdin.Fd()) + old, err := term.MakeRaw(fd) + if err != nil { + return err + } + + defer func(fd int, old *term.State) { + _ = term.Restore(fd, old) + }(fd, old) + + rows, cols, err := term.GetSize(fd) + if err != nil { + return err + } + + modes := ssh.TerminalModes{ssh.ECHO: 1} + err = session.RequestPty("xterm", cols, rows, modes) + if err != nil { + return err + } + + stdinpipe, err := session.StdinPipe() + if err != nil { + return err + } + go func() { + _, _ = io.Copy(stdinpipe, os.Stdin) + }() + + captureSignals(stdinpipe, session) + + if cmd == "" { + err = session.Shell() + } else { + err = session.Start(cmd) + } + + if err != nil { + return err + } + + return session.Wait() +} + +func (c *SSH) uploadLinux(src, dst string, opts ...exec.Option) error { + var err error + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + defer func() { + if err != nil { + log.Debugf("%s: cleaning up %s", c, dst) + _ = c.Exec(fmt.Sprintf("rm -f -- %s", shellescape.Quote(dst)), opts...) 
+ } + }() + + session, err := c.client.NewSession() + if err != nil { + return err + } + defer session.Close() + + hostIn, err := session.StdinPipe() + if err != nil { + return err + } + + stderr, err := session.StderrPipe() + if err != nil { + return err + } + + gw, err := gzip.NewWriterLevel(hostIn, gzip.BestSpeed) + if err != nil { + return err + } + + o := exec.Build(opts...) + teeCmd, err := o.Command(fmt.Sprintf("tee -- %s > /dev/null", shellescape.Quote(dst))) + if err != nil { + return err + } + unzipCmd := fmt.Sprintf("gzip -d | %s", teeCmd) + log.Debugf("%s: executing `%s`", c, unzipCmd) + + err = session.Start(unzipCmd) + if err != nil { + return err + } + + if _, err := io.Copy(gw, in); err != nil { + return err + } + gw.Close() + hostIn.Close() + + if err = session.Wait(); err != nil { + msg, readErr := io.ReadAll(stderr) + if readErr != nil { + msg = []byte(readErr.Error()) + } + + return fmt.Errorf("upload failed: %s (%s)", err.Error(), msg) + } + + return nil +} + +func (c *SSH) uploadWindows(src, dst string, opts ...exec.Option) error { + var err error + defer func() { + if err != nil { + log.Debugf("%s: cleaning up %s", c, dst) + _ = c.Exec(fmt.Sprintf(`del %s`, ps.DoubleQuote(dst)), opts...) + } + }() + psCmd := ps.UploadCmd(dst) + stat, err := os.Stat(src) + if err != nil { + return err + } + sha256DigestLocalObj := sha256.New() + sha256DigestLocal := "" + sha256DigestRemote := "" + srcSize := uint64(stat.Size()) + var bytesSent uint64 + var realSent uint64 + var fdClosed bool + fd, err := os.Open(src) + if err != nil { + return err + } + defer func() { + if !fdClosed { + _ = fd.Close() + fdClosed = true + } + }() + session, err := c.client.NewSession() + if err != nil { + return err + } + defer session.Close() + + hostIn, err := session.StdinPipe() + if err != nil { + return err + } + hostOut, err := session.StdoutPipe() + if err != nil { + return err + } + hostErr, err := session.StderrPipe() + if err != nil { + return err + } + + o := exec.Build(opts...) 
+ psRunCmd, err := o.Command("powershell -ExecutionPolicy Unrestricted -EncodedCommand " + psCmd) + if err != nil { + return err + } + log.Debugf("%s: executing the upload command", c) + if err := session.Start(psRunCmd); err != nil { + return err + } + + bufferCapacity := 262143 // use 256kb chunks + base64LineBufferCapacity := bufferCapacity/3*4 + 2 + base64LineBuffer := make([]byte, base64LineBufferCapacity) + base64LineBuffer[base64LineBufferCapacity-2] = '\r' + base64LineBuffer[base64LineBufferCapacity-1] = '\n' + buffer := make([]byte, bufferCapacity) + var bufferLength int + + var ended bool + + for { + var n int + n, err = fd.Read(buffer) + bufferLength += n + if err != nil { + break + } + if bufferLength == bufferCapacity { + base64.StdEncoding.Encode(base64LineBuffer, buffer) + bytesSent += uint64(bufferLength) + _, _ = sha256DigestLocalObj.Write(buffer) + if bytesSent >= srcSize { + ended = true + sha256DigestLocal = hex.EncodeToString(sha256DigestLocalObj.Sum(nil)) + } + b, err := hostIn.Write(base64LineBuffer) + realSent += uint64(b) + if ended { + hostIn.Close() + } + + bufferLength = 0 + if err != nil { + return err + } + } + } + _ = fd.Close() + fdClosed = true + if err == io.EOF { + err = nil + } + if err != nil { + return err + } + if !ended { + _, _ = sha256DigestLocalObj.Write(buffer[:bufferLength]) + sha256DigestLocal = hex.EncodeToString(sha256DigestLocalObj.Sum(nil)) + base64.StdEncoding.Encode(base64LineBuffer, buffer[:bufferLength]) + i := base64.StdEncoding.EncodedLen(bufferLength) + base64LineBuffer[i] = '\r' + base64LineBuffer[i+1] = '\n' + _, err = hostIn.Write(base64LineBuffer[:i+2]) + if err != nil { + if !strings.Contains(err.Error(), ps.PipeHasEnded) && !strings.Contains(err.Error(), ps.PipeIsBeingClosed) { + return err + } + // ignore pipe errors that results from passing true to cmd.SendInput + } + hostIn.Close() + } + var wg sync.WaitGroup + var stderr bytes.Buffer + var stdout bytes.Buffer + + wg.Add(1) + go func() { + defer wg.Done() + _, _ = io.Copy(&stderr, hostErr) + }() + + wg.Add(1) + go func() { + defer wg.Done() + scanner := bufio.NewScanner(hostOut) + for scanner.Scan() { + var output struct { + Sha256 string `json:"sha256"` + } + if json.Unmarshal(scanner.Bytes(), &output) == nil { + sha256DigestRemote = output.Sha256 + } else { + _, _ = stdout.Write(scanner.Bytes()) + _, _ = stdout.WriteString("\n") + } + } + if err := scanner.Err(); err != nil { + stdout.Reset() + } + }() + + if err = session.Wait(); err != nil { + return fmt.Errorf("%s: upload failed: %s", c, err.Error()) + } + + wg.Wait() + + if sha256DigestRemote == "" { + return fmt.Errorf("copy file command did not output the expected JSON to stdout but exited with code 0") + } else if sha256DigestRemote != sha256DigestLocal { + return fmt.Errorf("copy file checksum mismatch (local = %s, remote = %s)", sha256DigestLocal, sha256DigestRemote) + } + + return nil +} diff --git a/vendor/github.com/k0sproject/rig/winrm.go b/vendor/github.com/k0sproject/rig/winrm.go new file mode 100644 index 00000000000..68d2db47b1d --- /dev/null +++ b/vendor/github.com/k0sproject/rig/winrm.go @@ -0,0 +1,440 @@ +package rig + +import ( + "bufio" + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "sync" + "time" + + "github.com/k0sproject/rig/exec" + "github.com/k0sproject/rig/log" + ps "github.com/k0sproject/rig/powershell" + "github.com/mitchellh/go-homedir" + + "github.com/masterzen/winrm" +) + +// WinRM describes a WinRM connection with 
its configuration options +type WinRM struct { + Address string `yaml:"address" validate:"required,hostname|ip"` + User string `yaml:"user" validate:"omitempty,gt=2" default:"Administrator"` + Port int `yaml:"port" default:"5985" validate:"gt=0,lte=65535"` + Password string `yaml:"password,omitempty"` + UseHTTPS bool `yaml:"useHTTPS" default:"false"` + Insecure bool `yaml:"insecure" default:"false"` + UseNTLM bool `yaml:"useNTLM" default:"false"` + CACertPath string `yaml:"caCertPath,omitempty" validate:"omitempty,file"` + CertPath string `yaml:"certPath,omitempty" validate:"omitempty,file"` + KeyPath string `yaml:"keyPath,omitempty" validate:"omitempty,file"` + TLSServerName string `yaml:"tlsServerName,omitempty" validate:"omitempty,hostname|ip"` + Bastion *SSH `yaml:"bastion,omitempty"` + + name string + + caCert []byte + key []byte + cert []byte + + client *winrm.Client +} + +// SetDefaults sets various default values +func (c *WinRM) SetDefaults() { + if p, err := homedir.Expand(c.CACertPath); err == nil { + c.CACertPath = p + } + + if p, err := homedir.Expand(c.CertPath); err == nil { + c.CertPath = p + } + + if p, err := homedir.Expand(c.KeyPath); err == nil { + c.KeyPath = p + } + + if c.Port == 5985 && c.UseHTTPS { + c.Port = 5986 + } +} + +// Protocol returns the protocol name, "WinRM" +func (c *WinRM) Protocol() string { + return "WinRM" +} + +// IPAddress returns the connection address +func (c *WinRM) IPAddress() string { + return c.Address +} + +// String returns the connection's printable name +func (c *WinRM) String() string { + if c.name == "" { + c.name = fmt.Sprintf("[winrm] %s:%d", c.Address, c.Port) + } + + return c.name +} + +// IsConnected returns true if the client is connected +func (c *WinRM) IsConnected() bool { + return c.client != nil +} + +// IsWindows always returns true on winrm +func (c *WinRM) IsWindows() bool { + return true +} + +func (c *WinRM) loadCertificates() error { + c.caCert = nil + if c.CACertPath != "" { + ca, err := os.ReadFile(c.CACertPath) + if err != nil { + return err + } + c.caCert = ca + } + + c.cert = nil + if c.CertPath != "" { + cert, err := os.ReadFile(c.CertPath) + if err != nil { + return err + } + c.cert = cert + } + + c.key = nil + if c.KeyPath != "" { + key, err := os.ReadFile(c.KeyPath) + if err != nil { + return err + } + c.key = key + } + + return nil +} + +// Connect opens the WinRM connection +func (c *WinRM) Connect() error { + if err := c.loadCertificates(); err != nil { + return fmt.Errorf("%s: failed to load certificates: %s", c, err) + } + + endpoint := &winrm.Endpoint{ + Host: c.Address, + Port: c.Port, + HTTPS: c.UseHTTPS, + Insecure: c.Insecure, + TLSServerName: c.TLSServerName, + Timeout: 60 * time.Second, + } + + if len(c.caCert) > 0 { + endpoint.CACert = c.caCert + } + + if len(c.cert) > 0 { + endpoint.Cert = c.cert + } + + if len(c.key) > 0 { + endpoint.Key = c.key + } + + params := winrm.DefaultParameters + + if c.Bastion != nil { + err := c.Bastion.Connect() + if err != nil { + return err + } + params.Dial = c.Bastion.client.Dial + } + + if c.UseNTLM { + params.TransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} } + } + + if c.UseHTTPS && len(c.cert) > 0 { + params.TransportDecorator = func() winrm.Transporter { return &winrm.ClientAuthRequest{} } + } + + client, err := winrm.NewClientWithParameters(endpoint, c.User, c.Password, params) + + if err != nil { + return err + } + + log.Debugf("%s: testing connection", c) + _, err = client.Run("echo ok", io.Discard, io.Discard) + if err != nil 
{ + return err + } + log.Debugf("%s: test passed", c) + + c.client = client + + return nil +} + +// Disconnect closes the WinRM connection +func (c *WinRM) Disconnect() { + c.client = nil +} + +// Exec executes a command on the host +func (c *WinRM) Exec(cmd string, opts ...exec.Option) error { + o := exec.Build(opts...) + shell, err := c.client.CreateShell() + if err != nil { + return err + } + defer shell.Close() + + o.LogCmd(c.String(), cmd) + + command, err := shell.Execute(cmd) + if err != nil { + return err + } + + var wg sync.WaitGroup + + if o.Stdin != "" { + o.LogStdin(c.String()) + wg.Add(1) + go func() { + defer wg.Done() + defer command.Stdin.Close() + _, _ = command.Stdin.Write([]byte(o.Stdin)) + }() + } + + wg.Add(1) + go func() { + defer wg.Done() + if o.Writer == nil { + outputScanner := bufio.NewScanner(command.Stdout) + + for outputScanner.Scan() { + o.AddOutput(c.String(), outputScanner.Text()+"\n", "") + } + + if err := outputScanner.Err(); err != nil { + o.LogErrorf("%s: %s", c, err.Error()) + } + command.Stdout.Close() + } else { + if _, err := io.Copy(o.Writer, command.Stdout); err != nil { + o.LogErrorf("%s: failed to stream stdout", c, err.Error()) + } + } + }() + + gotErrors := false + + wg.Add(1) + go func() { + defer wg.Done() + outputScanner := bufio.NewScanner(command.Stderr) + + for outputScanner.Scan() { + gotErrors = true + o.AddOutput(c.String(), "", outputScanner.Text()+"\n") + } + + if err := outputScanner.Err(); err != nil { + gotErrors = true + o.LogErrorf("%s: %s", c, err.Error()) + } + command.Stderr.Close() + }() + + command.Wait() + + wg.Wait() + + command.Close() + + if command.ExitCode() > 0 || (!o.AllowWinStderr && gotErrors) { + return fmt.Errorf("command failed (received output to stderr on windows)") + } + + return nil +} + +// ExecInteractive executes a command on the host and copies stdin/stdout/stderr from local host +func (c *WinRM) ExecInteractive(cmd string) error { + if cmd == "" { + cmd = "cmd" + } + _, err := c.client.RunWithInput(cmd, os.Stdout, os.Stderr, os.Stdin) + return err +} + +// Upload uploads a file from local src path to remote path dst +func (c *WinRM) Upload(src, dst string, opts ...exec.Option) error { + var err error + defer func() { + if err != nil { + _ = c.Exec(fmt.Sprintf(`del %s`, ps.DoubleQuote(dst)), opts...) + } + }() + psCmd := ps.UploadCmd(dst) + stat, err := os.Stat(src) + if err != nil { + return err + } + sha256DigestLocalObj := sha256.New() + sha256DigestLocal := "" + sha256DigestRemote := "" + srcSize := uint64(stat.Size()) + var bytesSent uint64 + var realSent uint64 + var fdClosed bool + fd, err := os.Open(src) + if err != nil { + return err + } + defer func() { + if !fdClosed { + _ = fd.Close() + fdClosed = true + } + }() + shell, err := c.client.CreateShell() + if err != nil { + return err + } + defer shell.Close() + o := exec.Build(opts...) 
+ upcmd, err := o.Command("powershell -ExecutionPolicy Unrestricted -EncodedCommand " + psCmd) + if err != nil { + return err + } + + cmd, err := shell.Execute(upcmd) + if err != nil { + return err + } + + // Create a dummy request to get its length + dummy := winrm.NewSendInputRequest("dummydummydummy", "dummydummydummy", "dummydummydummy", []byte(""), false, winrm.DefaultParameters) + maxInput := len(dummy.String()) - 100 + bufferCapacity := (winrm.DefaultParameters.EnvelopeSize - maxInput) / 4 * 3 + base64LineBufferCapacity := bufferCapacity/3*4 + 2 + base64LineBuffer := make([]byte, base64LineBufferCapacity) + base64LineBuffer[base64LineBufferCapacity-2] = '\r' + base64LineBuffer[base64LineBufferCapacity-1] = '\n' + buffer := make([]byte, bufferCapacity) + var bufferLength int + + var ended bool + + for { + var n int + n, err = fd.Read(buffer) + bufferLength += n + if err != nil { + break + } + if bufferLength == bufferCapacity { + base64.StdEncoding.Encode(base64LineBuffer, buffer) + bytesSent += uint64(bufferLength) + _, _ = sha256DigestLocalObj.Write(buffer) + if bytesSent >= srcSize { + ended = true + sha256DigestLocal = hex.EncodeToString(sha256DigestLocalObj.Sum(nil)) + } + b, err := cmd.Stdin.Write(base64LineBuffer) + realSent += uint64(b) + if ended { + cmd.Stdin.Close() + } + + bufferLength = 0 + if err != nil { + return err + } + } + } + _ = fd.Close() + fdClosed = true + if err == io.EOF { + err = nil + } + if err != nil { + cmd.Close() + return err + } + if !ended { + _, _ = sha256DigestLocalObj.Write(buffer[:bufferLength]) + sha256DigestLocal = hex.EncodeToString(sha256DigestLocalObj.Sum(nil)) + base64.StdEncoding.Encode(base64LineBuffer, buffer[:bufferLength]) + i := base64.StdEncoding.EncodedLen(bufferLength) + base64LineBuffer[i] = '\r' + base64LineBuffer[i+1] = '\n' + _, err = cmd.Stdin.Write(base64LineBuffer[:i+2]) + if err != nil { + if !strings.Contains(err.Error(), ps.PipeHasEnded) && !strings.Contains(err.Error(), ps.PipeIsBeingClosed) { + cmd.Close() + return err + } + // ignore pipe errors that results from passing true to cmd.SendInput + } + cmd.Stdin.Close() + } + var wg sync.WaitGroup + wg.Add(2) + var stderr bytes.Buffer + var stdout bytes.Buffer + go func() { + defer wg.Done() + _, err = io.Copy(&stderr, cmd.Stderr) + if err != nil { + stderr.Reset() + } + }() + go func() { + defer wg.Done() + scanner := bufio.NewScanner(cmd.Stdout) + for scanner.Scan() { + var output struct { + Sha256 string `json:"sha256"` + } + if json.Unmarshal(scanner.Bytes(), &output) == nil { + sha256DigestRemote = output.Sha256 + } else { + _, _ = stdout.Write(scanner.Bytes()) + _, _ = stdout.WriteString("\n") + } + } + if err := scanner.Err(); err != nil { + stdout.Reset() + } + }() + cmd.Wait() + wg.Wait() + + if cmd.ExitCode() != 0 { + return fmt.Errorf("non-zero exit code: %d during upload", cmd.ExitCode()) + } + if sha256DigestRemote == "" { + return fmt.Errorf("copy file command did not output the expected JSON to stdout but exited with code 0") + } else if sha256DigestRemote != sha256DigestLocal { + return fmt.Errorf("copy file checksum mismatch (local = %s, remote = %s)", sha256DigestLocal, sha256DigestRemote) + } + + return nil +} diff --git a/vendor/github.com/k0sproject/version/LICENSE b/vendor/github.com/k0sproject/version/LICENSE new file mode 100644 index 00000000000..f36581594ee --- /dev/null +++ b/vendor/github.com/k0sproject/version/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR 
USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021, Mirantis Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
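Both Windows upload paths above (uploadWindows in ssh.go and Upload in winrm.go) stream the file to the remote UploadCmd scriptlet as CRLF-terminated base64 lines while hashing the plaintext locally, then compare SHA-256 digests at the end. A rough sketch of that per-chunk framing; `sendBase64Lines` is a hypothetical helper, and the 262143-byte chunk size is borrowed from the SSH path above:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// sendBase64Lines encodes src into base64 lines ending in CRLF, the framing
// the remote PowerShell "stdin daemon" decodes line by line, and returns the
// local SHA-256 of the plaintext for the final digest comparison.
func sendBase64Lines(dst io.Writer, src io.Reader, chunkSize int) (string, error) {
	sum := sha256.New()
	buf := make([]byte, chunkSize)
	for {
		n, err := io.ReadFull(src, buf)
		if n > 0 {
			sum.Write(buf[:n])
			line := make([]byte, base64.StdEncoding.EncodedLen(n)+2)
			base64.StdEncoding.Encode(line, buf[:n])
			line[len(line)-2], line[len(line)-1] = '\r', '\n'
			if _, werr := dst.Write(line); werr != nil {
				return "", werr
			}
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			break
		}
		if err != nil {
			return "", err
		}
	}
	return hex.EncodeToString(sum.Sum(nil)), nil
}

func main() {
	var stdin bytes.Buffer // stands in for the remote command's stdin pipe
	digest, _ := sendBase64Lines(&stdin, strings.NewReader("hello, world"), 262143)
	fmt.Print(stdin.String())
	fmt.Println("local sha256:", digest)
}
```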
diff --git a/vendor/github.com/k0sproject/version/README.md b/vendor/github.com/k0sproject/version/README.md new file mode 100644 index 00000000000..dc8ab1aebf3 --- /dev/null +++ b/vendor/github.com/k0sproject/version/README.md @@ -0,0 +1,47 @@ +# version + +A go-language package for managing [k0s](https://github.com/k0sproject/k0s) version numbers. It is based on [hashicorp/go-version](https://github.com/hashicorp/go-version) but adds sorting and comparison capabilities for the k0s version numbering scheme which requires additional sorting by the build tag. + +## Usage + +### Basic comparison + +```go +import ( + "fmt" + "github.com/k0sproject/version" +) + +func main() { + a := version.NewVersion("1.23.3+k0s.1") + b := version.NewVersion("1.23.3+k0s.2") + fmt.Println("a is greater than b: %t", a.GreaterThan(b)) + fmt.Println("a is less than b: %t", a.LessThan(b)) + fmt.Println("a is equal to b: %t", a.Equal(b)) +} +``` + +Outputs: + +```text +a is greater than b: false +a is less than b: true +a is equal to b: false +``` + +### Check online for latest version + +```go +import ( + "fmt" + "github.com/k0sproject/version" +) + +func main() { + latest, err := version.Latest() + if err != nil { + panic(err) + } + fmt.Println("Latest k0s version is: %s", latest) +} +``` diff --git a/vendor/github.com/k0sproject/version/collection.go b/vendor/github.com/k0sproject/version/collection.go new file mode 100644 index 00000000000..fa3000ac1b6 --- /dev/null +++ b/vendor/github.com/k0sproject/version/collection.go @@ -0,0 +1,31 @@ +package version + +import "fmt" + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func NewCollection(versions ...string) (Collection, error) { + c := make(Collection, len(versions)) + for i, v := range versions { + nv, err := NewVersion(v) + if err != nil { + return Collection{}, fmt.Errorf("invalid version '%s': %w", v, err) + } + c[i] = nv + } + return c, nil +} + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].Compare(v[j]) < 0 +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/k0sproject/version/go.mod b/vendor/github.com/k0sproject/version/go.mod new file mode 100644 index 00000000000..d178e9aca02 --- /dev/null +++ b/vendor/github.com/k0sproject/version/go.mod @@ -0,0 +1,14 @@ +module github.com/k0sproject/version + +go 1.17 + +require ( + github.com/hashicorp/go-version v1.4.0 + github.com/stretchr/testify v1.7.0 +) + +require ( + github.com/davecgh/go-spew v1.1.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect +) diff --git a/vendor/github.com/k0sproject/version/go.sum b/vendor/github.com/k0sproject/version/go.sum new file mode 100644 index 00000000000..ebae1e4de46 --- /dev/null +++ b/vendor/github.com/k0sproject/version/go.sum @@ -0,0 +1,13 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= +github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
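The README snippets above drop the error returned by NewVersion and pass format verbs to fmt.Println, so they do not compile as written. A corrected sketch of the comparison example, using only the API shown in this package (NewVersion, LessThan, GreaterThan, Equal):

```go
package main

import (
	"fmt"

	"github.com/k0sproject/version"
)

func main() {
	// NewVersion returns (*Version, error); the error must be handled.
	a, err := version.NewVersion("v1.23.3+k0s.1")
	if err != nil {
		panic(err)
	}
	b, err := version.NewVersion("v1.23.3+k0s.2")
	if err != nil {
		panic(err)
	}
	fmt.Printf("a is greater than b: %t\n", a.GreaterThan(b)) // false
	fmt.Printf("a is less than b: %t\n", a.LessThan(b))       // true: k0s.1 sorts before k0s.2
	fmt.Printf("a is equal to b: %t\n", a.Equal(b))           // false
}
```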
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/k0sproject/version/latest.go b/vendor/github.com/k0sproject/version/latest.go new file mode 100644 index 00000000000..74914cc0586 --- /dev/null +++ b/vendor/github.com/k0sproject/version/latest.go @@ -0,0 +1,73 @@ +package version + +import ( + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +var Timeout = time.Second * 10 + +// LatestByPrerelease returns the latest released k0s version, if preok is true, prereleases are also accepted. +func LatestByPrerelease(allowpre bool) (*Version, error) { + u := &url.URL{ + Scheme: "https", + Host: "docs.k0sproject.io", + } + + if allowpre { + u.Path = "latest.txt" + } else { + u.Path = "stable.txt" + } + + v, err := httpGet(u.String()) + if err != nil { + return nil, err + } + + return NewVersion(v) +} + +// LatestStable returns the semantically sorted latest non-prerelease version from the online repository +func LatestStable() (*Version, error) { + return LatestByPrerelease(false) +} + +// LatestVersion returns the semantically sorted latest version even if it is a prerelease from the online repository +func Latest() (*Version, error) { + return LatestByPrerelease(true) +} + +func httpGet(u string) (string, error) { + client := &http.Client{ + Timeout: Timeout, + } + + resp, err := client.Get(u) + if err != nil { + return "", fmt.Errorf("http request to %s failed: %w", u, err) + } + + if resp.Body == nil { + return "", fmt.Errorf("http request to %s failed: nil body", u) + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("http request to %s failed: backend returned %d", u, resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("http request to %s failed: %w when reading body", u, err) + } + + if err := resp.Body.Close(); err != nil { + return "", fmt.Errorf("http request to %s failed: %w when closing body", u, err) + } + + return strings.TrimSpace(string(body)), nil +} diff --git a/vendor/github.com/k0sproject/version/version.go b/vendor/github.com/k0sproject/version/version.go new file mode 100644 index 00000000000..625fb4fbe8c --- /dev/null +++ b/vendor/github.com/k0sproject/version/version.go @@ -0,0 +1,121 @@ +package version + +import ( + "fmt" + "path/filepath" + "sort" + "strings" + + goversion "github.com/hashicorp/go-version" +) + +var BaseUrl = "https://github.com/k0sproject/k0s/" + +// Version is a k0s version +type Version struct { + goversion.Version +} + +func pair(a, b *Version) Collection { + return Collection{a, b} +} + +// String returns a v-prefixed string representation of the k0s version +func (v *Version) String() string { + return fmt.Sprintf("v%s", v.Version.String()) +} + +func (v *Version) urlString() string { + return strings.ReplaceAll(v.String(), "+", "%2B") +} + +// URL returns an URL to the release information page for the k0s version +func (v 
*Version) URL() string { + return BaseUrl + filepath.Join("releases", "tag", v.urlString()) +} + +func (v *Version) assetBaseURL() string { + return BaseUrl + filepath.Join("releases", "download", v.urlString()) + "/" +} + +// DownloadURL returns the k0s binary download URL for the k0s version +func (v *Version) DownloadURL(os, arch string) string { + var ext string + if strings.HasPrefix(strings.ToLower(os), "win") { + ext = ".exe" + } + return v.assetBaseURL() + fmt.Sprintf("k0s-%s-%s%s", v.String(), arch, ext) +} + +// AirgapDownloadURL returns the k0s airgap bundle download URL for the k0s version +func (v *Version) AirgapDownloadURL(arch string) string { + return v.assetBaseURL() + fmt.Sprintf("k0s-airgap-bundle-%s-%s", v.String(), arch) +} + +// DocsURL returns the documentation URL for the k0s version +func (v *Version) DocsURL() string { + return fmt.Sprintf("https://docs.k0sproject.io/%s/", v.String()) +} + +// Equal returns true if the version is equal to the supplied version +func (v *Version) Equal(b *Version) bool { + return v.String() == b.String() +} + +// GreaterThan returns true if the version is greater than the supplied version +func (v *Version) GreaterThan(b *Version) bool { + if v.String() == b.String() { + return false + } + p := pair(v, b) + sort.Sort(p) + return v.String() == p[1].String() +} + +// LessThan returns true if the version is lower than the supplied version +func (v *Version) LessThan(b *Version) bool { + if v.String() == b.String() { + return false + } + return !v.GreaterThan(b) +} + +// GreaterThanOrEqual returns true if the version is greater than the supplied version or equal +func (v *Version) GreaterThanOrEqual(b *Version) bool { + return v.Equal(b) || v.GreaterThan(b) +} + +// LessThanOrEqual returns true if the version is lower than the supplied version or equal +func (v *Version) LessThanOrEqual(b *Version) bool { + return v.Equal(b) || v.LessThan(b) +} + +// Compare compares two versions and returns one of the integers: -1, 0 or 1 (less than, equal, greater than) +func (v *Version) Compare(b *Version) int { + c := v.Version.Compare(&b.Version) + if c != 0 { + return c + } + + vA := v.String() + + // go to plain string comparison + s := []string{vA, b.String()} + sort.Strings(s) + + if vA == s[0] { + return -1 + } + + return 1 +} + +// NewVersion returns a new Version created from the supplied string or an error if the string is not a valid version number +func NewVersion(v string) (*Version, error) { + n, err := goversion.NewVersion(strings.TrimPrefix(v, "v")) + if err != nil { + return nil, err + } + + return &Version{Version: *n}, nil +} diff --git a/vendor/github.com/kballard/go-shellquote/LICENSE b/vendor/github.com/kballard/go-shellquote/LICENSE new file mode 100644 index 00000000000..a6d77312e10 --- /dev/null +++ b/vendor/github.com/kballard/go-shellquote/LICENSE @@ -0,0 +1,19 @@ +Copyright (C) 2014 Kevin Ballard + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/kballard/go-shellquote/README b/vendor/github.com/kballard/go-shellquote/README new file mode 100644 index 00000000000..4d34e87afc3 --- /dev/null +++ b/vendor/github.com/kballard/go-shellquote/README @@ -0,0 +1,36 @@ +PACKAGE + +package shellquote + import "github.com/kballard/go-shellquote" + + Shellquote provides utilities for joining/splitting strings using sh's + word-splitting rules. + +VARIABLES + +var ( + UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string") + UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string") + UnterminatedEscapeError = errors.New("Unterminated backslash-escape") +) + + +FUNCTIONS + +func Join(args ...string) string + Join quotes each argument and joins them with a space. If passed to + /bin/sh, the resulting string will be split back into the original + arguments. + +func Split(input string) (words []string, err error) + Split splits a string according to /bin/sh's word-splitting rules. It + supports backslash-escapes, single-quotes, and double-quotes. Notably it + does not support the $'' style of quoting. It also doesn't attempt to + perform any other sort of expansion, including brace expansion, shell + expansion, or pathname expansion. + + If the given input has an unterminated quoted string or ends in a + backslash-escape, one of UnterminatedSingleQuoteError, + UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned. + + diff --git a/vendor/github.com/kballard/go-shellquote/doc.go b/vendor/github.com/kballard/go-shellquote/doc.go new file mode 100644 index 00000000000..9445fa4ad99 --- /dev/null +++ b/vendor/github.com/kballard/go-shellquote/doc.go @@ -0,0 +1,3 @@ +// Shellquote provides utilities for joining/splitting strings using sh's +// word-splitting rules. +package shellquote diff --git a/vendor/github.com/kballard/go-shellquote/quote.go b/vendor/github.com/kballard/go-shellquote/quote.go new file mode 100644 index 00000000000..72a8cb38b54 --- /dev/null +++ b/vendor/github.com/kballard/go-shellquote/quote.go @@ -0,0 +1,102 @@ +package shellquote + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// Join quotes each argument and joins them with a space. +// If passed to /bin/sh, the resulting string will be split back into the +// original arguments. +func Join(args ...string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} + +const ( + specialChars = "\\'\"`${[|&;<>()*?!" + extraSpecialChars = " \t\n" + prefixChars = "~" +) + +func quote(word string, buf *bytes.Buffer) { + // We want to try to produce a "nice" output. As such, we will + // backslash-escape most characters, but if we encounter a space, or if we + // encounter an extra-special char (which doesn't work with + // backslash-escaping) we switch over to quoting the whole word. 
We do this + // with a space because it's typically easier for people to read multi-word + // arguments when quoted with a space rather than with ugly backslashes + // everywhere. + origLen := buf.Len() + + if len(word) == 0 { + // oops, no content + buf.WriteString("''") + return + } + + cur, prev := word, word + atStart := true + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if strings.ContainsRune(specialChars, c) || (atStart && strings.ContainsRune(prefixChars, c)) { + // copy the non-special chars up to this point + if len(cur) < len(prev) { + buf.WriteString(prev[0 : len(prev)-len(cur)-l]) + } + buf.WriteByte('\\') + buf.WriteRune(c) + prev = cur + } else if strings.ContainsRune(extraSpecialChars, c) { + // start over in quote mode + buf.Truncate(origLen) + goto quote + } + atStart = false + } + if len(prev) > 0 { + buf.WriteString(prev) + } + return + +quote: + // quote mode + // Use single-quotes, but if we find a single-quote in the word, we need + // to terminate the string, emit an escaped quote, and start the string up + // again + inQuote := false + for len(word) > 0 { + i := strings.IndexRune(word, '\'') + if i == -1 { + break + } + if i > 0 { + if !inQuote { + buf.WriteByte('\'') + inQuote = true + } + buf.WriteString(word[0:i]) + } + word = word[i+1:] + if inQuote { + buf.WriteByte('\'') + inQuote = false + } + buf.WriteString("\\'") + } + if len(word) > 0 { + if !inQuote { + buf.WriteByte('\'') + } + buf.WriteString(word) + buf.WriteByte('\'') + } +} diff --git a/vendor/github.com/kballard/go-shellquote/unquote.go b/vendor/github.com/kballard/go-shellquote/unquote.go new file mode 100644 index 00000000000..b1b13da9328 --- /dev/null +++ b/vendor/github.com/kballard/go-shellquote/unquote.go @@ -0,0 +1,156 @@ +package shellquote + +import ( + "bytes" + "errors" + "strings" + "unicode/utf8" +) + +var ( + UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string") + UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string") + UnterminatedEscapeError = errors.New("Unterminated backslash-escape") +) + +var ( + splitChars = " \n\t" + singleChar = '\'' + doubleChar = '"' + escapeChar = '\\' + doubleEscapeChars = "$`\"\n\\" +) + +// Split splits a string according to /bin/sh's word-splitting rules. It +// supports backslash-escapes, single-quotes, and double-quotes. Notably it does +// not support the $'' style of quoting. It also doesn't attempt to perform any +// other sort of expansion, including brace expansion, shell expansion, or +// pathname expansion. +// +// If the given input has an unterminated quoted string or ends in a +// backslash-escape, one of UnterminatedSingleQuoteError, +// UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned. 
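A brief round-trip sketch of the Join/Split pair documented above (the sample arguments and the quoting shown in the comments are illustrative, not prescribed by the package):

```
package main

import (
	"fmt"

	"github.com/kballard/go-shellquote"
)

func main() {
	args := []string{"echo", "hello world", "$HOME"}

	// Join escapes each argument so that /bin/sh would split the result
	// back into the original words.
	line := shellquote.Join(args...)
	fmt.Println(line) // e.g. echo 'hello world' \$HOME

	// Split applies sh-style word splitting and undoes the quoting.
	words, err := shellquote.Split(line)
	if err != nil {
		fmt.Println("unterminated quote or escape:", err)
		return
	}
	fmt.Println(words) // [echo hello world $HOME]
}
```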
+func Split(input string) (words []string, err error) { + var buf bytes.Buffer + words = make([]string, 0) + + for len(input) > 0 { + // skip any splitChars at the start + c, l := utf8.DecodeRuneInString(input) + if strings.ContainsRune(splitChars, c) { + input = input[l:] + continue + } else if c == escapeChar { + // Look ahead for escaped newline so we can skip over it + next := input[l:] + if len(next) == 0 { + err = UnterminatedEscapeError + return + } + c2, l2 := utf8.DecodeRuneInString(next) + if c2 == '\n' { + input = next[l2:] + continue + } + } + + var word string + word, input, err = splitWord(input, &buf) + if err != nil { + return + } + words = append(words, word) + } + return +} + +func splitWord(input string, buf *bytes.Buffer) (word string, remainder string, err error) { + buf.Reset() + +raw: + { + cur := input + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if c == singleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto single + } else if c == doubleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto double + } else if c == escapeChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto escape + } else if strings.ContainsRune(splitChars, c) { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + return buf.String(), cur, nil + } + } + if len(input) > 0 { + buf.WriteString(input) + input = "" + } + goto done + } + +escape: + { + if len(input) == 0 { + return "", "", UnterminatedEscapeError + } + c, l := utf8.DecodeRuneInString(input) + if c == '\n' { + // a backslash-escaped newline is elided from the output entirely + } else { + buf.WriteString(input[:l]) + } + input = input[l:] + } + goto raw + +single: + { + i := strings.IndexRune(input, singleChar) + if i == -1 { + return "", "", UnterminatedSingleQuoteError + } + buf.WriteString(input[0:i]) + input = input[i+1:] + goto raw + } + +double: + { + cur := input + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if c == doubleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto raw + } else if c == escapeChar { + // bash only supports certain escapes in double-quoted strings + c2, l2 := utf8.DecodeRuneInString(cur) + cur = cur[l2:] + if strings.ContainsRune(doubleEscapeChars, c2) { + buf.WriteString(input[0 : len(input)-len(cur)-l-l2]) + if c2 == '\n' { + // newline is special, skip the backslash entirely + } else { + buf.WriteRune(c2) + } + input = cur + } + } + } + return "", "", UnterminatedDoubleQuoteError + } + +done: + return buf.String(), input, nil +} diff --git a/vendor/github.com/leodido/go-urn/.gitignore b/vendor/github.com/leodido/go-urn/.gitignore new file mode 100644 index 00000000000..5bcf4bade0b --- /dev/null +++ b/vendor/github.com/leodido/go-urn/.gitignore @@ -0,0 +1,11 @@ +*.exe +*.dll +*.so +*.dylib + +*.test + +*.out +*.txt + +vendor/ \ No newline at end of file diff --git a/vendor/github.com/leodido/go-urn/.travis.yml b/vendor/github.com/leodido/go-urn/.travis.yml new file mode 100644 index 00000000000..21f348d65a3 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.13.x + - 1.14.x + - 1.15.x + - tip + +before_install: + - go get -t -v ./... 
+ +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/vendor/github.com/leodido/go-urn/LICENSE b/vendor/github.com/leodido/go-urn/LICENSE new file mode 100644 index 00000000000..8c3504a5a9b --- /dev/null +++ b/vendor/github.com/leodido/go-urn/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Leonardo Di Donato + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/leodido/go-urn/README.md b/vendor/github.com/leodido/go-urn/README.md new file mode 100644 index 00000000000..cc902ec0e37 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/README.md @@ -0,0 +1,55 @@ +[![Build](https://img.shields.io/travis/leodido/go-urn/master.svg?style=for-the-badge)](https://travis-ci.org/leodido/go-urn) [![Coverage](https://img.shields.io/codecov/c/github/leodido/go-urn.svg?style=for-the-badge)](https://codecov.io/gh/leodido/go-urn) [![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=for-the-badge)](https://godoc.org/github.com/leodido/go-urn) + +**A parser for URNs**. + +> As seen on [RFC 2141](https://tools.ietf.org/html/rfc2141#ref-1). + +[API documentation](https://godoc.org/github.com/leodido/go-urn). + +## Installation + +``` +go get github.com/leodido/go-urn +``` + +## Performances + +This implementation results to be really fast. + +Usually below ½ microsecond on my machine[1](#mymachine). + +Notice it also performs, while parsing: + +1. fine-grained and informative erroring +2. 
specific-string normalization + +``` +ok/00/urn:a:b______________________________________/-4 20000000 265 ns/op 182 B/op 6 allocs/op +ok/01/URN:foo:a123,456_____________________________/-4 30000000 296 ns/op 200 B/op 6 allocs/op +ok/02/urn:foo:a123%2c456___________________________/-4 20000000 331 ns/op 208 B/op 6 allocs/op +ok/03/urn:ietf:params:scim:schemas:core:2.0:User___/-4 20000000 430 ns/op 280 B/op 6 allocs/op +ok/04/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 411 ns/op 312 B/op 6 allocs/op +ok/05/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 472 ns/op 344 B/op 6 allocs/op +ok/06/urn:burnout:nss______________________________/-4 30000000 257 ns/op 192 B/op 6 allocs/op +ok/07/urn:abcdefghilmnopqrstuvzabcdefghilm:x_______/-4 20000000 375 ns/op 213 B/op 6 allocs/op +ok/08/urn:urnurnurn:urn____________________________/-4 30000000 265 ns/op 197 B/op 6 allocs/op +ok/09/urn:ciao:@!=%2c(xyz)+a,b.*@g=$_'_____________/-4 20000000 307 ns/op 248 B/op 6 allocs/op +ok/10/URN:x:abc%1dz%2f%3az_________________________/-4 30000000 259 ns/op 212 B/op 6 allocs/op +no/11/URN:-xxx:x___________________________________/-4 20000000 445 ns/op 320 B/op 6 allocs/op +no/12/urn::colon:nss_______________________________/-4 20000000 461 ns/op 320 B/op 6 allocs/op +no/13/urn:abcdefghilmnopqrstuvzabcdefghilmn:specifi/-4 10000000 660 ns/op 320 B/op 6 allocs/op +no/14/URN:a!?:x____________________________________/-4 20000000 507 ns/op 320 B/op 6 allocs/op +no/15/urn:urn:NSS__________________________________/-4 20000000 429 ns/op 288 B/op 6 allocs/op +no/16/urn:white_space:NSS__________________________/-4 20000000 482 ns/op 320 B/op 6 allocs/op +no/17/urn:concat:no_spaces_________________________/-4 20000000 539 ns/op 328 B/op 7 allocs/op +no/18/urn:a:/______________________________________/-4 20000000 470 ns/op 320 B/op 7 allocs/op +no/19/urn:UrN:NSS__________________________________/-4 20000000 399 ns/op 288 B/op 6 allocs/op +``` + +--- + +* [1]: Intel Core i7-7600U CPU @ 2.80GHz + +--- + +[![Analytics](https://ga-beacon.appspot.com/UA-49657176-1/go-urn?flat)](https://github.com/igrigorik/ga-beacon) \ No newline at end of file diff --git a/vendor/github.com/leodido/go-urn/go.mod b/vendor/github.com/leodido/go-urn/go.mod new file mode 100644 index 00000000000..98cf196df30 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/go.mod @@ -0,0 +1,5 @@ +module github.com/leodido/go-urn + +go 1.13 + +require github.com/stretchr/testify v1.6.1 diff --git a/vendor/github.com/leodido/go-urn/go.sum b/vendor/github.com/leodido/go-urn/go.sum new file mode 100644 index 00000000000..afe7890c9a1 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/leodido/go-urn/machine.go b/vendor/github.com/leodido/go-urn/machine.go new file mode 100644 index 00000000000..fe5a0cc8611 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/machine.go @@ -0,0 +1,1691 @@ +package urn + +import ( + "fmt" +) + +var ( + errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" + errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]" + errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" + errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" + errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]" + errParse = "parsing error [col %d]" +) + +const start int = 1 +const firstFinal int = 44 + +const enFail int = 46 +const enMain int = 1 + +// Machine is the interface representing the FSM +type Machine interface { + Error() error + Parse(input []byte) (*URN, error) +} + +type machine struct { + data []byte + cs int + p, pe, eof, pb int + err error + tolower []int +} + +// NewMachine creates a new FSM able to parse RFC 2141 strings. +func NewMachine() Machine { + m := &machine{} + + return m +} + +// Err returns the error that occurred on the last call to Parse. +// +// If the result is nil, then the line was parsed successfully. +func (m *machine) Error() error { + return m.err +} + +func (m *machine) text() []byte { + return m.data[m.pb:m.p] +} + +// Parse parses the input byte array as a RFC 2141 string. 
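For orientation, a minimal sketch of driving the generated parser above (the sample URN is arbitrary; the import path matches the vendor directory):

```
package main

import (
	"fmt"

	"github.com/leodido/go-urn"
)

func main() {
	m := urn.NewMachine()

	u, err := m.Parse([]byte("urn:ietf:params:scim:schemas:core:2.0:User"))
	if err != nil {
		fmt.Println("not a valid RFC 2141 URN:", err)
		return
	}

	fmt.Println(u.ID) // ietf
	fmt.Println(u.SS) // params:scim:schemas:core:2.0:User

	// urn.Parse (defined in urn.go further down) wraps the same machine
	// behind a (*URN, bool) convenience signature.
}
```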
+func (m *machine) Parse(input []byte) (*URN, error) { + m.data = input + m.p = 0 + m.pb = 0 + m.pe = len(input) + m.eof = len(input) + m.err = nil + m.tolower = []int{} + output := &URN{} + + { + m.cs = start + } + + { + if (m.p) == (m.pe) { + goto _testEof + } + switch m.cs { + case 1: + goto stCase1 + case 0: + goto stCase0 + case 2: + goto stCase2 + case 3: + goto stCase3 + case 4: + goto stCase4 + case 5: + goto stCase5 + case 6: + goto stCase6 + case 7: + goto stCase7 + case 8: + goto stCase8 + case 9: + goto stCase9 + case 10: + goto stCase10 + case 11: + goto stCase11 + case 12: + goto stCase12 + case 13: + goto stCase13 + case 14: + goto stCase14 + case 15: + goto stCase15 + case 16: + goto stCase16 + case 17: + goto stCase17 + case 18: + goto stCase18 + case 19: + goto stCase19 + case 20: + goto stCase20 + case 21: + goto stCase21 + case 22: + goto stCase22 + case 23: + goto stCase23 + case 24: + goto stCase24 + case 25: + goto stCase25 + case 26: + goto stCase26 + case 27: + goto stCase27 + case 28: + goto stCase28 + case 29: + goto stCase29 + case 30: + goto stCase30 + case 31: + goto stCase31 + case 32: + goto stCase32 + case 33: + goto stCase33 + case 34: + goto stCase34 + case 35: + goto stCase35 + case 36: + goto stCase36 + case 37: + goto stCase37 + case 38: + goto stCase38 + case 44: + goto stCase44 + case 39: + goto stCase39 + case 40: + goto stCase40 + case 45: + goto stCase45 + case 41: + goto stCase41 + case 42: + goto stCase42 + case 43: + goto stCase43 + case 46: + goto stCase46 + } + goto stOut + stCase1: + switch (m.data)[(m.p)] { + case 85: + goto tr1 + case 117: + goto tr1 + } + goto tr0 + tr0: + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr3: + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr6: + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr41: + + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr44: + + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr50: + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr52: + + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + stCase0: + st0: + m.cs = 0 + goto _out + tr1: + + m.pb = m.p + + goto st2 + st2: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof2 + } + stCase2: + switch (m.data)[(m.p)] { + case 82: + goto st3 + case 114: + goto st3 + } + goto tr0 + st3: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof3 + } + stCase3: + switch (m.data)[(m.p)] { + case 78: + goto st4 + case 110: + goto st4 + } + goto tr3 + st4: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof4 + } + stCase4: + if (m.data)[(m.p)] == 58 { + goto tr5 + } + goto tr0 + tr5: + + output.prefix = string(m.text()) + + goto st5 + st5: + if (m.p)++; (m.p) == 
(m.pe) { + goto _testEof5 + } + stCase5: + switch (m.data)[(m.p)] { + case 85: + goto tr8 + case 117: + goto tr8 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr7 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr7 + } + default: + goto tr7 + } + goto tr6 + tr7: + + m.pb = m.p + + goto st6 + st6: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof6 + } + stCase6: + switch (m.data)[(m.p)] { + case 45: + goto st7 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st7 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st7 + } + default: + goto st7 + } + goto tr6 + st7: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof7 + } + stCase7: + switch (m.data)[(m.p)] { + case 45: + goto st8 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st8 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st8 + } + default: + goto st8 + } + goto tr6 + st8: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof8 + } + stCase8: + switch (m.data)[(m.p)] { + case 45: + goto st9 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st9 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st9 + } + default: + goto st9 + } + goto tr6 + st9: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof9 + } + stCase9: + switch (m.data)[(m.p)] { + case 45: + goto st10 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st10 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st10 + } + default: + goto st10 + } + goto tr6 + st10: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof10 + } + stCase10: + switch (m.data)[(m.p)] { + case 45: + goto st11 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st11 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st11 + } + default: + goto st11 + } + goto tr6 + st11: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof11 + } + stCase11: + switch (m.data)[(m.p)] { + case 45: + goto st12 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st12 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st12 + } + default: + goto st12 + } + goto tr6 + st12: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof12 + } + stCase12: + switch (m.data)[(m.p)] { + case 45: + goto st13 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st13 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st13 + } + default: + goto st13 + } + goto tr6 + st13: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof13 + } + stCase13: + switch (m.data)[(m.p)] { + case 45: + goto st14 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st14 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st14 + } + default: + goto st14 + } 
+ goto tr6 + st14: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof14 + } + stCase14: + switch (m.data)[(m.p)] { + case 45: + goto st15 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st15 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st15 + } + default: + goto st15 + } + goto tr6 + st15: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof15 + } + stCase15: + switch (m.data)[(m.p)] { + case 45: + goto st16 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st16 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st16 + } + default: + goto st16 + } + goto tr6 + st16: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof16 + } + stCase16: + switch (m.data)[(m.p)] { + case 45: + goto st17 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st17 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st17 + } + default: + goto st17 + } + goto tr6 + st17: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof17 + } + stCase17: + switch (m.data)[(m.p)] { + case 45: + goto st18 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st18 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st18 + } + default: + goto st18 + } + goto tr6 + st18: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof18 + } + stCase18: + switch (m.data)[(m.p)] { + case 45: + goto st19 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st19 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st19 + } + default: + goto st19 + } + goto tr6 + st19: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof19 + } + stCase19: + switch (m.data)[(m.p)] { + case 45: + goto st20 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st20 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st20 + } + default: + goto st20 + } + goto tr6 + st20: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof20 + } + stCase20: + switch (m.data)[(m.p)] { + case 45: + goto st21 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st21 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st21 + } + default: + goto st21 + } + goto tr6 + st21: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof21 + } + stCase21: + switch (m.data)[(m.p)] { + case 45: + goto st22 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st22 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st22 + } + default: + goto st22 + } + goto tr6 + st22: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof22 + } + stCase22: + switch (m.data)[(m.p)] { + case 45: + goto st23 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st23 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto 
st23 + } + default: + goto st23 + } + goto tr6 + st23: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof23 + } + stCase23: + switch (m.data)[(m.p)] { + case 45: + goto st24 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st24 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st24 + } + default: + goto st24 + } + goto tr6 + st24: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof24 + } + stCase24: + switch (m.data)[(m.p)] { + case 45: + goto st25 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st25 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st25 + } + default: + goto st25 + } + goto tr6 + st25: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof25 + } + stCase25: + switch (m.data)[(m.p)] { + case 45: + goto st26 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st26 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st26 + } + default: + goto st26 + } + goto tr6 + st26: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof26 + } + stCase26: + switch (m.data)[(m.p)] { + case 45: + goto st27 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st27 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st27 + } + default: + goto st27 + } + goto tr6 + st27: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof27 + } + stCase27: + switch (m.data)[(m.p)] { + case 45: + goto st28 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st28 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st28 + } + default: + goto st28 + } + goto tr6 + st28: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof28 + } + stCase28: + switch (m.data)[(m.p)] { + case 45: + goto st29 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st29 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st29 + } + default: + goto st29 + } + goto tr6 + st29: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof29 + } + stCase29: + switch (m.data)[(m.p)] { + case 45: + goto st30 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st30 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st30 + } + default: + goto st30 + } + goto tr6 + st30: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof30 + } + stCase30: + switch (m.data)[(m.p)] { + case 45: + goto st31 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st31 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st31 + } + default: + goto st31 + } + goto tr6 + st31: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof31 + } + stCase31: + switch (m.data)[(m.p)] { + case 45: + goto st32 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st32 + } + case (m.data)[(m.p)] > 90: + if 97 <= 
(m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st32 + } + default: + goto st32 + } + goto tr6 + st32: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof32 + } + stCase32: + switch (m.data)[(m.p)] { + case 45: + goto st33 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st33 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st33 + } + default: + goto st33 + } + goto tr6 + st33: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof33 + } + stCase33: + switch (m.data)[(m.p)] { + case 45: + goto st34 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st34 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st34 + } + default: + goto st34 + } + goto tr6 + st34: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof34 + } + stCase34: + switch (m.data)[(m.p)] { + case 45: + goto st35 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st35 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st35 + } + default: + goto st35 + } + goto tr6 + st35: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof35 + } + stCase35: + switch (m.data)[(m.p)] { + case 45: + goto st36 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st36 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st36 + } + default: + goto st36 + } + goto tr6 + st36: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof36 + } + stCase36: + switch (m.data)[(m.p)] { + case 45: + goto st37 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st37 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st37 + } + default: + goto st37 + } + goto tr6 + st37: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof37 + } + stCase37: + if (m.data)[(m.p)] == 58 { + goto tr10 + } + goto tr6 + tr10: + + output.ID = string(m.text()) + + goto st38 + st38: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof38 + } + stCase38: + switch (m.data)[(m.p)] { + case 33: + goto tr42 + case 36: + goto tr42 + case 37: + goto tr43 + case 61: + goto tr42 + case 95: + goto tr42 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr42 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr42 + } + case (m.data)[(m.p)] >= 64: + goto tr42 + } + default: + goto tr42 + } + goto tr41 + tr42: + + m.pb = m.p + + goto st44 + st44: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof44 + } + stCase44: + switch (m.data)[(m.p)] { + case 33: + goto st44 + case 36: + goto st44 + case 37: + goto st39 + case 61: + goto st44 + case 95: + goto st44 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto st44 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st44 + } + case (m.data)[(m.p)] >= 64: + goto st44 + } + default: + goto st44 + } + goto tr41 + tr43: + + m.pb = m.p + + goto st39 + st39: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof39 + } + stCase39: + switch { + case 
(m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st40 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st40 + } + default: + goto tr46 + } + goto tr44 + tr46: + + m.tolower = append(m.tolower, m.p-m.pb) + + goto st40 + st40: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof40 + } + stCase40: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st45 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st45 + } + default: + goto tr48 + } + goto tr44 + tr48: + + m.tolower = append(m.tolower, m.p-m.pb) + + goto st45 + st45: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof45 + } + stCase45: + switch (m.data)[(m.p)] { + case 33: + goto st44 + case 36: + goto st44 + case 37: + goto st39 + case 61: + goto st44 + case 95: + goto st44 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto st44 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st44 + } + case (m.data)[(m.p)] >= 64: + goto st44 + } + default: + goto st44 + } + goto tr44 + tr8: + + m.pb = m.p + + goto st41 + st41: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof41 + } + stCase41: + switch (m.data)[(m.p)] { + case 45: + goto st7 + case 58: + goto tr10 + case 82: + goto st42 + case 114: + goto st42 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st7 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st7 + } + default: + goto st7 + } + goto tr6 + st42: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof42 + } + stCase42: + switch (m.data)[(m.p)] { + case 45: + goto st8 + case 58: + goto tr10 + case 78: + goto st43 + case 110: + goto st43 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st8 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st8 + } + default: + goto st8 + } + goto tr50 + st43: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof43 + } + stCase43: + if (m.data)[(m.p)] == 45 { + goto st9 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st9 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st9 + } + default: + goto st9 + } + goto tr52 + st46: + if (m.p)++; (m.p) == (m.pe) { + goto _testEof46 + } + stCase46: + switch (m.data)[(m.p)] { + case 10: + goto st0 + case 13: + goto st0 + } + goto st46 + stOut: + _testEof2: + m.cs = 2 + goto _testEof + _testEof3: + m.cs = 3 + goto _testEof + _testEof4: + m.cs = 4 + goto _testEof + _testEof5: + m.cs = 5 + goto _testEof + _testEof6: + m.cs = 6 + goto _testEof + _testEof7: + m.cs = 7 + goto _testEof + _testEof8: + m.cs = 8 + goto _testEof + _testEof9: + m.cs = 9 + goto _testEof + _testEof10: + m.cs = 10 + goto _testEof + _testEof11: + m.cs = 11 + goto _testEof + _testEof12: + m.cs = 12 + goto _testEof + _testEof13: + m.cs = 13 + goto _testEof + _testEof14: + m.cs = 14 + goto _testEof + _testEof15: + m.cs = 15 + goto _testEof + _testEof16: + m.cs = 16 + goto _testEof + _testEof17: + m.cs = 17 + goto _testEof + _testEof18: + m.cs = 18 + goto _testEof + _testEof19: + m.cs = 19 + goto _testEof + _testEof20: + m.cs = 20 + goto _testEof + _testEof21: + m.cs = 21 + goto _testEof + _testEof22: + m.cs = 22 + goto 
_testEof + _testEof23: + m.cs = 23 + goto _testEof + _testEof24: + m.cs = 24 + goto _testEof + _testEof25: + m.cs = 25 + goto _testEof + _testEof26: + m.cs = 26 + goto _testEof + _testEof27: + m.cs = 27 + goto _testEof + _testEof28: + m.cs = 28 + goto _testEof + _testEof29: + m.cs = 29 + goto _testEof + _testEof30: + m.cs = 30 + goto _testEof + _testEof31: + m.cs = 31 + goto _testEof + _testEof32: + m.cs = 32 + goto _testEof + _testEof33: + m.cs = 33 + goto _testEof + _testEof34: + m.cs = 34 + goto _testEof + _testEof35: + m.cs = 35 + goto _testEof + _testEof36: + m.cs = 36 + goto _testEof + _testEof37: + m.cs = 37 + goto _testEof + _testEof38: + m.cs = 38 + goto _testEof + _testEof44: + m.cs = 44 + goto _testEof + _testEof39: + m.cs = 39 + goto _testEof + _testEof40: + m.cs = 40 + goto _testEof + _testEof45: + m.cs = 45 + goto _testEof + _testEof41: + m.cs = 41 + goto _testEof + _testEof42: + m.cs = 42 + goto _testEof + _testEof43: + m.cs = 43 + goto _testEof + _testEof46: + m.cs = 46 + goto _testEof + + _testEof: + { + } + if (m.p) == (m.eof) { + switch m.cs { + case 44, 45: + + raw := m.text() + output.SS = string(raw) + // Iterate upper letters lowering them + for _, i := range m.tolower { + raw[i] = raw[i] + 32 + } + output.norm = string(raw) + + case 1, 2, 4: + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 3: + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 41: + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 38: + + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 42: + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 43: + + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 39, 40: + + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + } + } + + _out: + { + } + } + + if m.cs < firstFinal || m.cs == enFail { + return nil, m.err + } + + return output, nil +} diff --git a/vendor/github.com/leodido/go-urn/machine.go.rl b/vendor/github.com/leodido/go-urn/machine.go.rl new file mode 100644 index 00000000000..3bc05a651a5 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/machine.go.rl @@ -0,0 +1,159 @@ +package urn + +import ( + "fmt" +) + +var ( + errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" + errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]" + errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" + errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" + errHex = "expecting the 
specific string hex chars to be well-formed (%%alnum{2}) [col %d]" + errParse = "parsing error [col %d]" +) + +%%{ +machine urn; + +# unsigned alphabet +alphtype uint8; + +action mark { + m.pb = m.p +} + +action tolower { + m.tolower = append(m.tolower, m.p - m.pb) +} + +action set_pre { + output.prefix = string(m.text()) +} + +action set_nid { + output.ID = string(m.text()) +} + +action set_nss { + raw := m.text() + output.SS = string(raw) + // Iterate upper letters lowering them + for _, i := range m.tolower { + raw[i] = raw[i] + 32 + } + output.norm = string(raw) +} + +action err_pre { + m.err = fmt.Errorf(errPrefix, m.p) + fhold; + fgoto fail; +} + +action err_nid { + m.err = fmt.Errorf(errIdentifier, m.p) + fhold; + fgoto fail; +} + +action err_nss { + m.err = fmt.Errorf(errSpecificString, m.p) + fhold; + fgoto fail; +} + +action err_urn { + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + fhold; + fgoto fail; +} + +action err_hex { + m.err = fmt.Errorf(errHex, m.p) + fhold; + fgoto fail; +} + +action err_parse { + m.err = fmt.Errorf(errParse, m.p) + fhold; + fgoto fail; +} + +pre = ([uU][rR][nN] @err(err_pre)) >mark %set_pre; + +nid = (alnum >mark (alnum | '-'){0,31}) %set_nid; + +hex = '%' (digit | lower | upper >tolower){2} $err(err_hex); + +sss = (alnum | [()+,\-.:=@;$_!*']); + +nss = (sss | hex)+ $err(err_nss); + +fail := (any - [\n\r])* @err{ fgoto main; }; + +main := (pre ':' (nid - pre %err(err_urn)) $err(err_nid) ':' nss >mark %set_nss) $err(err_parse); + +}%% + +%% write data noerror noprefix; + +// Machine is the interface representing the FSM +type Machine interface { + Error() error + Parse(input []byte) (*URN, error) +} + +type machine struct { + data []byte + cs int + p, pe, eof, pb int + err error + tolower []int +} + +// NewMachine creates a new FSM able to parse RFC 2141 strings. +func NewMachine() Machine { + m := &machine{} + + %% access m.; + %% variable p m.p; + %% variable pe m.pe; + %% variable eof m.eof; + %% variable data m.data; + + return m +} + +// Err returns the error that occurred on the last call to Parse. +// +// If the result is nil, then the line was parsed successfully. +func (m *machine) Error() error { + return m.err +} + +func (m *machine) text() []byte { + return m.data[m.pb:m.p] +} + +// Parse parses the input byte array as a RFC 2141 string. +func (m *machine) Parse(input []byte) (*URN, error) { + m.data = input + m.p = 0 + m.pb = 0 + m.pe = len(input) + m.eof = len(input) + m.err = nil + m.tolower = []int{} + output := &URN{} + + %% write init; + %% write exec; + + if m.cs < first_final || m.cs == en_fail { + return nil, m.err + } + + return output, nil +} diff --git a/vendor/github.com/leodido/go-urn/makefile b/vendor/github.com/leodido/go-urn/makefile new file mode 100644 index 00000000000..47026d50999 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/makefile @@ -0,0 +1,39 @@ +SHELL := /bin/bash + +build: machine.go + +images: docs/urn.png + +machine.go: machine.go.rl + ragel -Z -G2 -e -o $@ $< + @sed -i '/^\/\/line/d' $@ + @$(MAKE) -s file=$@ snake2camel + @gofmt -w -s $@ + +docs/urn.dot: machine.go.rl + @mkdir -p docs + ragel -Z -e -Vp $< -o $@ + +docs/urn.png: docs/urn.dot + dot $< -Tpng -o $@ + +.PHONY: bench +bench: *_test.go machine.go + go test -bench=. -benchmem -benchtime=5s ./... + +.PHONY: tests +tests: *_test.go machine.go + go test -race -timeout 10s -coverprofile=coverage.out -covermode=atomic -v ./... 
+ +.PHONY: clean +clean: + @rm -rf docs + @rm -f machine.go + +.PHONY: snake2camel +snake2camel: + @awk -i inplace '{ \ + while ( match($$0, /(.*)([a-z]+[0-9]*)_([a-zA-Z0-9])(.*)/, cap) ) \ + $$0 = cap[1] cap[2] toupper(cap[3]) cap[4]; \ + print \ + }' $(file) \ No newline at end of file diff --git a/vendor/github.com/leodido/go-urn/urn.go b/vendor/github.com/leodido/go-urn/urn.go new file mode 100644 index 00000000000..d51a6c915be --- /dev/null +++ b/vendor/github.com/leodido/go-urn/urn.go @@ -0,0 +1,86 @@ +package urn + +import ( + "encoding/json" + "fmt" + "strings" +) + +const errInvalidURN = "invalid URN: %s" + +// URN represents an Uniform Resource Name. +// +// The general form represented is: +// +// urn:: +// +// Details at https://tools.ietf.org/html/rfc2141. +type URN struct { + prefix string // Static prefix. Equal to "urn" when empty. + ID string // Namespace identifier + SS string // Namespace specific string + norm string // Normalized namespace specific string +} + +// Normalize turns the receiving URN into its norm version. +// +// Which means: lowercase prefix, lowercase namespace identifier, and immutate namespace specific string chars (except tokens which are lowercased). +func (u *URN) Normalize() *URN { + return &URN{ + prefix: "urn", + ID: strings.ToLower(u.ID), + SS: u.norm, + } +} + +// Equal checks the lexical equivalence of the current URN with another one. +func (u *URN) Equal(x *URN) bool { + return *u.Normalize() == *x.Normalize() +} + +// String reassembles the URN into a valid URN string. +// +// This requires both ID and SS fields to be non-empty. +// Otherwise it returns an empty string. +// +// Default URN prefix is "urn". +func (u *URN) String() string { + var res string + if u.ID != "" && u.SS != "" { + if u.prefix == "" { + res += "urn" + } + res += u.prefix + ":" + u.ID + ":" + u.SS + } + + return res +} + +// Parse is responsible to create an URN instance from a byte array matching the correct URN syntax. +func Parse(u []byte) (*URN, bool) { + urn, err := NewMachine().Parse(u) + if err != nil { + return nil, false + } + + return urn, true +} + +// MarshalJSON marshals the URN to JSON string form (e.g. `"urn:oid:1.2.3.4"`). +func (u URN) MarshalJSON() ([]byte, error) { + return json.Marshal(u.String()) +} + +// MarshalJSON unmarshals a URN from JSON string form (e.g. `"urn:oid:1.2.3.4"`). +func (u *URN) UnmarshalJSON(bytes []byte) error { + var str string + if err := json.Unmarshal(bytes, &str); err != nil { + return err + } + if value, ok := Parse([]byte(str)); !ok { + return fmt.Errorf(errInvalidURN, str) + } else { + *u = *value + } + return nil +} \ No newline at end of file diff --git a/vendor/github.com/masterzen/simplexml/LICENSE b/vendor/github.com/masterzen/simplexml/LICENSE new file mode 100644 index 00000000000..ef51da2b0e8 --- /dev/null +++ b/vendor/github.com/masterzen/simplexml/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/github.com/masterzen/simplexml/dom/document.go b/vendor/github.com/masterzen/simplexml/dom/document.go new file mode 100644 index 00000000000..e871c35a398 --- /dev/null +++ b/vendor/github.com/masterzen/simplexml/dom/document.go @@ -0,0 +1,35 @@ +package dom + +import ( + "bytes" + "fmt" +) + +type Document struct { + root *Element + PrettyPrint bool + Indentation string + DocType bool +} + +func CreateDocument() *Document { + return &Document{ PrettyPrint: false, Indentation: " ", DocType: true } +} + +func (doc *Document) SetRoot(node *Element) { + node.parent = nil + doc.root = node +} + +func (doc *Document) String() string { + var b bytes.Buffer + if doc.DocType { + fmt.Fprintln(&b, ``) + } + + if doc.root != nil { + doc.root.Bytes(&b, doc.PrettyPrint, doc.Indentation, 0) + } + + return string(b.Bytes()) +} \ No newline at end of file diff --git a/vendor/github.com/masterzen/simplexml/dom/element.go b/vendor/github.com/masterzen/simplexml/dom/element.go new file mode 100644 index 00000000000..1ea16cc97b2 --- /dev/null +++ b/vendor/github.com/masterzen/simplexml/dom/element.go @@ -0,0 +1,200 @@ +package dom + +import ( + "encoding/xml" + "fmt" + "bytes" +) + +type Attr struct { + Name xml.Name // Attribute namespace and name. + Value string // Attribute value. +} + +type Element struct { + name xml.Name + children []*Element + parent *Element + content string + attributes []*Attr + namespaces []*Namespace + document *Document +} + +func CreateElement(n string) *Element { + element := &Element { name: xml.Name { Local: n } } + element.children = make([]*Element, 0, 5) + element.attributes = make([]*Attr, 0, 10) + element.namespaces = make([]*Namespace, 0, 10) + return element +} + +func (node *Element) AddChild(child *Element) *Element { + if child.parent != nil { + child.parent.RemoveChild(child) + } + child.SetParent(node) + node.children = append(node.children, child) + return node +} + +func (node *Element) RemoveChild(child *Element) *Element { + p := -1 + for i, v := range node.children { + if v == child { + p = i + break + } + } + + if p == -1 { + return node + } + + copy(node.children[p:], node.children[p+1:]) + node.children = node.children[0 : len(node.children)-1] + child.parent = nil + return node +} + +func (node *Element) SetAttr(name string, value string) *Element { + // namespaces? 
+ attr := &Attr{ Name: xml.Name { Local: name }, Value: value } + node.attributes = append(node.attributes, attr) + return node +} + +func (node *Element) SetParent(parent *Element) *Element { + node.parent = parent + return node +} + +func (node *Element) SetContent(content string) *Element { + node.content = content + return node +} + +// Add a namespace declaration to this node +func (node *Element) DeclareNamespace(ns Namespace) *Element { + // check if we already have it + prefix := node.namespacePrefix(ns.Uri) + if prefix == ns.Prefix { + return node + } + // add it + node.namespaces = append(node.namespaces, &ns) + return node +} + +func (node *Element) DeclaredNamespaces() []*Namespace { + return node.namespaces +} + +func (node *Element) SetNamespace(prefix string, uri string) { + resolved := node.namespacePrefix(uri) + if resolved == "" { + // we couldn't find the namespace, let's declare it at this node + node.namespaces = append(node.namespaces, &Namespace { Prefix: prefix, Uri: uri }) + } + node.name.Space = uri +} + +func (node *Element) Bytes(out *bytes.Buffer, indent bool, indentType string, level int) { + empty := len(node.children) == 0 && node.content == "" + content := node.content != "" +// children := len(node.children) > 0 +// ns := len(node.namespaces) > 0 +// attrs := len(node.attributes) > 0 + + indentStr := "" + nextLine := "" + if indent { + nextLine = "\n" + for i := 0; i < level; i++ { + indentStr += indentType + } + } + + if node.name.Local != "" { + if len(node.name.Space) > 0 { + // first find if ns has been declared, otherwise + prefix := node.namespacePrefix(node.name.Space) + fmt.Fprintf(out, "%s<%s:%s", indentStr, prefix, node.name.Local) + } else { + fmt.Fprintf(out, "%s<%s", indentStr, node.name.Local) + } + } + + // declared namespaces + for _, v := range node.namespaces { + prefix := node.namespacePrefix(v.Uri) + fmt.Fprintf(out, ` xmlns:%s="%s"`, prefix, v.Uri) + } + + // attributes + for _, v := range node.attributes { + if len(v.Name.Space) > 0 { + prefix := node.namespacePrefix(v.Name.Space) + fmt.Fprintf(out, ` %s:%s="%s"`, prefix, v.Name.Local, v.Value) + } else { + fmt.Fprintf(out, ` %s="%s"`, v.Name.Local, v.Value) + } + } + + // close tag + if empty { + fmt.Fprintf(out, "/>%s", nextLine) + } else { + if content { + out.WriteRune('>') + } else { + fmt.Fprintf(out, ">%s", nextLine) + } + } + + if len(node.children) > 0 { + for _, child := range node.children { + child.Bytes(out, indent, indentType, level + 1) + } + } else if node.content != "" { + //val := []byte(node.content) + //xml.EscapeText(out, val) + out.WriteString(node.content) + } + + if !empty && len(node.name.Local) > 0 { + var indentation string + if content { + indentation = "" + } else { + indentation = indentStr + } + if len(node.name.Space) > 0 { + prefix := node.namespacePrefix(node.name.Space) + fmt.Fprintf(out, "%s\n", indentation, prefix, node.name.Local) + } else { + fmt.Fprintf(out, "%s\n", indentation, node.name.Local) + } + } +} + +// Finds the prefix of the given namespace if it has been declared +// in this node or in one of its parent +func (node *Element) namespacePrefix(uri string) string { + for _, ns := range node.namespaces { + if ns.Uri == uri { + return ns.Prefix + } + } + if node.parent == nil { + return "" + } + return node.parent.namespacePrefix(uri) +} + + +func (node *Element) String() string { + var b bytes.Buffer + node.Bytes(&b, false, "", 0) + return string(b.Bytes()) +} \ No newline at end of file diff --git 
a/vendor/github.com/masterzen/simplexml/dom/namespace.go b/vendor/github.com/masterzen/simplexml/dom/namespace.go new file mode 100644 index 00000000000..3961e654ddd --- /dev/null +++ b/vendor/github.com/masterzen/simplexml/dom/namespace.go @@ -0,0 +1,10 @@ +package dom + +type Namespace struct { + Prefix string + Uri string +} + +func (ns *Namespace) SetTo(node *Element) { + node.SetNamespace(ns.Prefix, ns.Uri) +} diff --git a/vendor/github.com/masterzen/winrm/.gitignore b/vendor/github.com/masterzen/winrm/.gitignore new file mode 100644 index 00000000000..c2398c5f135 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.idea + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +*.coverprofile +*.cov diff --git a/vendor/github.com/masterzen/winrm/.travis.yml b/vendor/github.com/masterzen/winrm/.travis.yml new file mode 100644 index 00000000000..2edf6a3014c --- /dev/null +++ b/vendor/github.com/masterzen/winrm/.travis.yml @@ -0,0 +1,29 @@ +arch: +- amd64 +- ppc64le +sudo: false +language: go +go: +- 1.7 +- 1.8 +- 1.9 +- '1.10' +install: +- make deps +- go build -o winrm-cli +before_install: +- go get github.com/axw/gocov/gocov +- go get github.com/mattn/goveralls +- go get golang.org/x/tools/cmd/cover +- go get github.com/modocache/gover +- go get gopkg.in/check.v1 +script: +- make ci +after_script: +- $HOME/gopath/bin/goveralls -service="travis-ci" -coverprofile=profile.cov -repotoken $COVERALLS_TOKEN +matrix: + allow_failures: + - go: tip +env: + global: + secure: "GTrEtbp3sq14Jjz34pgIO0/Zv19YaDOOUJay4qnzGxs527HkW7YdsWENz0/yGHet+0jMOPatfP3uLaQHVFCNFZLfMYVmD5apMl7hPFrCaDZVvI9+ZwngIZ8gHzcf2Q+L6LxUT523ypjmRR+T1qYfbOy4UXlfGCyHFBbRB7AbMDk=" diff --git a/vendor/github.com/masterzen/winrm/LICENSE b/vendor/github.com/masterzen/winrm/LICENSE new file mode 100644 index 00000000000..ef51da2b0e8 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/masterzen/winrm/Makefile b/vendor/github.com/masterzen/winrm/Makefile new file mode 100644 index 00000000000..e28e539b575 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/Makefile @@ -0,0 +1,34 @@ +NO_COLOR=\033[0m +OK_COLOR=\033[32;01m +ERROR_COLOR=\033[31;01m +WARN_COLOR=\033[33;01m +DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./... | fgrep -v 'winrm') + +all: deps + @mkdir -p bin/ + @printf "$(OK_COLOR)==> Building$(NO_COLOR)\n" + @go build github.com/masterzen/winrm + +deps: + @printf "$(OK_COLOR)==> Installing dependencies$(NO_COLOR)\n" + @go get -d -v ./... + @echo $(DEPS) | xargs -n1 go get -d + +updatedeps: + go list ./... | xargs go list -f '{{join .Deps "\n"}}' | grep -v github.com/masterzen/winrm | sort -u | xargs go get -f -u -v + +clean: + @rm -rf bin/ pkg/ src/ + +format: + go fmt ./... 
+ +ci: deps + @printf "$(OK_COLOR)==> Testing with Coveralls...$(NO_COLOR)\n" + "$(CURDIR)/scripts/test.sh" + +test: deps + @printf "$(OK_COLOR)==> Testing...$(NO_COLOR)\n" + go test ./... + +.PHONY: all clean deps format test updatedeps diff --git a/vendor/github.com/masterzen/winrm/README.md b/vendor/github.com/masterzen/winrm/README.md new file mode 100644 index 00000000000..bfcb7c1dba3 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/README.md @@ -0,0 +1,330 @@ +# WinRM for Go + +_Note_: if you're looking for the `winrm` command-line tool, this has been splitted from this project and is available at [winrm-cli](https://github.com/masterzen/winrm-cli) + +This is a Go library to execute remote commands on Windows machines through +the use of WinRM/WinRS. + +_Note_: this library doesn't support domain users (it doesn't support GSSAPI nor Kerberos). It's primary target is to execute remote commands on EC2 windows machines. + +[![Build Status](https://travis-ci.org/masterzen/winrm.svg?branch=master)](https://travis-ci.org/masterzen/winrm) +[![Coverage Status](https://coveralls.io/repos/masterzen/winrm/badge.png)](https://coveralls.io/r/masterzen/winrm) + +## Contact + +- Bugs: https://github.com/masterzen/winrm/issues + + +## Getting Started +WinRM is available on Windows Server 2008 and up. This project natively supports basic authentication for local accounts, see the steps in the next section on how to prepare the remote Windows machine for this scenario. The authentication model is pluggable, see below for an example on using Negotiate/NTLM authentication (e.g. for connecting to vanilla Azure VMs) or Kerberos authentication (using domain accounts). + +_Note_: This library only supports Golang 1.7+ + +### Preparing the remote Windows machine for Basic authentication +This project supports only basic authentication for local accounts (domain users are not supported). The remote windows system must be prepared for winrm: + +_For a PowerShell script to do what is described below in one go, check [Richard Downer's blog](http://www.frontiertown.co.uk/2011/12/overthere-control-windows-from-java/)_ + +On the remote host, a PowerShell prompt, using the __Run as Administrator__ option and paste in the following lines: + + winrm quickconfig + y + winrm set winrm/config/service/Auth '@{Basic="true"}' + winrm set winrm/config/service '@{AllowUnencrypted="true"}' + winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="1024"}' + +__N.B.:__ The Windows Firewall needs to be running to run this command. See [Microsoft Knowledge Base article #2004640](http://support.microsoft.com/kb/2004640). + +__N.B.:__ Do not disable Negotiate authentication as the `winrm` command itself uses this for internal authentication, and you risk getting a system where `winrm` doesn't work anymore. + +__N.B.:__ The `MaxMemoryPerShellMB` option has no effects on some Windows 2008R2 systems because of a WinRM bug. Make sure to install the hotfix described [Microsoft Knowledge Base article #2842230](http://support.microsoft.com/kb/2842230) if you need to run commands that uses more than 150MB of memory. + +For more information on WinRM, please refer to the online documentation at Microsoft's DevCenter. + +### Preparing the remote Windows machine for kerberos authentication +This project supports domain users via kerberos authentication. 
The remote windows system must be prepared for winrm: + +On the remote host, a PowerShell prompt, using the __Run as Administrator__ option and paste in the following lines: + + winrm quickconfig + y + winrm set winrm/config/service '@{AllowUnencrypted="true"}' + winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="1024"}' + +All __N.B__ points of "Preparing the remote Windows machine for Basic authentication" also applies. + + +### Building the winrm go and executable + +You can build winrm from source: + +```sh +git clone https://github.com/masterzen/winrm +cd winrm +make +``` + +_Note_: this winrm code doesn't depend anymore on [Gokogiri](https://github.com/moovweb/gokogiri) which means it is now in pure Go. + +_Note_: you need go 1.5+. Please check your installation with + +``` +go version +``` + +## Command-line usage + +For command-line usage check the [winrm-cli project](https://github.com/masterzen/winrm-cli) + +## Library Usage + +**Warning the API might be subject to change.** + +For the fast version (this doesn't allow to send input to the command) and it's using HTTP as the transport: + +```go +package main + +import ( + "github.com/masterzen/winrm" + "os" +) + +endpoint := winrm.NewEndpoint(host, 5986, false, false, nil, nil, nil, 0) +client, err := winrm.NewClient(endpoint, "Administrator", "secret") +if err != nil { + panic(err) +} +client.Run("ipconfig /all", os.Stdout, os.Stderr) +``` + +or +```go +package main +import ( + "github.com/masterzen/winrm" + "fmt" + "os" +) + +endpoint := winrm.NewEndpoint("localhost", 5985, false, false, nil, nil, nil, 0) +client, err := winrm.NewClient(endpoint,"Administrator", "secret") +if err != nil { + panic(err) +} + +_, err := client.RunWithInput("ipconfig", os.Stdout, os.Stderr, os.Stdin) +if err != nil { + panic(err) +} + +``` + +By passing a TransportDecorator in the Parameters struct it is possible to use different Transports (e.g. NTLM) + +```go +package main +import ( + "github.com/masterzen/winrm" + "fmt" + "os" +) + +endpoint := winrm.NewEndpoint("localhost", 5985, false, false, nil, nil, nil, 0) + +params := DefaultParameters +params.TransportDecorator = func() Transporter { return &ClientNTLM{} } + +client, err := NewClientWithParameters(endpoint, "test", "test", params) +if err != nil { + panic(err) +} + +_, err := client.RunWithInput("ipconfig", os.Stdout, os.Stderr, os.Stdin) +if err != nil { + panic(err) +} + +``` + +Passing a TransportDecorator also permit to use Kerberos authentication + +```go +package main +import ( + "os" + "fmt" + "github.com/masterzen/winrm" +) + +endpoint := winrm.NewEndpoint("srv-win", 5985, false, false, nil, nil, nil, 0) + +params := winrm.DefaultParameters +params.TransportDecorator = func() Transporter { + return &winrm.ClientKerberos{ + Username: "test", + Password: "s3cr3t", + Hostname: "srv-win", + Realm: "DOMAIN.LAN", + Port: 5985, + Proto: "http", + KrbConf: "/etc/krb5.conf", + SPN: fmt.Sprintf("HTTP/%s", hostname), + } +} + +client, err := NewClientWithParameters(endpoint, "test", "s3cr3t", params) +if err != nil { + panic(err) +} + +_, err := client.RunWithInput("ipconfig", os.Stdout, os.Stderr, os.Stdin) +if err != nil { + panic(err) +} + +``` + + +By passing a Dial in the Parameters struct it is possible to use different dialer (e.g. 
tunnel through SSH) + +```go +package main + + import ( + "github.com/masterzen/winrm" + "golang.org/x/crypto/ssh" + "os" + ) + + func main() { + + sshClient, err := ssh.Dial("tcp","localhost:22", &ssh.ClientConfig{ + User:"ubuntu", + Auth: []ssh.AuthMethod{ssh.Password("ubuntu")}, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + }) + + endpoint := winrm.NewEndpoint("other-host", 5985, false, false, nil, nil, nil, 0) + + params := winrm.DefaultParameters + params.Dial = sshClient.Dial + + client, err := winrm.NewClientWithParameters(endpoint, "test", "test", params) + if err != nil { + panic(err) + } + + _, err = client.RunWithInput("ipconfig", os.Stdout, os.Stderr, os.Stdin) + if err != nil { + panic(err) + } + } + +``` + + +For a more complex example, it is possible to call the various functions directly: + +```go +package main + +import ( + "github.com/masterzen/winrm" + "fmt" + "bytes" + "os" +) + +stdin := bytes.NewBufferString("ipconfig /all") +endpoint := winrm.NewEndpoint("localhost", 5985, false, false,nil, nil, nil, 0) +client , err := winrm.NewClient(endpoint, "Administrator", "secret") +if err != nil { + panic(err) +} +shell, err := client.CreateShell() +if err != nil { + panic(err) +} +var cmd *winrm.Command +cmd, err = shell.Execute("cmd.exe") +if err != nil { + panic(err) +} + +go io.Copy(cmd.Stdin, stdin) +go io.Copy(os.Stdout, cmd.Stdout) +go io.Copy(os.Stderr, cmd.Stderr) + +cmd.Wait() +shell.Close() +``` + +For using HTTPS authentication with x 509 cert without checking the CA +```go +package main + +import ( + "github.com/masterzen/winrm" + "io/ioutil" + "log" + "os" +) + +func main() { + clientCert, err := ioutil.ReadFile("/home/example/winrm_client_cert.pem") + if err != nil { + log.Fatalf("failed to read client certificate: %q", err) + } + + clientKey, err := ioutil.ReadFile("/home/example/winrm_client_key.pem") + if err != nil { + log.Fatalf("failed to read client key: %q", err) + } + + winrm.DefaultParameters.TransportDecorator = func() winrm.Transporter { + // winrm https module + return &winrm.ClientAuthRequest{} + } + + endpoint := winrm.NewEndpoint( + "192.168.100.2", // host to connect to + 5986, // winrm port + true, // use TLS + true, // Allow insecure connection + nil, // CA certificate + clientCert, // Client Certificate + clientKey, // Client Key + 0, // Timeout + ) + client, err := winrm.NewClient(endpoint, "Administrator", "") + if err != nil { + log.Fatalf("failed to create client: %q", err) + } + _, err = client.Run("whoami", os.Stdout, os.Stderr) + if err != nil { + log.Fatalf("failed to run command: %q", err) + } +} +``` + +## Developing on WinRM + +If you wish to work on `winrm` itself, you'll first need [Go](http://golang.org) +installed (version 1.5+ is _required_). Make sure you have Go properly installed, +including setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH). + +For some additional dependencies, Go needs [Mercurial](http://mercurial.selenic.com/) +and [Bazaar](http://bazaar.canonical.com/en/) to be installed. +Winrm itself doesn't require these, but a dependency of a dependency does. + +Next, clone this repository into `$GOPATH/src/github.com/masterzen/winrm` and +then just type `make`. + +You can run tests by typing `make test`. + +If you make any changes to the code, run `make format` in order to automatically +format the code according to Go standards. + +When new dependencies are added to winrm you can use `make updatedeps` to +get the latest and subsequently use `make` to compile. 
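The usage snippets in the README above are illustrative fragments rather than complete programs (variables are declared at package level and `func main()` is omitted). For reference, a minimal self-contained sketch of the basic plain-HTTP flow might look like the following; the host, port, and credentials are placeholders, and error handling is reduced to `log.Fatalf`:

```go
package main

import (
	"log"
	"os"

	"github.com/masterzen/winrm"
)

func main() {
	// Plain-HTTP endpoint on the default WinRM port 5985; no client certs,
	// no custom CA, and a zero timeout (the library falls back to its 60s
	// default). Host and credentials are placeholders.
	endpoint := winrm.NewEndpoint("192.168.1.10", 5985, false, false, nil, nil, nil, 0)

	client, err := winrm.NewClient(endpoint, "Administrator", "secret")
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}

	// Run streams the remote process stdout/stderr into the given writers
	// and returns the remote exit code once the command finishes.
	exitCode, err := client.Run("ipconfig /all", os.Stdout, os.Stderr)
	if err != nil {
		log.Fatalf("failed to run command: %v", err)
	}
	log.Printf("remote command exited with code %d", exitCode)
}
```

Note that, as the `NewClient` documentation in client.go below states, no connection is made when the client is created; errors from an unreachable host therefore surface on `Run` (which creates the shell) rather than on `NewClient`.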
diff --git a/vendor/github.com/masterzen/winrm/auth.go b/vendor/github.com/masterzen/winrm/auth.go new file mode 100644 index 00000000000..4130b0e0960 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/auth.go @@ -0,0 +1,123 @@ +package winrm + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "net" + "net/http" + "strings" + "time" + + "github.com/masterzen/winrm/soap" +) + +//ClientAuthRequest ClientAuthRequest +type ClientAuthRequest struct { + transport http.RoundTripper + dial func(network, addr string) (net.Conn, error) +} + +//Transport Transport +func (c *ClientAuthRequest) Transport(endpoint *Endpoint) error { + cert, err := tls.X509KeyPair(endpoint.Cert, endpoint.Key) + if err != nil { + return err + } + + dial := (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial + + if c.dial != nil { + dial = c.dial + } + + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{ + Renegotiation: tls.RenegotiateOnceAsClient, + InsecureSkipVerify: endpoint.Insecure, + Certificates: []tls.Certificate{cert}, + }, + Dial: dial, + ResponseHeaderTimeout: endpoint.Timeout, + } + + if endpoint.CACert != nil && len(endpoint.CACert) > 0 { + certPool, err := readCACerts(endpoint.CACert) + if err != nil { + return err + } + + transport.TLSClientConfig.RootCAs = certPool + } + + c.transport = transport + + return nil +} + +// parse func reads the response body and return it as a string +func parse(response *http.Response) (string, error) { + + // if we recived the content we expected + if strings.Contains(response.Header.Get("Content-Type"), "application/soap+xml") { + body, err := ioutil.ReadAll(response.Body) + defer func() { + // defer can modify the returned value before + // it is actually passed to the calling statement + if errClose := response.Body.Close(); errClose != nil && err == nil { + err = errClose + } + }() + if err != nil { + return "", fmt.Errorf("error while reading request body %w", err) + } + + return string(body), nil + } + + return "", fmt.Errorf("invalid content type") +} + +//Post Post +func (c ClientAuthRequest) Post(client *Client, request *soap.SoapMessage) (string, error) { + httpClient := &http.Client{Transport: c.transport} + + req, err := http.NewRequest("POST", client.url, strings.NewReader(request.String())) + if err != nil { + return "", fmt.Errorf("impossible to create http request %w", err) + } + + req.Header.Set("Content-Type", soapXML+";charset=UTF-8") + req.Header.Set("Authorization", "http://schemas.dmtf.org/wbem/wsman/1/wsman/secprofile/https/mutual") + + resp, err := httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("unknown error %w", err) + } + + body, err := parse(resp) + if err != nil { + return "", fmt.Errorf("http response error: %d - %w", resp.StatusCode, err) + } + + // if we have different 200 http status code + // we must replace the error + defer func() { + if resp.StatusCode != 200 { + body, err = "", fmt.Errorf("http error %d: %s", resp.StatusCode, body) + } + }() + + return body, err +} + +//NewClientAuthRequestWithDial NewClientAuthRequestWithDial +func NewClientAuthRequestWithDial(dial func(network, addr string) (net.Conn, error)) *ClientAuthRequest { + return &ClientAuthRequest{ + dial: dial, + } +} diff --git a/vendor/github.com/masterzen/winrm/client.go b/vendor/github.com/masterzen/winrm/client.go new file mode 100644 index 00000000000..464f8b0a1dd --- /dev/null +++ b/vendor/github.com/masterzen/winrm/client.go @@ -0,0 +1,236 @@ +package winrm + +import ( + 
"bytes" + "crypto/x509" + "errors" + "fmt" + "io" + "sync" + + "github.com/masterzen/winrm/soap" +) + +// Client struct +type Client struct { + Parameters + username string + password string + useHTTPS bool + url string + http Transporter +} + +// Transporter does different transporters +// and init a Post request based on them +type Transporter interface { + // init request baset on the transport configurations + Post(*Client, *soap.SoapMessage) (string, error) + Transport(*Endpoint) error +} + +// NewClient will create a new remote client on url, connecting with user and password +// This function doesn't connect (connection happens only when CreateShell is called) +func NewClient(endpoint *Endpoint, user, password string) (*Client, error) { + return NewClientWithParameters(endpoint, user, password, DefaultParameters) +} + +// NewClientWithParameters will create a new remote client on url, connecting with user and password +// This function doesn't connect (connection happens only when CreateShell is called) +func NewClientWithParameters(endpoint *Endpoint, user, password string, params *Parameters) (*Client, error) { + + // alloc a new client + client := &Client{ + Parameters: *params, + username: user, + password: password, + url: endpoint.url(), + useHTTPS: endpoint.HTTPS, + // default transport + http: &clientRequest{dial: params.Dial}, + } + + // switch to other transport if provided + if params.TransportDecorator != nil { + client.http = params.TransportDecorator() + } + + // set the transport to some endpoint configuration + if err := client.http.Transport(endpoint); err != nil { + return nil, fmt.Errorf("Can't parse this key and certs: %w", err) + } + + return client, nil +} + +func readCACerts(certs []byte) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + if !certPool.AppendCertsFromPEM(certs) { + return nil, fmt.Errorf("Unable to read certificates") + } + + return certPool, nil +} + +// CreateShell will create a WinRM Shell, +// which is the prealable for running commands. +func (c *Client) CreateShell() (*Shell, error) { + request := NewOpenShellRequest(c.url, &c.Parameters) + defer request.Free() + + response, err := c.sendRequest(request) + if err != nil { + return nil, err + } + + shellID, err := ParseOpenShellResponse(response) + if err != nil { + return nil, err + } + + return c.NewShell(shellID), nil + +} + +// NewShell will create a new WinRM Shell for the given shellID +func (c *Client) NewShell(id string) *Shell { + return &Shell{client: c, id: id} +} + +// sendRequest exec the custom http func from the client +func (c *Client) sendRequest(request *soap.SoapMessage) (string, error) { + return c.http.Post(c, request) +} + +// Run will run command on the the remote host, writing the process stdout and stderr to +// the given writers. Note with this method it isn't possible to inject stdin. 
+func (c *Client) Run(command string, stdout io.Writer, stderr io.Writer) (int, error) { + shell, err := c.CreateShell() + if err != nil { + return 1, err + } + defer shell.Close() + cmd, err := shell.Execute(command) + if err != nil { + return 1, err + } + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + io.Copy(stdout, cmd.Stdout) + }() + + go func() { + defer wg.Done() + io.Copy(stderr, cmd.Stderr) + }() + + cmd.Wait() + wg.Wait() + cmd.Close() + + return cmd.ExitCode(), cmd.err +} + +// RunWithString will run command on the the remote host, returning the process stdout and stderr +// as strings, and using the input stdin string as the process input +func (c *Client) RunWithString(command string, stdin string) (string, string, int, error) { + shell, err := c.CreateShell() + if err != nil { + return "", "", 1, err + } + defer shell.Close() + + cmd, err := shell.Execute(command) + if err != nil { + return "", "", 1, err + } + + if len(stdin) > 0 { + defer cmd.Stdin.Close() + _, err := cmd.Stdin.Write([]byte(stdin)) + if err != nil { + return "", "", -1, err + } + } + + var outWriter, errWriter bytes.Buffer + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + io.Copy(&outWriter, cmd.Stdout) + }() + + go func() { + defer wg.Done() + io.Copy(&errWriter, cmd.Stderr) + }() + + cmd.Wait() + wg.Wait() + cmd.Close() + + return outWriter.String(), errWriter.String(), cmd.ExitCode(), cmd.err +} + +//RunPSWithString will basically wrap your code to execute commands in powershell.exe. Default RunWithString +// runs commands in cmd.exe +func (c *Client) RunPSWithString(command string, stdin string) (string, string, int, error) { + command = Powershell(command) + + // Let's check if we actually created a command + if command == "" { + return "", "", 1, errors.New("cannot encode the given command") + } + + // Specify powershell.exe to run encoded command + return c.RunWithString(command, stdin) +} + +// RunWithInput will run command on the the remote host, writing the process stdout and stderr to +// the given writers, and injecting the process stdin with the stdin reader. +// Warning stdin (not stdout/stderr) are bufferized, which means reading only one byte in stdin will +// send a winrm http packet to the remote host. If stdin is a pipe, it might be better for +// performance reasons to buffer it. 
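+// Example (sketch, mirroring the README's cmd.exe usage; the writers and the
+// stdin reader are the caller's choice):
+//
+//	exitCode, err := client.RunWithInput("cmd.exe", os.Stdout, os.Stderr, bytes.NewBufferString("ipconfig /all"))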
+func (c Client) RunWithInput(command string, stdout, stderr io.Writer, stdin io.Reader) (int, error) { + shell, err := c.CreateShell() + if err != nil { + return 1, err + } + defer shell.Close() + cmd, err := shell.Execute(command) + if err != nil { + return 1, err + } + + var wg sync.WaitGroup + wg.Add(3) + + go func() { + defer func() { + cmd.Stdin.Close() + wg.Done() + }() + io.Copy(cmd.Stdin, stdin) + }() + go func() { + defer wg.Done() + io.Copy(stdout, cmd.Stdout) + }() + go func() { + defer wg.Done() + io.Copy(stderr, cmd.Stderr) + }() + + cmd.Wait() + wg.Wait() + cmd.Close() + + return cmd.ExitCode(), cmd.err + +} diff --git a/vendor/github.com/masterzen/winrm/command.go b/vendor/github.com/masterzen/winrm/command.go new file mode 100644 index 00000000000..1f57d34fb55 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/command.go @@ -0,0 +1,261 @@ +package winrm + +import ( + "bytes" + "errors" + "io" + "strings" + "sync" +) + +type commandWriter struct { + *Command + mutex sync.Mutex + eof bool +} + +type commandReader struct { + *Command + write *io.PipeWriter + read *io.PipeReader + stream string +} + +// Command represents a given command running on a Shell. This structure allows to get access +// to the various stdout, stderr and stdin pipes. +type Command struct { + client *Client + shell *Shell + id string + exitCode int + finished bool + err error + + Stdin *commandWriter + Stdout *commandReader + Stderr *commandReader + + done chan struct{} + cancel chan struct{} +} + +func newCommand(shell *Shell, ids string) *Command { + command := &Command{ + shell: shell, + client: shell.client, + id: ids, + exitCode: 0, + err: nil, + done: make(chan struct{}), + cancel: make(chan struct{}), + } + + command.Stdout = newCommandReader("stdout", command) + command.Stdin = &commandWriter{ + Command: command, + eof: false, + } + command.Stderr = newCommandReader("stderr", command) + + go fetchOutput(command) + + return command +} + +func newCommandReader(stream string, command *Command) *commandReader { + read, write := io.Pipe() + return &commandReader{ + Command: command, + stream: stream, + write: write, + read: read, + } +} + +func fetchOutput(command *Command) { + for { + select { + case <-command.cancel: + close(command.done) + return + default: + finished, err := command.slurpAllOutput() + if finished { + command.err = err + close(command.done) + return + } + } + } +} + +func (c *Command) check() error { + if c.id == "" { + return errors.New("Command has already been closed") + } + if c.shell == nil { + return errors.New("Command has no associated shell") + } + if c.client == nil { + return errors.New("Command has no associated client") + } + return nil +} + +// Close will terminate the running command +func (c *Command) Close() error { + if err := c.check(); err != nil { + return err + } + + select { // close cancel channel if it's still open + case <-c.cancel: + default: + close(c.cancel) + } + + request := NewSignalRequest(c.client.url, c.shell.id, c.id, &c.client.Parameters) + defer request.Free() + + _, err := c.client.sendRequest(request) + return err +} + +func (c *Command) slurpAllOutput() (bool, error) { + if err := c.check(); err != nil { + c.Stderr.write.CloseWithError(err) + c.Stdout.write.CloseWithError(err) + return true, err + } + + request := NewGetOutputRequest(c.client.url, c.shell.id, c.id, "stdout stderr", &c.client.Parameters) + defer request.Free() + + response, err := c.client.sendRequest(request) + if err != nil { + if strings.Contains(err.Error(), 
"OperationTimeout") { + // Operation timeout because there was no command output + return false, err + } + if strings.Contains(err.Error(), "EOF") { + c.exitCode = 16001 + } + + c.Stderr.write.CloseWithError(err) + c.Stdout.write.CloseWithError(err) + return true, err + } + + var exitCode int + var stdout, stderr bytes.Buffer + finished, exitCode, err := ParseSlurpOutputErrResponse(response, &stdout, &stderr) + if err != nil { + c.Stderr.write.CloseWithError(err) + c.Stdout.write.CloseWithError(err) + return true, err + } + if stdout.Len() > 0 { + c.Stdout.write.Write(stdout.Bytes()) + } + if stderr.Len() > 0 { + c.Stderr.write.Write(stderr.Bytes()) + } + if finished { + c.exitCode = exitCode + c.Stderr.write.Close() + c.Stdout.write.Close() + } + + return finished, nil +} + +func (c *Command) sendInput(data []byte, eof bool) error { + if err := c.check(); err != nil { + return err + } + + request := NewSendInputRequest(c.client.url, c.shell.id, c.id, data, eof, &c.client.Parameters) + defer request.Free() + + _, err := c.client.sendRequest(request) + return err +} + +// ExitCode returns command exit code when it is finished. Before that the result is always 0. +func (c *Command) ExitCode() int { + return c.exitCode +} + +// Wait function will block the current goroutine until the remote command terminates. +func (c *Command) Wait() { + // block until finished + <-c.done +} + +// Write data to this Pipe +// commandWriter implements io.Writer and io.Closer interface +func (w *commandWriter) Write(data []byte) (int, error) { + w.mutex.Lock() + defer w.mutex.Unlock() + + if w.eof { + return 0, io.ErrClosedPipe + } + + var ( + written int + err error + ) + origLen := len(data) + for len(data) > 0 { + // never send more data than our EnvelopeSize. + n := min(w.client.Parameters.EnvelopeSize-1000, len(data)) + if err := w.sendInput(data[:n], false); err != nil { + break + } + data = data[n:] + written += n + } + + // signal that we couldn't write all data + if err == nil && written < origLen { + err = io.ErrShortWrite + } + + return written, err +} + +// Write data to this Pipe and mark EOF +func (w *commandWriter) WriteClose(data []byte) (int, error) { + w.eof = true + return w.Write(data) +} + +func min(a int, b int) int { + if a < b { + return a + } + return b +} + +// Close method wrapper +// commandWriter implements io.Closer interface +func (w *commandWriter) Close() error { + w.mutex.Lock() + defer w.mutex.Unlock() + + if w.eof { + return io.ErrClosedPipe + } + w.eof = true + return w.sendInput(nil, w.eof) +} + +// Read data from this Pipe +func (r *commandReader) Read(buf []byte) (int, error) { + n, err := r.read.Read(buf) + if err != nil && err != io.EOF { + return 0, err + } + return n, err +} diff --git a/vendor/github.com/masterzen/winrm/endpoint.go b/vendor/github.com/masterzen/winrm/endpoint.go new file mode 100644 index 00000000000..13e2ddae2b2 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/endpoint.go @@ -0,0 +1,63 @@ +package winrm + +import ( + "fmt" + "time" +) + +// Endpoint struct holds configurations +// for the server endpoint +type Endpoint struct { + // host name or ip address + Host string + // port to determine if it's http or https default + // winrm ports (http:5985, https:5986).Versions + // of winrm can be customized to listen on other ports + Port int + // set the flag true for https connections + HTTPS bool + // set the flag true for skipping ssl verifications + Insecure bool + // if set, used to verify the hostname on the returned certificate + 
TLSServerName string + // pointer pem certs, and key + CACert []byte // cert auth to intdetify the server cert + Key []byte // public key for client auth connections + Cert []byte // cert for client auth connections + // duration timeout for the underling tcp conn(http/https base protocol) + // if the time exceeds the connection is cloded/timeouts + Timeout time.Duration +} + +func (ep *Endpoint) url() string { + var scheme string + if ep.HTTPS { + scheme = "https" + } else { + scheme = "http" + } + + return fmt.Sprintf("%s://%s:%d/wsman", scheme, ep.Host, ep.Port) +} + +// NewEndpoint returns new pointer to struct Endpoint, with a default 60s response header timeout +func NewEndpoint(host string, port int, https bool, insecure bool, Cacert, cert, key []byte, timeout time.Duration) *Endpoint { + endpoint := &Endpoint{ + Host: host, + Port: port, + HTTPS: https, + Insecure: insecure, + CACert: Cacert, + Key: key, + Cert: cert, + } + // if the timeout was set + if timeout != 0 { + endpoint.Timeout = timeout + } else { + // assign default 60sec timeout + endpoint.Timeout = 60 * time.Second + } + + return endpoint +} diff --git a/vendor/github.com/masterzen/winrm/error.go b/vendor/github.com/masterzen/winrm/error.go new file mode 100644 index 00000000000..aaa50dcfed6 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/error.go @@ -0,0 +1,13 @@ +package winrm + +import "fmt" + +// errWinrm generic error struct +type errWinrm struct { + message string +} + +// ErrWinrm implements the Error type interface +func (e errWinrm) Error() string { + return fmt.Sprintf("%s", e.message) +} diff --git a/vendor/github.com/masterzen/winrm/go.mod b/vendor/github.com/masterzen/winrm/go.mod new file mode 100644 index 00000000000..002a3383e9b --- /dev/null +++ b/vendor/github.com/masterzen/winrm/go.mod @@ -0,0 +1,23 @@ +module github.com/masterzen/winrm + +go 1.14 + +require ( + github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e + github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6 + github.com/gofrs/uuid v4.2.0+incompatible + github.com/google/go-cmp v0.5.6 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jcmturner/goidentity/v6 v6.0.1 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.2 + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/kr/pretty v0.1.0 // indirect + github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 + golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect + golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect + golang.org/x/text v0.3.7 + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 +) diff --git a/vendor/github.com/masterzen/winrm/go.sum b/vendor/github.com/masterzen/winrm/go.sum new file mode 100644 index 00000000000..897cf0b48c4 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/go.sum @@ -0,0 +1,88 @@ +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e h1:ZU22z/2YRFLyf/P4ZwUYSdNCWsMEI0VeyrFoI2rAhJQ= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6 h1:w0E0fgc1YafGEh5cROhlROMWXiNoZqApk2PDN0M1+Ns= 
+github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/inconshreveable/log15 v0.0.0-20201112154412-8562bdadbbac/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg= +github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/myesui/uuid v1.0.0/go.mod h1:2CDfNgU0LR8mIdO8vdWd8i9gWWxLlcoIGGpSNgafq84= +github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/revel/config v1.0.0/go.mod h1:GT4a9px5kDGRqLizcw/md0QFErrhen76toz4qS3oIoI= +github.com/revel/log15 v2.11.20+incompatible/go.mod h1:l0WmLRs+IM1hBl4noJiBc2tZQiOgZyXzS1mdmFt+5Gc= +github.com/revel/pathtree v0.0.0-20140121041023-41257a1839e9/go.mod h1:TmlwoRLDvgRjoTe6rbsxIaka/CulzYrgfef7iNJcEWY= +github.com/revel/revel v1.0.0/go.mod h1:VZWJnHjpDEtuGUuZJ2NO42XryitrtwsdVaJxfDeo5yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/twinj/uuid v1.0.0/go.mod h1:mMgcE1RHFUFqe5AfiwlINXisXfDGro23fWdPUfOMjRY= +github.com/xeonx/timeago v1.0.0-rc4/go.mod h1:qDLrYEFynLO7y5Ho7w3GwgtYgpy5UfhcXIIQvMKVDkA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/stack.v0 v0.0.0-20141108040640-9b43fcefddd0/go.mod h1:kl/bNzW/jgTgUOCGDj3XPn9/Hbfhw6pjfBRUnaTioFQ= +gopkg.in/stretchr/testify.v1 v1.2.2/go.mod h1:QI5V/q6UbPmuhtm10CaFZxED9NreB8PnFYN9JcR6TxU= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/masterzen/winrm/http.go b/vendor/github.com/masterzen/winrm/http.go new file mode 100644 index 00000000000..d9e47b7c6c7 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/http.go @@ -0,0 +1,130 @@ +package winrm + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/masterzen/winrm/soap" +) + +var soapXML = "application/soap+xml" + +// body func reads the response body and return it as a string +func body(response *http.Response) (string, error) { + + // if we recived the content we expected + if strings.Contains(response.Header.Get("Content-Type"), "application/soap+xml") { + body, err := ioutil.ReadAll(response.Body) + defer func() { + // defer can modify the returned value before + // it is actually passed to the calling statement + if errClose := response.Body.Close(); errClose != nil && err == nil { + err = errClose + } + }() + if err != nil { + return "", fmt.Errorf("error while reading request body %w", err) + } + + return string(body), nil + } + + return "", fmt.Errorf("invalid content type") +} + +type clientRequest struct { + transport http.RoundTripper + dial func(network, addr string) (net.Conn, error) + proxyfunc func(req *http.Request) (*url.URL, error) +} + +func (c *clientRequest) Transport(endpoint *Endpoint) error { + + dial := (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial + + if c.dial != nil { + dial = c.dial + } + + proxyfunc := http.ProxyFromEnvironment + if c.proxyfunc != nil { + proxyfunc = c.proxyfunc + } + + transport := &http.Transport{ + Proxy: proxyfunc, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: endpoint.Insecure, + ServerName: endpoint.TLSServerName, + }, + Dial: dial, + ResponseHeaderTimeout: endpoint.Timeout, + } + + if endpoint.CACert != nil && len(endpoint.CACert) > 0 { + certPool, err := readCACerts(endpoint.CACert) + if err != nil { + return err + } + + transport.TLSClientConfig.RootCAs = certPool + } + + c.transport = transport + + return nil +} + +// Post make post to the winrm soap service +func (c clientRequest) Post(client *Client, request *soap.SoapMessage) (string, error) { + httpClient := &http.Client{Transport: c.transport} + + req, err := http.NewRequest("POST", client.url, strings.NewReader(request.String())) + if err != nil { + return "", fmt.Errorf("impossible to create http request %w", err) + } + req.Header.Set("Content-Type", soapXML+";charset=UTF-8") + req.SetBasicAuth(client.username, client.password) + resp, err := httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("unknown error %w", err) + 
} + + body, err := body(resp) + if err != nil { + return "", fmt.Errorf("http response error: %d - %w", resp.StatusCode, err) + } + + // if we have different 200 http status code + // we must replace the error + defer func() { + if resp.StatusCode != 200 { + body, err = "", fmt.Errorf("http error %d: %s", resp.StatusCode, body) + } + }() + + return body, err +} + +//NewClientWithDial NewClientWithDial +func NewClientWithDial(dial func(network, addr string) (net.Conn, error)) *clientRequest { + return &clientRequest{ + dial: dial, + } +} + +//NewClientWithProxyFunc NewClientWithProxyFunc +func NewClientWithProxyFunc(proxyfunc func(req *http.Request) (*url.URL, error)) *clientRequest { + return &clientRequest{ + proxyfunc: proxyfunc, + } +} diff --git a/vendor/github.com/masterzen/winrm/kerberos.go b/vendor/github.com/masterzen/winrm/kerberos.go new file mode 100644 index 00000000000..2d65e76bd47 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/kerberos.go @@ -0,0 +1,129 @@ +package winrm + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/masterzen/winrm/soap" + + "github.com/jcmturner/gokrb5/v8/client" + "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/credentials" + "github.com/jcmturner/gokrb5/v8/spnego" +) + +// Settings holds all the information necessary to configure the provider +type Settings struct { + WinRMUsername string + WinRMPassword string + WinRMHost string + WinRMPort int + WinRMProto string + WinRMInsecure bool + KrbRealm string + KrbConfig string + KrbSpn string + KrbCCache string + WinRMUseNTLM bool + WinRMPassCredentials bool +} + +type ClientKerberos struct { + clientRequest + Username string + Password string + Realm string + Hostname string + Port int + Proto string + SPN string + KrbConf string + KrbCCache string +} + +func NewClientKerberos(settings *Settings) *ClientKerberos { + return &ClientKerberos{ + Username: settings.WinRMUsername, + Password: settings.WinRMPassword, + Realm: settings.KrbRealm, + Hostname: settings.WinRMHost, + Port: settings.WinRMPort, + Proto: settings.WinRMProto, + KrbConf: settings.KrbConfig, + KrbCCache: settings.KrbCCache, + SPN: settings.KrbSpn, + } +} + +func (c *ClientKerberos) Transport(endpoint *Endpoint) error { + c.clientRequest.Transport(endpoint) + + return nil +} + +func (c *ClientKerberos) Post(clt *Client, request *soap.SoapMessage) (string, error) { + cfg, err := config.Load(c.KrbConf) + if err != nil { + return "", err + } + + // setup the kerberos client + var kerberosClient *client.Client + if len(c.KrbCCache) > 0 { + b, err := ioutil.ReadFile(c.KrbCCache) + if err != nil { + return "", fmt.Errorf("Unable to read ccache file %s: %s\n", c.KrbCCache, err.Error()) + } + + cc := new(credentials.CCache) + err = cc.Unmarshal(b) + if err != nil { + return "", fmt.Errorf("Unable to parse ccache file %s: %s", c.KrbCCache, err.Error()) + } + kerberosClient, err = client.NewFromCCache(cc, cfg, client.DisablePAFXFAST(true)) + if err != nil { + return "", fmt.Errorf("Unable to create kerberos client from ccache: %s\n", err.Error()) + } + } else { + kerberosClient = client.NewWithPassword(c.Username, c.Realm, c.Password, cfg, + client.DisablePAFXFAST(true), client.AssumePreAuthentication(true)) + } + + //create an http request + winrmURL := fmt.Sprintf("%s://%s:%d/wsman", c.Proto, c.Hostname, c.Port) + winRMRequest, _ := http.NewRequest("POST", winrmURL, strings.NewReader(request.String())) + winRMRequest.Header.Add("Content-Type", "application/soap+xml;charset=UTF-8") + + 
err = spnego.SetSPNEGOHeader(kerberosClient, winRMRequest, c.SPN) + if err != nil { + return "", fmt.Errorf("Unable to set SPNego Header: %s\n", err.Error()) + } + + httpClient := &http.Client{Transport: c.transport} + + resp, err := httpClient.Do(winRMRequest) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + var bodyMsg string + respBody, err := io.ReadAll(resp.Body) + if err != nil { + bodyMsg = fmt.Sprintf("Error retrieving the response's body: %s", err) + } else { + bodyMsg = fmt.Sprintf("Response body:\n%s", string(respBody)) + } + return "", fmt.Errorf("Request returned: %d - %s. %s ", resp.StatusCode, resp.Status, bodyMsg) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(body), err +} diff --git a/vendor/github.com/masterzen/winrm/ntlm.go b/vendor/github.com/masterzen/winrm/ntlm.go new file mode 100644 index 00000000000..5cabe3b28cf --- /dev/null +++ b/vendor/github.com/masterzen/winrm/ntlm.go @@ -0,0 +1,45 @@ +package winrm + +import ( + "net" + "net/http" + "net/url" + + "github.com/Azure/go-ntlmssp" + "github.com/masterzen/winrm/soap" +) + +// ClientNTLM provides a transport via NTLMv2 +type ClientNTLM struct { + clientRequest +} + +// Transport creates the wrapped NTLM transport +func (c *ClientNTLM) Transport(endpoint *Endpoint) error { + c.clientRequest.Transport(endpoint) + c.clientRequest.transport = &ntlmssp.Negotiator{RoundTripper: c.clientRequest.transport} + return nil +} + +// Post make post to the winrm soap service (forwarded to clientRequest implementation) +func (c ClientNTLM) Post(client *Client, request *soap.SoapMessage) (string, error) { + return c.clientRequest.Post(client, request) +} + +//NewClientNTLMWithDial NewClientNTLMWithDial +func NewClientNTLMWithDial(dial func(network, addr string) (net.Conn, error)) *ClientNTLM { + return &ClientNTLM{ + clientRequest{ + dial: dial, + }, + } +} + +//NewClientNTLMWithProxyFunc NewClientNTLMWithProxyFunc +func NewClientNTLMWithProxyFunc(proxyfunc func(req *http.Request) (*url.URL, error)) *ClientNTLM { + return &ClientNTLM{ + clientRequest{ + proxyfunc: proxyfunc, + }, + } +} diff --git a/vendor/github.com/masterzen/winrm/parameters.go b/vendor/github.com/masterzen/winrm/parameters.go new file mode 100644 index 00000000000..08adcc99f04 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/parameters.go @@ -0,0 +1,27 @@ +package winrm + +import "net" + +// Parameters struct defines +// metadata information and http transport config +type Parameters struct { + Timeout string + Locale string + EnvelopeSize int + TransportDecorator func() Transporter + Dial func(network, addr string) (net.Conn, error) +} + +// DefaultParameters return constant config +// of type Parameters +var DefaultParameters = NewParameters("PT60S", "en-US", 153600) + +// NewParameters return new struct of type Parameters +// this struct makes the configuration for the request, size message, etc. 
+func NewParameters(timeout, locale string, envelopeSize int) *Parameters { + return &Parameters{ + Timeout: timeout, + Locale: locale, + EnvelopeSize: envelopeSize, + } +} diff --git a/vendor/github.com/masterzen/winrm/powershell.go b/vendor/github.com/masterzen/winrm/powershell.go new file mode 100644 index 00000000000..fd1963957d6 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/powershell.go @@ -0,0 +1,27 @@ +package winrm + +import ( + "encoding/base64" + + "golang.org/x/text/encoding/unicode" +) + +// Powershell wraps a PowerShell script +// and prepares it for execution by the winrm client +func Powershell(psCmd string) string { + // Disable unnecessary progress bars which considered as stderr. + psCmd = "$ProgressPreference = 'SilentlyContinue';" + psCmd + + // Encode string to UTF16-LE + encoder := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM).NewEncoder() + encoded, err := encoder.String(psCmd) + if err != nil { + return "" + } + + // Finally make it base64 encoded which is required for powershell. + psCmd = base64.StdEncoding.EncodeToString([]byte(encoded)) + + // Specify powershell.exe to run encoded command + return "powershell.exe -EncodedCommand " + psCmd +} diff --git a/vendor/github.com/masterzen/winrm/request.go b/vendor/github.com/masterzen/winrm/request.go new file mode 100644 index 00000000000..e66bfd769da --- /dev/null +++ b/vendor/github.com/masterzen/winrm/request.go @@ -0,0 +1,161 @@ +package winrm + +import ( + "encoding/base64" + + "github.com/gofrs/uuid" + "github.com/masterzen/winrm/soap" +) + +func genUUID() string { + id := uuid.Must(uuid.NewV4()) + return "uuid:" + id.String() +} + +func defaultHeaders(message *soap.SoapMessage, url string, params *Parameters) *soap.SoapHeader { + return message. + Header(). + To(url). + ReplyTo("http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous"). + MaxEnvelopeSize(params.EnvelopeSize). + Id(genUUID()). + Locale(params.Locale). + Timeout(params.Timeout) +} + +//NewOpenShellRequest makes a new soap request +func NewOpenShellRequest(uri string, params *Parameters) *soap.SoapMessage { + if params == nil { + params = DefaultParameters + } + + message := soap.NewMessage() + defaultHeaders(message, uri, params). + Action("http://schemas.xmlsoap.org/ws/2004/09/transfer/Create"). + ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd"). + AddOption(soap.NewHeaderOption("WINRS_NOPROFILE", "FALSE")). + AddOption(soap.NewHeaderOption("WINRS_CODEPAGE", "65001")). + Build() + + body := message.CreateBodyElement("Shell", soap.DOM_NS_WIN_SHELL) + input := message.CreateElement(body, "InputStreams", soap.DOM_NS_WIN_SHELL) + input.SetContent("stdin") + output := message.CreateElement(body, "OutputStreams", soap.DOM_NS_WIN_SHELL) + output.SetContent("stdout stderr") + + return message +} + +// NewDeleteShellRequest ... +func NewDeleteShellRequest(uri, shellID string, params *Parameters) *soap.SoapMessage { + if params == nil { + params = DefaultParameters + } + message := soap.NewMessage() + defaultHeaders(message, uri, params). + Action("http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete"). + ShellId(shellID). + ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd"). 
+ Build() + + message.NewBody() + + return message +} + +// NewExecuteCommandRequest exec command on specific shellID +func NewExecuteCommandRequest(uri, shellID, command string, arguments []string, params *Parameters) *soap.SoapMessage { + if params == nil { + params = DefaultParameters + } + message := soap.NewMessage() + defaultHeaders(message, uri, params). + Action("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Command"). + ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd"). + ShellId(shellID). + AddOption(soap.NewHeaderOption("WINRS_CONSOLEMODE_STDIN", "TRUE")). + AddOption(soap.NewHeaderOption("WINRS_SKIP_CMD_SHELL", "FALSE")). + Build() + + body := message.CreateBodyElement("CommandLine", soap.DOM_NS_WIN_SHELL) + + // ensure special characters like & don't mangle the request XML + command = "<![CDATA[" + command + "]]>" + commandElement := message.CreateElement(body, "Command", soap.DOM_NS_WIN_SHELL) + commandElement.SetContent(command) + + for _, arg := range arguments { + arg = "<![CDATA[" + arg + "]]>" + argumentsElement := message.CreateElement(body, "Arguments", soap.DOM_NS_WIN_SHELL) + argumentsElement.SetContent(arg) + } + + return message +} + +//NewGetOutputRequest NewGetOutputRequest +func NewGetOutputRequest(uri, shellID, commandID, streams string, params *Parameters) *soap.SoapMessage { + if params == nil { + params = DefaultParameters + } + message := soap.NewMessage() + defaultHeaders(message, uri, params). + Action("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive"). + ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd"). + ShellId(shellID). + Build() + + receive := message.CreateBodyElement("Receive", soap.DOM_NS_WIN_SHELL) + desiredStreams := message.CreateElement(receive, "DesiredStream", soap.DOM_NS_WIN_SHELL) + desiredStreams.SetAttr("CommandId", commandID) + desiredStreams.SetContent(streams) + + return message +} + +//NewSendInputRequest NewSendInputRequest +func NewSendInputRequest(uri, shellID, commandID string, input []byte, eof bool, params *Parameters) *soap.SoapMessage { + if params == nil { + params = DefaultParameters + } + message := soap.NewMessage() + + defaultHeaders(message, uri, params). + Action("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send"). + ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd"). + ShellId(shellID). + Build() + + content := base64.StdEncoding.EncodeToString(input) + + send := message.CreateBodyElement("Send", soap.DOM_NS_WIN_SHELL) + streams := message.CreateElement(send, "Stream", soap.DOM_NS_WIN_SHELL) + streams.SetAttr("Name", "stdin") + streams.SetAttr("CommandId", commandID) + streams.SetContent(content) + if eof { + streams.SetAttr("End", "true") + } + return message +} + +//NewSignalRequest NewSignalRequest +func NewSignalRequest(uri string, shellID string, commandID string, params *Parameters) *soap.SoapMessage { + if params == nil { + params = DefaultParameters + } + message := soap.NewMessage() + + defaultHeaders(message, uri, params). + Action("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Signal"). + ResourceURI("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd"). + ShellId(shellID).
+ Build() + + signal := message.CreateBodyElement("Signal", soap.DOM_NS_WIN_SHELL) + signal.SetAttr("CommandId", commandID) + code := message.CreateElement(signal, "Code", soap.DOM_NS_WIN_SHELL) + code.SetContent("http://schemas.microsoft.com/wbem/wsman/1/windows/shell/signal/terminate") + + return message +} diff --git a/vendor/github.com/masterzen/winrm/response.go b/vendor/github.com/masterzen/winrm/response.go new file mode 100644 index 00000000000..79dc7a568e8 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/response.go @@ -0,0 +1,128 @@ +package winrm + +import ( + "encoding/base64" + "fmt" + "io" + "strconv" + "strings" + + "github.com/ChrisTrenkamp/goxpath" + "github.com/ChrisTrenkamp/goxpath/tree" + "github.com/ChrisTrenkamp/goxpath/tree/xmltree" + "github.com/masterzen/winrm/soap" +) + +func first(node tree.Node, xpath string) (string, error) { + nodes, err := xPath(node, xpath) + if err != nil { + return "", err + } + if len(nodes) < 1 { + return "", err + } + return nodes[0].ResValue(), nil +} + +func any(node tree.Node, xpath string) (bool, error) { + nodes, err := xPath(node, xpath) + if err != nil { + return false, err + } + if len(nodes) > 0 { + return true, nil + } + return false, nil +} + +func xPath(node tree.Node, xpath string) (tree.NodeSet, error) { + xpExec := goxpath.MustParse(xpath) + nodes, err := xpExec.ExecNode(node, soap.GetAllXPathNamespaces()) + if err != nil { + return nil, err + } + return nodes, nil +} + +//ParseOpenShellResponse ParseOpenShellResponse +func ParseOpenShellResponse(response string) (string, error) { + doc, err := xmltree.ParseXML(strings.NewReader(response)) + if err != nil { + return "", err + } + return first(doc, "//w:Selector[@Name='ShellId']") +} + +//ParseExecuteCommandResponse ParseExecuteCommandResponse +func ParseExecuteCommandResponse(response string) (string, error) { + doc, err := xmltree.ParseXML(strings.NewReader(response)) + if err != nil { + return "", err + } + return first(doc, "//rsp:CommandId") +} + +//ParseSlurpOutputErrResponse ParseSlurpOutputErrResponse +func ParseSlurpOutputErrResponse(response string, stdout, stderr io.Writer) (bool, int, error) { + var ( + finished bool + exitCode int + ) + + doc, err := xmltree.ParseXML(strings.NewReader(response)) + + stdouts, _ := xPath(doc, "//rsp:Stream[@Name='stdout']") + for _, node := range stdouts { + content, _ := base64.StdEncoding.DecodeString(node.ResValue()) + stdout.Write(content) + } + stderrs, _ := xPath(doc, "//rsp:Stream[@Name='stderr']") + for _, node := range stderrs { + content, _ := base64.StdEncoding.DecodeString(node.ResValue()) + stderr.Write(content) + } + + ended, _ := any(doc, "//*[@State='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/CommandState/Done']") + + if ended { + finished = ended + if exitBool, _ := any(doc, "//rsp:ExitCode"); exitBool { + exit, _ := first(doc, "//rsp:ExitCode") + exitCode, _ = strconv.Atoi(exit) + } + } else { + finished = false + } + + return finished, exitCode, err +} + +//ParseSlurpOutputResponse ParseSlurpOutputResponse +func ParseSlurpOutputResponse(response string, stream io.Writer, streamType string) (bool, int, error) { + var ( + finished bool + exitCode int + ) + + doc, err := xmltree.ParseXML(strings.NewReader(response)) + + nodes, _ := xPath(doc, fmt.Sprintf("//rsp:Stream[@Name='%s']", streamType)) + for _, node := range nodes { + content, _ := base64.StdEncoding.DecodeString(node.ResValue()) + stream.Write(content) + } + + ended, _ := any(doc, 
"//*[@State='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/CommandState/Done']") + + if ended { + finished = ended + if exitBool, _ := any(doc, "//rsp:ExitCode"); exitBool { + exit, _ := first(doc, "//rsp:ExitCode") + exitCode, _ = strconv.Atoi(exit) + } + } else { + finished = false + } + + return finished, exitCode, err +} diff --git a/vendor/github.com/masterzen/winrm/shell.go b/vendor/github.com/masterzen/winrm/shell.go new file mode 100644 index 00000000000..0f33348f7bb --- /dev/null +++ b/vendor/github.com/masterzen/winrm/shell.go @@ -0,0 +1,36 @@ +package winrm + +// Shell is the local view of a WinRM Shell of a given Client +type Shell struct { + client *Client + id string +} + +// Execute command on the given Shell, returning either an error or a Command +func (s *Shell) Execute(command string, arguments ...string) (*Command, error) { + request := NewExecuteCommandRequest(s.client.url, s.id, command, arguments, &s.client.Parameters) + defer request.Free() + + response, err := s.client.sendRequest(request) + if err != nil { + return nil, err + } + + commandID, err := ParseExecuteCommandResponse(response) + if err != nil { + return nil, err + } + + cmd := newCommand(s, commandID) + + return cmd, nil +} + +// Close will terminate this shell. No commands can be issued once the shell is closed. +func (s *Shell) Close() error { + request := NewDeleteShellRequest(s.client.url, s.id, &s.client.Parameters) + defer request.Free() + + _, err := s.client.sendRequest(request) + return err +} diff --git a/vendor/github.com/masterzen/winrm/soap/header.go b/vendor/github.com/masterzen/winrm/soap/header.go new file mode 100644 index 00000000000..6aeaebe2ecc --- /dev/null +++ b/vendor/github.com/masterzen/winrm/soap/header.go @@ -0,0 +1,182 @@ +package soap + +import ( + "strconv" + + "github.com/masterzen/simplexml/dom" +) + +type HeaderOption struct { + key string + value string +} + +func NewHeaderOption(name string, value string) *HeaderOption { + return &HeaderOption{key: name, value: value} +} + +type SoapHeader struct { + to string + replyTo string + maxEnvelopeSize string + timeout string + locale string + id string + action string + shellId string + resourceURI string + options []HeaderOption + message *SoapMessage +} + +type HeaderBuilder interface { + To(string) *SoapHeader + ReplyTo(string) *SoapHeader + MaxEnvelopeSize(int) *SoapHeader + Timeout(string) *SoapHeader + Locale(string) *SoapHeader + Id(string) *SoapHeader + Action(string) *SoapHeader + ShellId(string) *SoapHeader + resourceURI(string) *SoapHeader + AddOption(*HeaderOption) *SoapHeader + Options([]HeaderOption) *SoapHeader + Build(*SoapMessage) *SoapMessage +} + +func (self *SoapHeader) To(uri string) *SoapHeader { + self.to = uri + return self +} + +func (self *SoapHeader) ReplyTo(uri string) *SoapHeader { + self.replyTo = uri + return self +} + +func (self *SoapHeader) MaxEnvelopeSize(size int) *SoapHeader { + self.maxEnvelopeSize = strconv.Itoa(size) + return self +} + +func (self *SoapHeader) Timeout(timeout string) *SoapHeader { + self.timeout = timeout + return self +} + +func (self *SoapHeader) Id(id string) *SoapHeader { + self.id = id + return self +} + +func (self *SoapHeader) Action(action string) *SoapHeader { + self.action = action + return self +} + +func (self *SoapHeader) Locale(locale string) *SoapHeader { + self.locale = locale + return self +} + +func (self *SoapHeader) ShellId(shellId string) *SoapHeader { + self.shellId = shellId + return self +} + +func (self *SoapHeader) 
ResourceURI(resourceURI string) *SoapHeader { + self.resourceURI = resourceURI + return self +} + +func (self *SoapHeader) AddOption(option *HeaderOption) *SoapHeader { + self.options = append(self.options, *option) + return self +} + +func (self *SoapHeader) Options(options []HeaderOption) *SoapHeader { + self.options = options + return self +} + +func (self *SoapHeader) Build() *SoapMessage { + header := self.createElement(self.message.envelope, "Header", DOM_NS_SOAP_ENV) + + if self.to != "" { + to := self.createElement(header, "To", DOM_NS_ADDRESSING) + to.SetContent(self.to) + } + + if self.replyTo != "" { + replyTo := self.createElement(header, "ReplyTo", DOM_NS_ADDRESSING) + a := self.createMUElement(replyTo, "Address", DOM_NS_ADDRESSING, true) + a.SetContent(self.replyTo) + } + + if self.maxEnvelopeSize != "" { + envelope := self.createMUElement(header, "MaxEnvelopeSize", DOM_NS_WSMAN_DMTF, true) + envelope.SetContent(self.maxEnvelopeSize) + } + + if self.timeout != "" { + timeout := self.createElement(header, "OperationTimeout", DOM_NS_WSMAN_DMTF) + timeout.SetContent(self.timeout) + } + + if self.id != "" { + id := self.createElement(header, "MessageID", DOM_NS_ADDRESSING) + id.SetContent(self.id) + } + + if self.locale != "" { + locale := self.createMUElement(header, "Locale", DOM_NS_WSMAN_DMTF, false) + locale.SetAttr("xml:lang", self.locale) + datalocale := self.createMUElement(header, "DataLocale", DOM_NS_WSMAN_MSFT, false) + datalocale.SetAttr("xml:lang", self.locale) + } + + if self.action != "" { + action := self.createMUElement(header, "Action", DOM_NS_ADDRESSING, true) + action.SetContent(self.action) + } + + if self.shellId != "" { + selectorSet := self.createElement(header, "SelectorSet", DOM_NS_WSMAN_DMTF) + selector := self.createElement(selectorSet, "Selector", DOM_NS_WSMAN_DMTF) + selector.SetAttr("Name", "ShellId") + selector.SetContent(self.shellId) + } + + if self.resourceURI != "" { + resource := self.createMUElement(header, "ResourceURI", DOM_NS_WSMAN_DMTF, true) + resource.SetContent(self.resourceURI) + } + + if len(self.options) > 0 { + set := self.createElement(header, "OptionSet", DOM_NS_WSMAN_DMTF) + for _, option := range self.options { + e := self.createElement(set, "Option", DOM_NS_WSMAN_DMTF) + e.SetAttr("Name", option.key) + e.SetContent(option.value) + } + } + + return self.message +} + +func (self *SoapHeader) createElement(parent *dom.Element, name string, ns dom.Namespace) (element *dom.Element) { + element = dom.CreateElement(name) + parent.AddChild(element) + ns.SetTo(element) + return +} + +func (self *SoapHeader) createMUElement(parent *dom.Element, name string, ns dom.Namespace, mustUnderstand bool) (element *dom.Element) { + element = self.createElement(parent, name, ns) + value := "false" + if mustUnderstand { + value = "true" + } + element.SetAttr("mustUnderstand", value) + return +} diff --git a/vendor/github.com/masterzen/winrm/soap/message.go b/vendor/github.com/masterzen/winrm/soap/message.go new file mode 100644 index 00000000000..1c3b2ecc8da --- /dev/null +++ b/vendor/github.com/masterzen/winrm/soap/message.go @@ -0,0 +1,73 @@ +package soap + +import ( + "github.com/masterzen/simplexml/dom" +) + +type SoapMessage struct { + document *dom.Document + envelope *dom.Element + header *SoapHeader + body *dom.Element +} + +type MessageBuilder interface { + SetBody(*dom.Element) + NewBody() *dom.Element + CreateElement(*dom.Element, string, dom.Namespace) *dom.Element + CreateBodyElement(string, dom.Namespace) *dom.Element + Header() 
*SoapHeader + Doc() *dom.Document + Free() + String() string +} + +func NewMessage() (message *SoapMessage) { + doc := dom.CreateDocument() + e := dom.CreateElement("Envelope") + doc.SetRoot(e) + AddUsualNamespaces(e) + DOM_NS_SOAP_ENV.SetTo(e) + + message = &SoapMessage{document: doc, envelope: e} + return +} + +func (message *SoapMessage) NewBody() (body *dom.Element) { + body = dom.CreateElement("Body") + message.envelope.AddChild(body) + DOM_NS_SOAP_ENV.SetTo(body) + return +} + +func (message *SoapMessage) String() string { + return message.document.String() +} + +func (message *SoapMessage) Doc() *dom.Document { + return message.document +} + +func (message *SoapMessage) Free() { +} + +func (message *SoapMessage) CreateElement(parent *dom.Element, name string, ns dom.Namespace) (element *dom.Element) { + element = dom.CreateElement(name) + parent.AddChild(element) + ns.SetTo(element) + return +} + +func (message *SoapMessage) CreateBodyElement(name string, ns dom.Namespace) (element *dom.Element) { + if message.body == nil { + message.body = message.NewBody() + } + return message.CreateElement(message.body, name, ns) +} + +func (message *SoapMessage) Header() *SoapHeader { + if message.header == nil { + message.header = &SoapHeader{message: message} + } + return message.header +} diff --git a/vendor/github.com/masterzen/winrm/soap/namespaces.go b/vendor/github.com/masterzen/winrm/soap/namespaces.go new file mode 100644 index 00000000000..e7f5e22f59d --- /dev/null +++ b/vendor/github.com/masterzen/winrm/soap/namespaces.go @@ -0,0 +1,81 @@ +package soap + +import ( + "github.com/ChrisTrenkamp/goxpath" + "github.com/masterzen/simplexml/dom" +) + +// Namespaces +const ( + NS_SOAP_ENV = "http://www.w3.org/2003/05/soap-envelope" + NS_ADDRESSING = "http://schemas.xmlsoap.org/ws/2004/08/addressing" + NS_CIMBINDING = "http://schemas.dmtf.org/wbem/wsman/1/cimbinding.xsd" + NS_ENUM = "http://schemas.xmlsoap.org/ws/2004/09/enumeration" + NS_TRANSFER = "http://schemas.xmlsoap.org/ws/2004/09/transfer" + NS_WSMAN_DMTF = "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" + NS_WSMAN_MSFT = "http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd" + NS_SCHEMA_INST = "http://www.w3.org/2001/XMLSchema-instance" + NS_WIN_SHELL = "http://schemas.microsoft.com/wbem/wsman/1/windows/shell" + NS_WSMAN_FAULT = "http://schemas.microsoft.com/wbem/wsman/1/wsmanfault" +) + +// Namespace Prefixes +const ( + NSP_SOAP_ENV = "env" + NSP_ADDRESSING = "a" + NSP_CIMBINDING = "b" + NSP_ENUM = "n" + NSP_TRANSFER = "x" + NSP_WSMAN_DMTF = "w" + NSP_WSMAN_MSFT = "p" + NSP_SCHEMA_INST = "xsi" + NSP_WIN_SHELL = "rsp" + NSP_WSMAN_FAULT = "f" +) + +// DOM Namespaces +var ( + DOM_NS_SOAP_ENV = dom.Namespace{"env", "http://www.w3.org/2003/05/soap-envelope"} + DOM_NS_ADDRESSING = dom.Namespace{"a", "http://schemas.xmlsoap.org/ws/2004/08/addressing"} + DOM_NS_CIMBINDING = dom.Namespace{"b", "http://schemas.dmtf.org/wbem/wsman/1/cimbinding.xsd"} + DOM_NS_ENUM = dom.Namespace{"n", "http://schemas.xmlsoap.org/ws/2004/09/enumeration"} + DOM_NS_TRANSFER = dom.Namespace{"x", "http://schemas.xmlsoap.org/ws/2004/09/transfer"} + DOM_NS_WSMAN_DMTF = dom.Namespace{"w", "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd"} + DOM_NS_WSMAN_MSFT = dom.Namespace{"p", "http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd"} + DOM_NS_SCHEMA_INST = dom.Namespace{"xsi", "http://www.w3.org/2001/XMLSchema-instance"} + DOM_NS_WIN_SHELL = dom.Namespace{"rsp", "http://schemas.microsoft.com/wbem/wsman/1/windows/shell"} + DOM_NS_WSMAN_FAULT = dom.Namespace{"f", 
"http://schemas.microsoft.com/wbem/wsman/1/wsmanfault"} +) + +var MostUsed = [...]dom.Namespace{ + DOM_NS_SOAP_ENV, + DOM_NS_ADDRESSING, + DOM_NS_WIN_SHELL, + DOM_NS_WSMAN_DMTF, + DOM_NS_WSMAN_MSFT, +} + +func AddUsualNamespaces(node *dom.Element) { + for _, ns := range MostUsed { + node.DeclareNamespace(ns) + } +} + +func GetAllXPathNamespaces() func(o *goxpath.Opts) { + ns := map[string]string{ + NSP_SOAP_ENV: NS_SOAP_ENV, + NSP_ADDRESSING: NS_ADDRESSING, + NSP_CIMBINDING: NS_CIMBINDING, + NSP_ENUM: NS_ENUM, + NSP_TRANSFER: NS_TRANSFER, + NSP_WSMAN_DMTF: NS_WSMAN_DMTF, + NSP_WSMAN_MSFT: NS_WSMAN_MSFT, + NSP_SCHEMA_INST: NS_SCHEMA_INST, + NSP_WIN_SHELL: NS_WIN_SHELL, + NSP_WSMAN_FAULT: NS_WSMAN_FAULT, + } + + return func(o *goxpath.Opts) { + o.NS = ns + } +} diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml index fbb43744d94..b097728dbff 100644 --- a/vendor/github.com/modern-go/reflect2/.travis.yml +++ b/vendor/github.com/modern-go/reflect2/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.8.x + - 1.9.x - 1.x before_install: diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock index 2a3a69893b5..10ef811182d 100644 --- a/vendor/github.com/modern-go/reflect2/Gopkg.lock +++ b/vendor/github.com/modern-go/reflect2/Gopkg.lock @@ -1,15 +1,9 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. -[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" - version = "1.0.0" - [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7" + input-imports = [] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml index 2f4f4dbdcc5..a9bc5061b04 100644 --- a/vendor/github.com/modern-go/reflect2/Gopkg.toml +++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml @@ -26,10 +26,6 @@ ignored = [] -[[constraint]] - name = "github.com/modern-go/concurrent" - version = "1.0.0" - [prune] go-tests = true unused-packages = true diff --git a/vendor/github.com/modern-go/reflect2/go.mod b/vendor/github.com/modern-go/reflect2/go.mod new file mode 100644 index 00000000000..9057e9b33f2 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go.mod @@ -0,0 +1,3 @@ +module github.com/modern-go/reflect2 + +go 1.12 diff --git a/vendor/github.com/modern-go/reflect2/go_above_118.go b/vendor/github.com/modern-go/reflect2/go_above_118.go new file mode 100644 index 00000000000..2b4116f6c9b --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_above_118.go @@ -0,0 +1,23 @@ +//+build go1.18 + +package reflect2 + +import ( + "unsafe" +) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. 
+//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer, it *hiter) + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + var it hiter + mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj), &it) + return &UnsafeMapIterator{ + hiter: &it, + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +} \ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go deleted file mode 100644 index 5c1cea8683a..00000000000 --- a/vendor/github.com/modern-go/reflect2/go_above_17.go +++ /dev/null @@ -1,8 +0,0 @@ -//+build go1.7 - -package reflect2 - -import "unsafe" - -//go:linkname resolveTypeOff reflect.resolveTypeOff -func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go index c7e3b780116..974f7685e49 100644 --- a/vendor/github.com/modern-go/reflect2/go_above_19.go +++ b/vendor/github.com/modern-go/reflect2/go_above_19.go @@ -6,6 +6,9 @@ import ( "unsafe" ) +//go:linkname resolveTypeOff reflect.resolveTypeOff +func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer + //go:linkname makemap reflect.makemap func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer) diff --git a/vendor/github.com/modern-go/reflect2/go_below_118.go b/vendor/github.com/modern-go/reflect2/go_below_118.go new file mode 100644 index 00000000000..00003dbd7c5 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_below_118.go @@ -0,0 +1,21 @@ +//+build !go1.18 + +package reflect2 + +import ( + "unsafe" +) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. 
+//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) (val *hiter) + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + return &UnsafeMapIterator{ + hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +} \ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go deleted file mode 100644 index 65a93c889b7..00000000000 --- a/vendor/github.com/modern-go/reflect2/go_below_17.go +++ /dev/null @@ -1,9 +0,0 @@ -//+build !go1.7 - -package reflect2 - -import "unsafe" - -func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { - return nil -} diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go deleted file mode 100644 index b050ef70cdd..00000000000 --- a/vendor/github.com/modern-go/reflect2/go_below_19.go +++ /dev/null @@ -1,14 +0,0 @@ -//+build !go1.9 - -package reflect2 - -import ( - "unsafe" -) - -//go:linkname makemap reflect.makemap -func makemap(rtype unsafe.Pointer) (m unsafe.Pointer) - -func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer { - return makemap(rtype) -} diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go index 63b49c79919..c43c8b9d629 100644 --- a/vendor/github.com/modern-go/reflect2/reflect2.go +++ b/vendor/github.com/modern-go/reflect2/reflect2.go @@ -1,8 +1,9 @@ package reflect2 import ( - "github.com/modern-go/concurrent" "reflect" + "runtime" + "sync" "unsafe" ) @@ -130,13 +131,13 @@ var ConfigSafe = Config{UseSafeImplementation: true}.Froze() type frozenConfig struct { useSafeImplementation bool - cache *concurrent.Map + cache *sync.Map } func (cfg Config) Froze() *frozenConfig { return &frozenConfig{ useSafeImplementation: cfg.UseSafeImplementation, - cache: concurrent.NewMap(), + cache: new(sync.Map), } } @@ -288,11 +289,12 @@ func NoEscape(p unsafe.Pointer) unsafe.Pointer { } func UnsafeCastString(str string) []byte { + bytes := make([]byte, 0) stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str)) - sliceHeader := &reflect.SliceHeader{ - Data: stringHeader.Data, - Cap: stringHeader.Len, - Len: stringHeader.Len, - } - return *(*[]byte)(unsafe.Pointer(sliceHeader)) + sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes)) + sliceHeader.Data = stringHeader.Data + sliceHeader.Cap = stringHeader.Len + sliceHeader.Len = stringHeader.Len + runtime.KeepAlive(str) + return bytes } diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh deleted file mode 100644 index 3d2b9768ce6..00000000000 --- a/vendor/github.com/modern-go/reflect2/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list github.com/modern-go/reflect2-tests/... 
| grep -v vendor); do - go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go index 3acfb55803a..4b13c3155c8 100644 --- a/vendor/github.com/modern-go/reflect2/type_map.go +++ b/vendor/github.com/modern-go/reflect2/type_map.go @@ -1,17 +1,13 @@ +// +build !gccgo + package reflect2 import ( "reflect" - "runtime" - "strings" "sync" "unsafe" ) -// typelinks1 for 1.5 ~ 1.6 -//go:linkname typelinks1 reflect.typelinks -func typelinks1() [][]unsafe.Pointer - // typelinks2 for 1.7 ~ //go:linkname typelinks2 reflect.typelinks func typelinks2() (sections []unsafe.Pointer, offset [][]int32) @@ -27,49 +23,10 @@ func discoverTypes() { types = make(map[string]reflect.Type) packages = make(map[string]map[string]reflect.Type) - ver := runtime.Version() - if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { - loadGo15Types() - } else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") { - loadGo15Types() - } else { - loadGo17Types() - } -} - -func loadGo15Types() { - var obj interface{} = reflect.TypeOf(0) - typePtrss := typelinks1() - for _, typePtrs := range typePtrss { - for _, typePtr := range typePtrs { - (*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr - typ := obj.(reflect.Type) - if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct { - loadedType := typ.Elem() - pkgTypes := packages[loadedType.PkgPath()] - if pkgTypes == nil { - pkgTypes = map[string]reflect.Type{} - packages[loadedType.PkgPath()] = pkgTypes - } - types[loadedType.String()] = loadedType - pkgTypes[loadedType.Name()] = loadedType - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr && - typ.Elem().Elem().Kind() == reflect.Struct { - loadedType := typ.Elem().Elem() - pkgTypes := packages[loadedType.PkgPath()] - if pkgTypes == nil { - pkgTypes = map[string]reflect.Type{} - packages[loadedType.PkgPath()] = pkgTypes - } - types[loadedType.String()] = loadedType - pkgTypes[loadedType.Name()] = loadedType - } - } - } + loadGoTypes() } -func loadGo17Types() { +func loadGoTypes() { var obj interface{} = reflect.TypeOf(0) sections, offset := typelinks2() for i, offs := range offset { diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go index 57229c8db41..b49f614efc5 100644 --- a/vendor/github.com/modern-go/reflect2/unsafe_link.go +++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go @@ -19,18 +19,12 @@ func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int //go:linkname mapassign reflect.mapassign //go:noescape -func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer) +func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer, val unsafe.Pointer) //go:linkname mapaccess reflect.mapaccess //go:noescape func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) -// m escapes into the return value, but the caller of mapiterinit -// doesn't let the return value escape. 
-//go:noescape -//go:linkname mapiterinit reflect.mapiterinit -func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter - //go:noescape //go:linkname mapiternext reflect.mapiternext func mapiternext(it *hiter) @@ -42,9 +36,21 @@ func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer) // If you modify hiter, also change cmd/internal/gc/reflect.go to indicate // the layout of this structure. type hiter struct { - key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go). - value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go). - // rest fields are ignored + key unsafe.Pointer + value unsafe.Pointer + t unsafe.Pointer + h unsafe.Pointer + buckets unsafe.Pointer + bptr unsafe.Pointer + overflow *[]unsafe.Pointer + oldoverflow *[]unsafe.Pointer + startBucket uintptr + offset uint8 + wrapped bool + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr } // add returns p+x. diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go index f2e76e6bb14..37872da8191 100644 --- a/vendor/github.com/modern-go/reflect2/unsafe_map.go +++ b/vendor/github.com/modern-go/reflect2/unsafe_map.go @@ -107,14 +107,6 @@ func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator { return type2.UnsafeIterate(objEFace.data) } -func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { - return &UnsafeMapIterator{ - hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), - pKeyRType: type2.pKeyRType, - pElemRType: type2.pElemRType, - } -} - type UnsafeMapIterator struct { *hiter pKeyRType unsafe.Pointer diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md index d5a8649bd53..d9c08a22fc5 100644 --- a/vendor/github.com/russross/blackfriday/v2/README.md +++ b/vendor/github.com/russross/blackfriday/v2/README.md @@ -1,4 +1,6 @@ -Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday) +Blackfriday +[![Build Status][BuildV2SVG]][BuildV2URL] +[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] =========== Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It @@ -16,19 +18,21 @@ It started as a translation from C of [Sundown][3]. Installation ------------ -Blackfriday is compatible with any modern Go release. With Go 1.7 and git -installed: +Blackfriday is compatible with modern Go releases in module mode. +With Go installed: - go get gopkg.in/russross/blackfriday.v2 + go get github.com/russross/blackfriday/v2 -will download, compile, and install the package into your `$GOPATH` -directory hierarchy. Alternatively, you can achieve the same if you -import it into a project: +will resolve and add the package to the current development module, +then build and install it. Alternatively, you can achieve the same +if you import it in a package: - import "gopkg.in/russross/blackfriday.v2" + import "github.com/russross/blackfriday/v2" and `go get` without parameters. +Legacy GOPATH mode is unsupported. + Versions -------- @@ -36,13 +40,9 @@ Versions Currently maintained and recommended version of Blackfriday is `v2`. It's being developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the documentation is available at -https://godoc.org/gopkg.in/russross/blackfriday.v2. +https://pkg.go.dev/github.com/russross/blackfriday/v2. 
-It is `go get`-able via via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, -but we highly recommend using package management tool like [dep][7] or -[Glide][8] and make use of semantic versioning. With package management you -should import `github.com/russross/blackfriday` and specify that you're using -version 2.0.0. +It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. Version 2 offers a number of improvements over v1: @@ -62,6 +62,11 @@ Potential drawbacks: v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for tracking. +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found +here: https://pkg.go.dev/github.com/russross/blackfriday. + + Usage ----- @@ -91,7 +96,7 @@ Here's an example of simple usage of Blackfriday together with Bluemonday: ```go import ( "github.com/microcosm-cc/bluemonday" - "github.com/russross/blackfriday" + "github.com/russross/blackfriday/v2" ) // ... @@ -104,6 +109,8 @@ html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) If you want to customize the set of options, use `blackfriday.WithExtensions`, `blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. +### `blackfriday-tool` + You can also check out `blackfriday-tool` for a more complete example of how to use it. Download and install it using: @@ -114,7 +121,7 @@ markdown file using a standalone program. You can also browse the source directly on github if you are just looking for some example code: -* +* Note that if you have not already done so, installing `blackfriday-tool` will be sufficient to download and install @@ -123,6 +130,22 @@ installed in `$GOPATH/bin`. This is a statically-linked binary that can be copied to wherever you need it without worrying about dependencies and library versions. +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `AutoHeadingIDs` extension is enabled. The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. +This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. + Features -------- @@ -199,6 +222,15 @@ implements the following extensions: You can use 3 or more backticks to mark the beginning of the block, and the same number to mark the end of the block. + To preserve classes of fenced code blocks while using the bluemonday + HTML sanitizer, use the following policy: + + ```go + p := bluemonday.UGCPolicy() + p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") + html := p.SanitizeBytes(unsafe) + ``` + * **Definition lists**. A simple definition list is made of a single-line term followed by a colon and the definition for that term. 
@@ -250,7 +282,7 @@ Other renderers Blackfriday is structured to allow alternative rendering engines. Here are a few of note: -* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): +* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): provides a GitHub Flavored Markdown renderer with fenced code block highlighting, clickable heading anchor links. @@ -261,20 +293,28 @@ are a few of note: * [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, but for markdown. -* [LaTeX output](https://github.com/Ambrevar/Blackfriday-LaTeX): +* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): renders output as LaTeX. +* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience + integration with the [Chroma](https://github.com/alecthomas/chroma) code + highlighting library. bfchroma is only compatible with v2 of Blackfriday and + provides a drop-in renderer ready to use with Blackfriday, as well as + options and means for further customization. + * [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. +* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style + -Todo +TODO ---- * More unit testing -* Improve unicode support. It does not understand all unicode +* Improve Unicode support. It does not understand all Unicode rules (about what constitutes a letter, a punctuation symbol, etc.), so it may fail to detect word boundaries correctly in - some instances. It is safe on all utf-8 input. + some instances. It is safe on all UTF-8 input. License @@ -286,6 +326,10 @@ License [1]: https://daringfireball.net/projects/markdown/ "Markdown" [2]: https://golang.org/ "Go Language" [3]: https://github.com/vmg/sundown "Sundown" - [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" + [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" - [6]: https://labix.org/gopkg.in "gopkg.in" + + [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 + [BuildV2URL]: https://travis-ci.org/russross/blackfriday + [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 + [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go index b8607474e59..dcd61e6e35b 100644 --- a/vendor/github.com/russross/blackfriday/v2/block.go +++ b/vendor/github.com/russross/blackfriday/v2/block.go @@ -18,8 +18,7 @@ import ( "html" "regexp" "strings" - - "github.com/shurcooL/sanitized_anchor_name" + "unicode" ) const ( @@ -259,7 +258,7 @@ func (p *Markdown) prefixHeading(data []byte) int { } if end > i { if id == "" && p.extensions&AutoHeadingIDs != 0 { - id = sanitized_anchor_name.Create(string(data[i:end])) + id = SanitizedAnchorName(string(data[i:end])) } block := p.addBlock(Heading, data[i:end]) block.HeadingID = id @@ -673,6 +672,7 @@ func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { if beg == 0 || beg >= len(data) { return 0 } + fenceLength := beg - 1 var work bytes.Buffer work.Write([]byte(info)) @@ -706,6 +706,7 @@ func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { if doRender { block := p.addBlock(CodeBlock, work.Bytes()) 
// TODO: get rid of temp buffer block.IsFenced = true + block.FenceLength = fenceLength finalizeCodeBlock(block) } @@ -1503,7 +1504,7 @@ func (p *Markdown) paragraph(data []byte) int { id := "" if p.extensions&AutoHeadingIDs != 0 { - id = sanitized_anchor_name.Create(string(data[prev:eol])) + id = SanitizedAnchorName(string(data[prev:eol])) } block := p.addBlock(Heading, data[prev:eol]) @@ -1588,3 +1589,24 @@ func skipUntilChar(text []byte, start int, char byte) int { } return i } + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go index 5b3fa9876ac..57ff152a056 100644 --- a/vendor/github.com/russross/blackfriday/v2/doc.go +++ b/vendor/github.com/russross/blackfriday/v2/doc.go @@ -15,4 +15,32 @@ // // If you're interested in calling Blackfriday from command line, see // https://github.com/russross/blackfriday-tool. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. This algorithm is used to create +// anchors for headings when AutoHeadingIDs extension is enabled. The +// algorithm is specified below, so that other packages can create +// compatible anchor names and links to those anchors. +// +// The algorithm iterates over the input text, interpreted as UTF-8, +// one Unicode code point (rune) at a time. All runes that are letters (category L) +// or numbers (category N) are considered valid characters. They are mapped to +// lower case, and included in the output. All other runes are considered +// invalid characters. Invalid characters that precede the first valid character, +// as well as invalid character that follow the last valid character +// are dropped completely. All other sequences of invalid characters +// between two valid characters are replaced with a single dash character '-'. +// +// SanitizedAnchorName exposes this functionality, and can be used to +// create compatible links to the anchor names generated by blackfriday. +// This algorithm is also implemented in a small standalone package at +// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients +// that want a small package and don't need full functionality of blackfriday. package blackfriday + +// NOTE: Keep Sanitized Anchor Name algorithm in sync with package +// github.com/shurcooL/sanitized_anchor_name. +// Otherwise, users of sanitized_anchor_name will get anchor names +// that are incompatible with those generated by blackfriday. 
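The doc.go comment above spells out the sanitized-anchor-name algorithm, and the block.go hunk exports the `SanitizedAnchorName` helper that implements it. As a quick illustration of the expected behaviour (a minimal sketch, not part of the patch; the `main` package and the sample strings are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Per the algorithm described above: letters and numbers are lower-cased,
	// every other run of characters collapses to a single '-', and invalid
	// runes before the first / after the last valid rune are dropped.
	fmt.Println(blackfriday.SanitizedAnchorName("Hello, World!"))    // hello-world
	fmt.Println(blackfriday.SanitizedAnchorName("  Go 1.18 Notes ")) // go-1-18-notes
}
```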
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go new file mode 100644 index 00000000000..a2c3edb691c --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/entities.go @@ -0,0 +1,2236 @@ +package blackfriday + +// Extracted from https://html.spec.whatwg.org/multipage/entities.json +var entities = map[string]bool{ + "Æ": true, + "Æ": true, + "&": true, + "&": true, + "Á": true, + "Á": true, + "Ă": true, + "Â": true, + "Â": true, + "А": true, + "𝔄": true, + "À": true, + "À": true, + "Α": true, + "Ā": true, + "⩓": true, + "Ą": true, + "𝔸": true, + "⁡": true, + "Å": true, + "Å": true, + "𝒜": true, + "≔": true, + "Ã": true, + "Ã": true, + "Ä": true, + "Ä": true, + "∖": true, + "⫧": true, + "⌆": true, + "Б": true, + "∵": true, + "ℬ": true, + "Β": true, + "𝔅": true, + "𝔹": true, + "˘": true, + "ℬ": true, + "≎": true, + "Ч": true, + "©": true, + "©": true, + "Ć": true, + "⋒": true, + "ⅅ": true, + "ℭ": true, + "Č": true, + "Ç": true, + "Ç": true, + "Ĉ": true, + "∰": true, + "Ċ": true, + "¸": true, + "·": true, + "ℭ": true, + "Χ": true, + "⊙": true, + "⊖": true, + "⊕": true, + "⊗": true, + "∲": true, + "”": true, + "’": true, + "∷": true, + "⩴": true, + "≡": true, + "∯": true, + "∮": true, + "ℂ": true, + "∐": true, + "∳": true, + "⨯": true, + "𝒞": true, + "⋓": true, + "≍": true, + "ⅅ": true, + "⤑": true, + "Ђ": true, + "Ѕ": true, + "Џ": true, + "‡": true, + "↡": true, + "⫤": true, + "Ď": true, + "Д": true, + "∇": true, + "Δ": true, + "𝔇": true, + "´": true, + "˙": true, + "˝": true, + "`": true, + "˜": true, + "⋄": true, + "ⅆ": true, + "𝔻": true, + "¨": true, + "⃜": true, + "≐": true, + "∯": true, + "¨": true, + "⇓": true, + "⇐": true, + "⇔": true, + "⫤": true, + "⟸": true, + "⟺": true, + "⟹": true, + "⇒": true, + "⊨": true, + "⇑": true, + "⇕": true, + "∥": true, + "↓": true, + "⤓": true, + "⇵": true, + "̑": true, + "⥐": true, + "⥞": true, + "↽": true, + "⥖": true, + "⥟": true, + "⇁": true, + "⥗": true, + "⊤": true, + "↧": true, + "⇓": true, + "𝒟": true, + "Đ": true, + "Ŋ": true, + "Ð": true, + "Ð": true, + "É": true, + "É": true, + "Ě": true, + "Ê": true, + "Ê": true, + "Э": true, + "Ė": true, + "𝔈": true, + "È": true, + "È": true, + "∈": true, + "Ē": true, + "◻": true, + "▫": true, + "Ę": true, + "𝔼": true, + "Ε": true, + "⩵": true, + "≂": true, + "⇌": true, + "ℰ": true, + "⩳": true, + "Η": true, + "Ë": true, + "Ë": true, + "∃": true, + "ⅇ": true, + "Ф": true, + "𝔉": true, + "◼": true, + "▪": true, + "𝔽": true, + "∀": true, + "ℱ": true, + "ℱ": true, + "Ѓ": true, + ">": true, + ">": true, + "Γ": true, + "Ϝ": true, + "Ğ": true, + "Ģ": true, + "Ĝ": true, + "Г": true, + "Ġ": true, + "𝔊": true, + "⋙": true, + "𝔾": true, + "≥": true, + "⋛": true, + "≧": true, + "⪢": true, + "≷": true, + "⩾": true, + "≳": true, + "𝒢": true, + "≫": true, + "Ъ": true, + "ˇ": true, + "^": true, + "Ĥ": true, + "ℌ": true, + "ℋ": true, + "ℍ": true, + "─": true, + "ℋ": true, + "Ħ": true, + "≎": true, + "≏": true, + "Е": true, + "IJ": true, + "Ё": true, + "Í": true, + "Í": true, + "Î": true, + "Î": true, + "И": true, + "İ": true, + "ℑ": true, + "Ì": true, + "Ì": true, + "ℑ": true, + "Ī": true, + "ⅈ": true, + "⇒": true, + "∬": true, + "∫": true, + "⋂": true, + "⁣": true, + "⁢": true, + "Į": true, + "𝕀": true, + "Ι": true, + "ℐ": true, + "Ĩ": true, + "І": true, + "Ï": true, + "Ï": true, + "Ĵ": true, + "Й": true, + "𝔍": true, + "𝕁": true, + "𝒥": true, + "Ј": true, + "Є": true, + "Х": true, + "Ќ": true, + "Κ": true, + "Ķ": true, + "К": true, + "𝔎": 
true, + "𝕂": true, + "𝒦": true, + "Љ": true, + "<": true, + "<": true, + "Ĺ": true, + "Λ": true, + "⟪": true, + "ℒ": true, + "↞": true, + "Ľ": true, + "Ļ": true, + "Л": true, + "⟨": true, + "←": true, + "⇤": true, + "⇆": true, + "⌈": true, + "⟦": true, + "⥡": true, + "⇃": true, + "⥙": true, + "⌊": true, + "↔": true, + "⥎": true, + "⊣": true, + "↤": true, + "⥚": true, + "⊲": true, + "⧏": true, + "⊴": true, + "⥑": true, + "⥠": true, + "↿": true, + "⥘": true, + "↼": true, + "⥒": true, + "⇐": true, + "⇔": true, + "⋚": true, + "≦": true, + "≶": true, + "⪡": true, + "⩽": true, + "≲": true, + "𝔏": true, + "⋘": true, + "⇚": true, + "Ŀ": true, + "⟵": true, + "⟷": true, + "⟶": true, + "⟸": true, + "⟺": true, + "⟹": true, + "𝕃": true, + "↙": true, + "↘": true, + "ℒ": true, + "↰": true, + "Ł": true, + "≪": true, + "⤅": true, + "М": true, + " ": true, + "ℳ": true, + "𝔐": true, + "∓": true, + "𝕄": true, + "ℳ": true, + "Μ": true, + "Њ": true, + "Ń": true, + "Ň": true, + "Ņ": true, + "Н": true, + "​": true, + "​": true, + "​": true, + "​": true, + "≫": true, + "≪": true, + " ": true, + "𝔑": true, + "⁠": true, + " ": true, + "ℕ": true, + "⫬": true, + "≢": true, + "≭": true, + "∦": true, + "∉": true, + "≠": true, + "≂̸": true, + "∄": true, + "≯": true, + "≱": true, + "≧̸": true, + "≫̸": true, + "≹": true, + "⩾̸": true, + "≵": true, + "≎̸": true, + "≏̸": true, + "⋪": true, + "⧏̸": true, + "⋬": true, + "≮": true, + "≰": true, + "≸": true, + "≪̸": true, + "⩽̸": true, + "≴": true, + "⪢̸": true, + "⪡̸": true, + "⊀": true, + "⪯̸": true, + "⋠": true, + "∌": true, + "⋫": true, + "⧐̸": true, + "⋭": true, + "⊏̸": true, + "⋢": true, + "⊐̸": true, + "⋣": true, + "⊂⃒": true, + "⊈": true, + "⊁": true, + "⪰̸": true, + "⋡": true, + "≿̸": true, + "⊃⃒": true, + "⊉": true, + "≁": true, + "≄": true, + "≇": true, + "≉": true, + "∤": true, + "𝒩": true, + "Ñ": true, + "Ñ": true, + "Ν": true, + "Œ": true, + "Ó": true, + "Ó": true, + "Ô": true, + "Ô": true, + "О": true, + "Ő": true, + "𝔒": true, + "Ò": true, + "Ò": true, + "Ō": true, + "Ω": true, + "Ο": true, + "𝕆": true, + "“": true, + "‘": true, + "⩔": true, + "𝒪": true, + "Ø": true, + "Ø": true, + "Õ": true, + "Õ": true, + "⨷": true, + "Ö": true, + "Ö": true, + "‾": true, + "⏞": true, + "⎴": true, + "⏜": true, + "∂": true, + "П": true, + "𝔓": true, + "Φ": true, + "Π": true, + "±": true, + "ℌ": true, + "ℙ": true, + "⪻": true, + "≺": true, + "⪯": true, + "≼": true, + "≾": true, + "″": true, + "∏": true, + "∷": true, + "∝": true, + "𝒫": true, + "Ψ": true, + """: true, + """: true, + "𝔔": true, + "ℚ": true, + "𝒬": true, + "⤐": true, + "®": true, + "®": true, + "Ŕ": true, + "⟫": true, + "↠": true, + "⤖": true, + "Ř": true, + "Ŗ": true, + "Р": true, + "ℜ": true, + "∋": true, + "⇋": true, + "⥯": true, + "ℜ": true, + "Ρ": true, + "⟩": true, + "→": true, + "⇥": true, + "⇄": true, + "⌉": true, + "⟧": true, + "⥝": true, + "⇂": true, + "⥕": true, + "⌋": true, + "⊢": true, + "↦": true, + "⥛": true, + "⊳": true, + "⧐": true, + "⊵": true, + "⥏": true, + "⥜": true, + "↾": true, + "⥔": true, + "⇀": true, + "⥓": true, + "⇒": true, + "ℝ": true, + "⥰": true, + "⇛": true, + "ℛ": true, + "↱": true, + "⧴": true, + "Щ": true, + "Ш": true, + "Ь": true, + "Ś": true, + "⪼": true, + "Š": true, + "Ş": true, + "Ŝ": true, + "С": true, + "𝔖": true, + "↓": true, + "←": true, + "→": true, + "↑": true, + "Σ": true, + "∘": true, + "𝕊": true, + "√": true, + "□": true, + "⊓": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊔": true, + "𝒮": true, + "⋆": true, + "⋐": true, + "⋐": true, + "⊆": true, + "≻": 
true, + "⪰": true, + "≽": true, + "≿": true, + "∋": true, + "∑": true, + "⋑": true, + "⊃": true, + "⊇": true, + "⋑": true, + "Þ": true, + "Þ": true, + "™": true, + "Ћ": true, + "Ц": true, + " ": true, + "Τ": true, + "Ť": true, + "Ţ": true, + "Т": true, + "𝔗": true, + "∴": true, + "Θ": true, + "  ": true, + " ": true, + "∼": true, + "≃": true, + "≅": true, + "≈": true, + "𝕋": true, + "⃛": true, + "𝒯": true, + "Ŧ": true, + "Ú": true, + "Ú": true, + "↟": true, + "⥉": true, + "Ў": true, + "Ŭ": true, + "Û": true, + "Û": true, + "У": true, + "Ű": true, + "𝔘": true, + "Ù": true, + "Ù": true, + "Ū": true, + "_": true, + "⏟": true, + "⎵": true, + "⏝": true, + "⋃": true, + "⊎": true, + "Ų": true, + "𝕌": true, + "↑": true, + "⤒": true, + "⇅": true, + "↕": true, + "⥮": true, + "⊥": true, + "↥": true, + "⇑": true, + "⇕": true, + "↖": true, + "↗": true, + "ϒ": true, + "Υ": true, + "Ů": true, + "𝒰": true, + "Ũ": true, + "Ü": true, + "Ü": true, + "⊫": true, + "⫫": true, + "В": true, + "⊩": true, + "⫦": true, + "⋁": true, + "‖": true, + "‖": true, + "∣": true, + "|": true, + "❘": true, + "≀": true, + " ": true, + "𝔙": true, + "𝕍": true, + "𝒱": true, + "⊪": true, + "Ŵ": true, + "⋀": true, + "𝔚": true, + "𝕎": true, + "𝒲": true, + "𝔛": true, + "Ξ": true, + "𝕏": true, + "𝒳": true, + "Я": true, + "Ї": true, + "Ю": true, + "Ý": true, + "Ý": true, + "Ŷ": true, + "Ы": true, + "𝔜": true, + "𝕐": true, + "𝒴": true, + "Ÿ": true, + "Ж": true, + "Ź": true, + "Ž": true, + "З": true, + "Ż": true, + "​": true, + "Ζ": true, + "ℨ": true, + "ℤ": true, + "𝒵": true, + "á": true, + "á": true, + "ă": true, + "∾": true, + "∾̳": true, + "∿": true, + "â": true, + "â": true, + "´": true, + "´": true, + "а": true, + "æ": true, + "æ": true, + "⁡": true, + "𝔞": true, + "à": true, + "à": true, + "ℵ": true, + "ℵ": true, + "α": true, + "ā": true, + "⨿": true, + "&": true, + "&": true, + "∧": true, + "⩕": true, + "⩜": true, + "⩘": true, + "⩚": true, + "∠": true, + "⦤": true, + "∠": true, + "∡": true, + "⦨": true, + "⦩": true, + "⦪": true, + "⦫": true, + "⦬": true, + "⦭": true, + "⦮": true, + "⦯": true, + "∟": true, + "⊾": true, + "⦝": true, + "∢": true, + "Å": true, + "⍼": true, + "ą": true, + "𝕒": true, + "≈": true, + "⩰": true, + "⩯": true, + "≊": true, + "≋": true, + "'": true, + "≈": true, + "≊": true, + "å": true, + "å": true, + "𝒶": true, + "*": true, + "≈": true, + "≍": true, + "ã": true, + "ã": true, + "ä": true, + "ä": true, + "∳": true, + "⨑": true, + "⫭": true, + "≌": true, + "϶": true, + "‵": true, + "∽": true, + "⋍": true, + "⊽": true, + "⌅": true, + "⌅": true, + "⎵": true, + "⎶": true, + "≌": true, + "б": true, + "„": true, + "∵": true, + "∵": true, + "⦰": true, + "϶": true, + "ℬ": true, + "β": true, + "ℶ": true, + "≬": true, + "𝔟": true, + "⋂": true, + "◯": true, + "⋃": true, + "⨀": true, + "⨁": true, + "⨂": true, + "⨆": true, + "★": true, + "▽": true, + "△": true, + "⨄": true, + "⋁": true, + "⋀": true, + "⤍": true, + "⧫": true, + "▪": true, + "▴": true, + "▾": true, + "◂": true, + "▸": true, + "␣": true, + "▒": true, + "░": true, + "▓": true, + "█": true, + "=⃥": true, + "≡⃥": true, + "⌐": true, + "𝕓": true, + "⊥": true, + "⊥": true, + "⋈": true, + "╗": true, + "╔": true, + "╖": true, + "╓": true, + "═": true, + "╦": true, + "╩": true, + "╤": true, + "╧": true, + "╝": true, + "╚": true, + "╜": true, + "╙": true, + "║": true, + "╬": true, + "╣": true, + "╠": true, + "╫": true, + "╢": true, + "╟": true, + "⧉": true, + "╕": true, + "╒": true, + "┐": true, + "┌": true, + "─": true, + "╥": true, + "╨": true, + "┬": true, + "┴": 
true, + "⊟": true, + "⊞": true, + "⊠": true, + "╛": true, + "╘": true, + "┘": true, + "└": true, + "│": true, + "╪": true, + "╡": true, + "╞": true, + "┼": true, + "┤": true, + "├": true, + "‵": true, + "˘": true, + "¦": true, + "¦": true, + "𝒷": true, + "⁏": true, + "∽": true, + "⋍": true, + "\": true, + "⧅": true, + "⟈": true, + "•": true, + "•": true, + "≎": true, + "⪮": true, + "≏": true, + "≏": true, + "ć": true, + "∩": true, + "⩄": true, + "⩉": true, + "⩋": true, + "⩇": true, + "⩀": true, + "∩︀": true, + "⁁": true, + "ˇ": true, + "⩍": true, + "č": true, + "ç": true, + "ç": true, + "ĉ": true, + "⩌": true, + "⩐": true, + "ċ": true, + "¸": true, + "¸": true, + "⦲": true, + "¢": true, + "¢": true, + "·": true, + "𝔠": true, + "ч": true, + "✓": true, + "✓": true, + "χ": true, + "○": true, + "⧃": true, + "ˆ": true, + "≗": true, + "↺": true, + "↻": true, + "®": true, + "Ⓢ": true, + "⊛": true, + "⊚": true, + "⊝": true, + "≗": true, + "⨐": true, + "⫯": true, + "⧂": true, + "♣": true, + "♣": true, + ":": true, + "≔": true, + "≔": true, + ",": true, + "@": true, + "∁": true, + "∘": true, + "∁": true, + "ℂ": true, + "≅": true, + "⩭": true, + "∮": true, + "𝕔": true, + "∐": true, + "©": true, + "©": true, + "℗": true, + "↵": true, + "✗": true, + "𝒸": true, + "⫏": true, + "⫑": true, + "⫐": true, + "⫒": true, + "⋯": true, + "⤸": true, + "⤵": true, + "⋞": true, + "⋟": true, + "↶": true, + "⤽": true, + "∪": true, + "⩈": true, + "⩆": true, + "⩊": true, + "⊍": true, + "⩅": true, + "∪︀": true, + "↷": true, + "⤼": true, + "⋞": true, + "⋟": true, + "⋎": true, + "⋏": true, + "¤": true, + "¤": true, + "↶": true, + "↷": true, + "⋎": true, + "⋏": true, + "∲": true, + "∱": true, + "⌭": true, + "⇓": true, + "⥥": true, + "†": true, + "ℸ": true, + "↓": true, + "‐": true, + "⊣": true, + "⤏": true, + "˝": true, + "ď": true, + "д": true, + "ⅆ": true, + "‡": true, + "⇊": true, + "⩷": true, + "°": true, + "°": true, + "δ": true, + "⦱": true, + "⥿": true, + "𝔡": true, + "⇃": true, + "⇂": true, + "⋄": true, + "⋄": true, + "♦": true, + "♦": true, + "¨": true, + "ϝ": true, + "⋲": true, + "÷": true, + "÷": true, + "÷": true, + "⋇": true, + "⋇": true, + "ђ": true, + "⌞": true, + "⌍": true, + "$": true, + "𝕕": true, + "˙": true, + "≐": true, + "≑": true, + "∸": true, + "∔": true, + "⊡": true, + "⌆": true, + "↓": true, + "⇊": true, + "⇃": true, + "⇂": true, + "⤐": true, + "⌟": true, + "⌌": true, + "𝒹": true, + "ѕ": true, + "⧶": true, + "đ": true, + "⋱": true, + "▿": true, + "▾": true, + "⇵": true, + "⥯": true, + "⦦": true, + "џ": true, + "⟿": true, + "⩷": true, + "≑": true, + "é": true, + "é": true, + "⩮": true, + "ě": true, + "≖": true, + "ê": true, + "ê": true, + "≕": true, + "э": true, + "ė": true, + "ⅇ": true, + "≒": true, + "𝔢": true, + "⪚": true, + "è": true, + "è": true, + "⪖": true, + "⪘": true, + "⪙": true, + "⏧": true, + "ℓ": true, + "⪕": true, + "⪗": true, + "ē": true, + "∅": true, + "∅": true, + "∅": true, + " ": true, + " ": true, + " ": true, + "ŋ": true, + " ": true, + "ę": true, + "𝕖": true, + "⋕": true, + "⧣": true, + "⩱": true, + "ε": true, + "ε": true, + "ϵ": true, + "≖": true, + "≕": true, + "≂": true, + "⪖": true, + "⪕": true, + "=": true, + "≟": true, + "≡": true, + "⩸": true, + "⧥": true, + "≓": true, + "⥱": true, + "ℯ": true, + "≐": true, + "≂": true, + "η": true, + "ð": true, + "ð": true, + "ë": true, + "ë": true, + "€": true, + "!": true, + "∃": true, + "ℰ": true, + "ⅇ": true, + "≒": true, + "ф": true, + "♀": true, + "ffi": true, + "ff": true, + "ffl": true, + "𝔣": true, + "fi": true, + "fj": true, + 
"♭": true, + "fl": true, + "▱": true, + "ƒ": true, + "𝕗": true, + "∀": true, + "⋔": true, + "⫙": true, + "⨍": true, + "½": true, + "½": true, + "⅓": true, + "¼": true, + "¼": true, + "⅕": true, + "⅙": true, + "⅛": true, + "⅔": true, + "⅖": true, + "¾": true, + "¾": true, + "⅗": true, + "⅜": true, + "⅘": true, + "⅚": true, + "⅝": true, + "⅞": true, + "⁄": true, + "⌢": true, + "𝒻": true, + "≧": true, + "⪌": true, + "ǵ": true, + "γ": true, + "ϝ": true, + "⪆": true, + "ğ": true, + "ĝ": true, + "г": true, + "ġ": true, + "≥": true, + "⋛": true, + "≥": true, + "≧": true, + "⩾": true, + "⩾": true, + "⪩": true, + "⪀": true, + "⪂": true, + "⪄": true, + "⋛︀": true, + "⪔": true, + "𝔤": true, + "≫": true, + "⋙": true, + "ℷ": true, + "ѓ": true, + "≷": true, + "⪒": true, + "⪥": true, + "⪤": true, + "≩": true, + "⪊": true, + "⪊": true, + "⪈": true, + "⪈": true, + "≩": true, + "⋧": true, + "𝕘": true, + "`": true, + "ℊ": true, + "≳": true, + "⪎": true, + "⪐": true, + ">": true, + ">": true, + "⪧": true, + "⩺": true, + "⋗": true, + "⦕": true, + "⩼": true, + "⪆": true, + "⥸": true, + "⋗": true, + "⋛": true, + "⪌": true, + "≷": true, + "≳": true, + "≩︀": true, + "≩︀": true, + "⇔": true, + " ": true, + "½": true, + "ℋ": true, + "ъ": true, + "↔": true, + "⥈": true, + "↭": true, + "ℏ": true, + "ĥ": true, + "♥": true, + "♥": true, + "…": true, + "⊹": true, + "𝔥": true, + "⤥": true, + "⤦": true, + "⇿": true, + "∻": true, + "↩": true, + "↪": true, + "𝕙": true, + "―": true, + "𝒽": true, + "ℏ": true, + "ħ": true, + "⁃": true, + "‐": true, + "í": true, + "í": true, + "⁣": true, + "î": true, + "î": true, + "и": true, + "е": true, + "¡": true, + "¡": true, + "⇔": true, + "𝔦": true, + "ì": true, + "ì": true, + "ⅈ": true, + "⨌": true, + "∭": true, + "⧜": true, + "℩": true, + "ij": true, + "ī": true, + "ℑ": true, + "ℐ": true, + "ℑ": true, + "ı": true, + "⊷": true, + "Ƶ": true, + "∈": true, + "℅": true, + "∞": true, + "⧝": true, + "ı": true, + "∫": true, + "⊺": true, + "ℤ": true, + "⊺": true, + "⨗": true, + "⨼": true, + "ё": true, + "į": true, + "𝕚": true, + "ι": true, + "⨼": true, + "¿": true, + "¿": true, + "𝒾": true, + "∈": true, + "⋹": true, + "⋵": true, + "⋴": true, + "⋳": true, + "∈": true, + "⁢": true, + "ĩ": true, + "і": true, + "ï": true, + "ï": true, + "ĵ": true, + "й": true, + "𝔧": true, + "ȷ": true, + "𝕛": true, + "𝒿": true, + "ј": true, + "є": true, + "κ": true, + "ϰ": true, + "ķ": true, + "к": true, + "𝔨": true, + "ĸ": true, + "х": true, + "ќ": true, + "𝕜": true, + "𝓀": true, + "⇚": true, + "⇐": true, + "⤛": true, + "⤎": true, + "≦": true, + "⪋": true, + "⥢": true, + "ĺ": true, + "⦴": true, + "ℒ": true, + "λ": true, + "⟨": true, + "⦑": true, + "⟨": true, + "⪅": true, + "«": true, + "«": true, + "←": true, + "⇤": true, + "⤟": true, + "⤝": true, + "↩": true, + "↫": true, + "⤹": true, + "⥳": true, + "↢": true, + "⪫": true, + "⤙": true, + "⪭": true, + "⪭︀": true, + "⤌": true, + "❲": true, + "{": true, + "[": true, + "⦋": true, + "⦏": true, + "⦍": true, + "ľ": true, + "ļ": true, + "⌈": true, + "{": true, + "л": true, + "⤶": true, + "“": true, + "„": true, + "⥧": true, + "⥋": true, + "↲": true, + "≤": true, + "←": true, + "↢": true, + "↽": true, + "↼": true, + "⇇": true, + "↔": true, + "⇆": true, + "⇋": true, + "↭": true, + "⋋": true, + "⋚": true, + "≤": true, + "≦": true, + "⩽": true, + "⩽": true, + "⪨": true, + "⩿": true, + "⪁": true, + "⪃": true, + "⋚︀": true, + "⪓": true, + "⪅": true, + "⋖": true, + "⋚": true, + "⪋": true, + "≶": true, + "≲": true, + "⥼": true, + "⌊": true, + "𝔩": true, + "≶": true, + "⪑": true, 
+ "↽": true, + "↼": true, + "⥪": true, + "▄": true, + "љ": true, + "≪": true, + "⇇": true, + "⌞": true, + "⥫": true, + "◺": true, + "ŀ": true, + "⎰": true, + "⎰": true, + "≨": true, + "⪉": true, + "⪉": true, + "⪇": true, + "⪇": true, + "≨": true, + "⋦": true, + "⟬": true, + "⇽": true, + "⟦": true, + "⟵": true, + "⟷": true, + "⟼": true, + "⟶": true, + "↫": true, + "↬": true, + "⦅": true, + "𝕝": true, + "⨭": true, + "⨴": true, + "∗": true, + "_": true, + "◊": true, + "◊": true, + "⧫": true, + "(": true, + "⦓": true, + "⇆": true, + "⌟": true, + "⇋": true, + "⥭": true, + "‎": true, + "⊿": true, + "‹": true, + "𝓁": true, + "↰": true, + "≲": true, + "⪍": true, + "⪏": true, + "[": true, + "‘": true, + "‚": true, + "ł": true, + "<": true, + "<": true, + "⪦": true, + "⩹": true, + "⋖": true, + "⋋": true, + "⋉": true, + "⥶": true, + "⩻": true, + "⦖": true, + "◃": true, + "⊴": true, + "◂": true, + "⥊": true, + "⥦": true, + "≨︀": true, + "≨︀": true, + "∺": true, + "¯": true, + "¯": true, + "♂": true, + "✠": true, + "✠": true, + "↦": true, + "↦": true, + "↧": true, + "↤": true, + "↥": true, + "▮": true, + "⨩": true, + "м": true, + "—": true, + "∡": true, + "𝔪": true, + "℧": true, + "µ": true, + "µ": true, + "∣": true, + "*": true, + "⫰": true, + "·": true, + "·": true, + "−": true, + "⊟": true, + "∸": true, + "⨪": true, + "⫛": true, + "…": true, + "∓": true, + "⊧": true, + "𝕞": true, + "∓": true, + "𝓂": true, + "∾": true, + "μ": true, + "⊸": true, + "⊸": true, + "⋙̸": true, + "≫⃒": true, + "≫̸": true, + "⇍": true, + "⇎": true, + "⋘̸": true, + "≪⃒": true, + "≪̸": true, + "⇏": true, + "⊯": true, + "⊮": true, + "∇": true, + "ń": true, + "∠⃒": true, + "≉": true, + "⩰̸": true, + "≋̸": true, + "ʼn": true, + "≉": true, + "♮": true, + "♮": true, + "ℕ": true, + " ": true, + " ": true, + "≎̸": true, + "≏̸": true, + "⩃": true, + "ň": true, + "ņ": true, + "≇": true, + "⩭̸": true, + "⩂": true, + "н": true, + "–": true, + "≠": true, + "⇗": true, + "⤤": true, + "↗": true, + "↗": true, + "≐̸": true, + "≢": true, + "⤨": true, + "≂̸": true, + "∄": true, + "∄": true, + "𝔫": true, + "≧̸": true, + "≱": true, + "≱": true, + "≧̸": true, + "⩾̸": true, + "⩾̸": true, + "≵": true, + "≯": true, + "≯": true, + "⇎": true, + "↮": true, + "⫲": true, + "∋": true, + "⋼": true, + "⋺": true, + "∋": true, + "њ": true, + "⇍": true, + "≦̸": true, + "↚": true, + "‥": true, + "≰": true, + "↚": true, + "↮": true, + "≰": true, + "≦̸": true, + "⩽̸": true, + "⩽̸": true, + "≮": true, + "≴": true, + "≮": true, + "⋪": true, + "⋬": true, + "∤": true, + "𝕟": true, + "¬": true, + "¬": true, + "∉": true, + "⋹̸": true, + "⋵̸": true, + "∉": true, + "⋷": true, + "⋶": true, + "∌": true, + "∌": true, + "⋾": true, + "⋽": true, + "∦": true, + "∦": true, + "⫽⃥": true, + "∂̸": true, + "⨔": true, + "⊀": true, + "⋠": true, + "⪯̸": true, + "⊀": true, + "⪯̸": true, + "⇏": true, + "↛": true, + "⤳̸": true, + "↝̸": true, + "↛": true, + "⋫": true, + "⋭": true, + "⊁": true, + "⋡": true, + "⪰̸": true, + "𝓃": true, + "∤": true, + "∦": true, + "≁": true, + "≄": true, + "≄": true, + "∤": true, + "∦": true, + "⋢": true, + "⋣": true, + "⊄": true, + "⫅̸": true, + "⊈": true, + "⊂⃒": true, + "⊈": true, + "⫅̸": true, + "⊁": true, + "⪰̸": true, + "⊅": true, + "⫆̸": true, + "⊉": true, + "⊃⃒": true, + "⊉": true, + "⫆̸": true, + "≹": true, + "ñ": true, + "ñ": true, + "≸": true, + "⋪": true, + "⋬": true, + "⋫": true, + "⋭": true, + "ν": true, + "#": true, + "№": true, + " ": true, + "⊭": true, + "⤄": true, + "≍⃒": true, + "⊬": true, + "≥⃒": true, + ">⃒": true, + "⧞": true, + "⤂": true, 
+ "≤⃒": true, + "<⃒": true, + "⊴⃒": true, + "⤃": true, + "⊵⃒": true, + "∼⃒": true, + "⇖": true, + "⤣": true, + "↖": true, + "↖": true, + "⤧": true, + "Ⓢ": true, + "ó": true, + "ó": true, + "⊛": true, + "⊚": true, + "ô": true, + "ô": true, + "о": true, + "⊝": true, + "ő": true, + "⨸": true, + "⊙": true, + "⦼": true, + "œ": true, + "⦿": true, + "𝔬": true, + "˛": true, + "ò": true, + "ò": true, + "⧁": true, + "⦵": true, + "Ω": true, + "∮": true, + "↺": true, + "⦾": true, + "⦻": true, + "‾": true, + "⧀": true, + "ō": true, + "ω": true, + "ο": true, + "⦶": true, + "⊖": true, + "𝕠": true, + "⦷": true, + "⦹": true, + "⊕": true, + "∨": true, + "↻": true, + "⩝": true, + "ℴ": true, + "ℴ": true, + "ª": true, + "ª": true, + "º": true, + "º": true, + "⊶": true, + "⩖": true, + "⩗": true, + "⩛": true, + "ℴ": true, + "ø": true, + "ø": true, + "⊘": true, + "õ": true, + "õ": true, + "⊗": true, + "⨶": true, + "ö": true, + "ö": true, + "⌽": true, + "∥": true, + "¶": true, + "¶": true, + "∥": true, + "⫳": true, + "⫽": true, + "∂": true, + "п": true, + "%": true, + ".": true, + "‰": true, + "⊥": true, + "‱": true, + "𝔭": true, + "φ": true, + "ϕ": true, + "ℳ": true, + "☎": true, + "π": true, + "⋔": true, + "ϖ": true, + "ℏ": true, + "ℎ": true, + "ℏ": true, + "+": true, + "⨣": true, + "⊞": true, + "⨢": true, + "∔": true, + "⨥": true, + "⩲": true, + "±": true, + "±": true, + "⨦": true, + "⨧": true, + "±": true, + "⨕": true, + "𝕡": true, + "£": true, + "£": true, + "≺": true, + "⪳": true, + "⪷": true, + "≼": true, + "⪯": true, + "≺": true, + "⪷": true, + "≼": true, + "⪯": true, + "⪹": true, + "⪵": true, + "⋨": true, + "≾": true, + "′": true, + "ℙ": true, + "⪵": true, + "⪹": true, + "⋨": true, + "∏": true, + "⌮": true, + "⌒": true, + "⌓": true, + "∝": true, + "∝": true, + "≾": true, + "⊰": true, + "𝓅": true, + "ψ": true, + " ": true, + "𝔮": true, + "⨌": true, + "𝕢": true, + "⁗": true, + "𝓆": true, + "ℍ": true, + "⨖": true, + "?": true, + "≟": true, + """: true, + """: true, + "⇛": true, + "⇒": true, + "⤜": true, + "⤏": true, + "⥤": true, + "∽̱": true, + "ŕ": true, + "√": true, + "⦳": true, + "⟩": true, + "⦒": true, + "⦥": true, + "⟩": true, + "»": true, + "»": true, + "→": true, + "⥵": true, + "⇥": true, + "⤠": true, + "⤳": true, + "⤞": true, + "↪": true, + "↬": true, + "⥅": true, + "⥴": true, + "↣": true, + "↝": true, + "⤚": true, + "∶": true, + "ℚ": true, + "⤍": true, + "❳": true, + "}": true, + "]": true, + "⦌": true, + "⦎": true, + "⦐": true, + "ř": true, + "ŗ": true, + "⌉": true, + "}": true, + "р": true, + "⤷": true, + "⥩": true, + "”": true, + "”": true, + "↳": true, + "ℜ": true, + "ℛ": true, + "ℜ": true, + "ℝ": true, + "▭": true, + "®": true, + "®": true, + "⥽": true, + "⌋": true, + "𝔯": true, + "⇁": true, + "⇀": true, + "⥬": true, + "ρ": true, + "ϱ": true, + "→": true, + "↣": true, + "⇁": true, + "⇀": true, + "⇄": true, + "⇌": true, + "⇉": true, + "↝": true, + "⋌": true, + "˚": true, + "≓": true, + "⇄": true, + "⇌": true, + "‏": true, + "⎱": true, + "⎱": true, + "⫮": true, + "⟭": true, + "⇾": true, + "⟧": true, + "⦆": true, + "𝕣": true, + "⨮": true, + "⨵": true, + ")": true, + "⦔": true, + "⨒": true, + "⇉": true, + "›": true, + "𝓇": true, + "↱": true, + "]": true, + "’": true, + "’": true, + "⋌": true, + "⋊": true, + "▹": true, + "⊵": true, + "▸": true, + "⧎": true, + "⥨": true, + "℞": true, + "ś": true, + "‚": true, + "≻": true, + "⪴": true, + "⪸": true, + "š": true, + "≽": true, + "⪰": true, + "ş": true, + "ŝ": true, + "⪶": true, + "⪺": true, + "⋩": true, + "⨓": true, + "≿": true, + "с": true, + "⋅": true, 
+ "⊡": true, + "⩦": true, + "⇘": true, + "⤥": true, + "↘": true, + "↘": true, + "§": true, + "§": true, + ";": true, + "⤩": true, + "∖": true, + "∖": true, + "✶": true, + "𝔰": true, + "⌢": true, + "♯": true, + "щ": true, + "ш": true, + "∣": true, + "∥": true, + "­": true, + "­": true, + "σ": true, + "ς": true, + "ς": true, + "∼": true, + "⩪": true, + "≃": true, + "≃": true, + "⪞": true, + "⪠": true, + "⪝": true, + "⪟": true, + "≆": true, + "⨤": true, + "⥲": true, + "←": true, + "∖": true, + "⨳": true, + "⧤": true, + "∣": true, + "⌣": true, + "⪪": true, + "⪬": true, + "⪬︀": true, + "ь": true, + "/": true, + "⧄": true, + "⌿": true, + "𝕤": true, + "♠": true, + "♠": true, + "∥": true, + "⊓": true, + "⊓︀": true, + "⊔": true, + "⊔︀": true, + "⊏": true, + "⊑": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊐": true, + "⊒": true, + "□": true, + "□": true, + "▪": true, + "▪": true, + "→": true, + "𝓈": true, + "∖": true, + "⌣": true, + "⋆": true, + "☆": true, + "★": true, + "ϵ": true, + "ϕ": true, + "¯": true, + "⊂": true, + "⫅": true, + "⪽": true, + "⊆": true, + "⫃": true, + "⫁": true, + "⫋": true, + "⊊": true, + "⪿": true, + "⥹": true, + "⊂": true, + "⊆": true, + "⫅": true, + "⊊": true, + "⫋": true, + "⫇": true, + "⫕": true, + "⫓": true, + "≻": true, + "⪸": true, + "≽": true, + "⪰": true, + "⪺": true, + "⪶": true, + "⋩": true, + "≿": true, + "∑": true, + "♪": true, + "¹": true, + "¹": true, + "²": true, + "²": true, + "³": true, + "³": true, + "⊃": true, + "⫆": true, + "⪾": true, + "⫘": true, + "⊇": true, + "⫄": true, + "⟉": true, + "⫗": true, + "⥻": true, + "⫂": true, + "⫌": true, + "⊋": true, + "⫀": true, + "⊃": true, + "⊇": true, + "⫆": true, + "⊋": true, + "⫌": true, + "⫈": true, + "⫔": true, + "⫖": true, + "⇙": true, + "⤦": true, + "↙": true, + "↙": true, + "⤪": true, + "ß": true, + "ß": true, + "⌖": true, + "τ": true, + "⎴": true, + "ť": true, + "ţ": true, + "т": true, + "⃛": true, + "⌕": true, + "𝔱": true, + "∴": true, + "∴": true, + "θ": true, + "ϑ": true, + "ϑ": true, + "≈": true, + "∼": true, + " ": true, + "≈": true, + "∼": true, + "þ": true, + "þ": true, + "˜": true, + "×": true, + "×": true, + "⊠": true, + "⨱": true, + "⨰": true, + "∭": true, + "⤨": true, + "⊤": true, + "⌶": true, + "⫱": true, + "𝕥": true, + "⫚": true, + "⤩": true, + "‴": true, + "™": true, + "▵": true, + "▿": true, + "◃": true, + "⊴": true, + "≜": true, + "▹": true, + "⊵": true, + "◬": true, + "≜": true, + "⨺": true, + "⨹": true, + "⧍": true, + "⨻": true, + "⏢": true, + "𝓉": true, + "ц": true, + "ћ": true, + "ŧ": true, + "≬": true, + "↞": true, + "↠": true, + "⇑": true, + "⥣": true, + "ú": true, + "ú": true, + "↑": true, + "ў": true, + "ŭ": true, + "û": true, + "û": true, + "у": true, + "⇅": true, + "ű": true, + "⥮": true, + "⥾": true, + "𝔲": true, + "ù": true, + "ù": true, + "↿": true, + "↾": true, + "▀": true, + "⌜": true, + "⌜": true, + "⌏": true, + "◸": true, + "ū": true, + "¨": true, + "¨": true, + "ų": true, + "𝕦": true, + "↑": true, + "↕": true, + "↿": true, + "↾": true, + "⊎": true, + "υ": true, + "ϒ": true, + "υ": true, + "⇈": true, + "⌝": true, + "⌝": true, + "⌎": true, + "ů": true, + "◹": true, + "𝓊": true, + "⋰": true, + "ũ": true, + "▵": true, + "▴": true, + "⇈": true, + "ü": true, + "ü": true, + "⦧": true, + "⇕": true, + "⫨": true, + "⫩": true, + "⊨": true, + "⦜": true, + "ϵ": true, + "ϰ": true, + "∅": true, + "ϕ": true, + "ϖ": true, + "∝": true, + "↕": true, + "ϱ": true, + "ς": true, + "⊊︀": true, + "⫋︀": true, + "⊋︀": true, + "⫌︀": true, + "ϑ": true, + "⊲": true, + "⊳": true, + "в": 
true, + "⊢": true, + "∨": true, + "⊻": true, + "≚": true, + "⋮": true, + "|": true, + "|": true, + "𝔳": true, + "⊲": true, + "⊂⃒": true, + "⊃⃒": true, + "𝕧": true, + "∝": true, + "⊳": true, + "𝓋": true, + "⫋︀": true, + "⊊︀": true, + "⫌︀": true, + "⊋︀": true, + "⦚": true, + "ŵ": true, + "⩟": true, + "∧": true, + "≙": true, + "℘": true, + "𝔴": true, + "𝕨": true, + "℘": true, + "≀": true, + "≀": true, + "𝓌": true, + "⋂": true, + "◯": true, + "⋃": true, + "▽": true, + "𝔵": true, + "⟺": true, + "⟷": true, + "ξ": true, + "⟸": true, + "⟵": true, + "⟼": true, + "⋻": true, + "⨀": true, + "𝕩": true, + "⨁": true, + "⨂": true, + "⟹": true, + "⟶": true, + "𝓍": true, + "⨆": true, + "⨄": true, + "△": true, + "⋁": true, + "⋀": true, + "ý": true, + "ý": true, + "я": true, + "ŷ": true, + "ы": true, + "¥": true, + "¥": true, + "𝔶": true, + "ї": true, + "𝕪": true, + "𝓎": true, + "ю": true, + "ÿ": true, + "ÿ": true, + "ź": true, + "ž": true, + "з": true, + "ż": true, + "ℨ": true, + "ζ": true, + "𝔷": true, + "ж": true, + "⇝": true, + "𝕫": true, + "𝓏": true, + "‍": true, + "‌": true, +} diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go index 6385f27cb6a..6ab60102c9b 100644 --- a/vendor/github.com/russross/blackfriday/v2/esc.go +++ b/vendor/github.com/russross/blackfriday/v2/esc.go @@ -13,13 +13,27 @@ var htmlEscaper = [256][]byte{ } func escapeHTML(w io.Writer, s []byte) { + escapeEntities(w, s, false) +} + +func escapeAllHTML(w io.Writer, s []byte) { + escapeEntities(w, s, true) +} + +func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) { var start, end int for end < len(s) { escSeq := htmlEscaper[s[end]] if escSeq != nil { - w.Write(s[start:end]) - w.Write(escSeq) - start = end + 1 + isEntity, entityEnd := nodeIsEntity(s, end) + if isEntity && !escapeValidEntities { + w.Write(s[start : entityEnd+1]) + start = entityEnd + 1 + } else { + w.Write(s[start:end]) + w.Write(escSeq) + start = end + 1 + } } end++ } @@ -28,6 +42,28 @@ func escapeHTML(w io.Writer, s []byte) { } } +func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) { + isEntity = false + endEntityPos = end + 1 + + if s[end] == '&' { + for endEntityPos < len(s) { + if s[endEntityPos] == ';' { + if entities[string(s[end:endEntityPos+1])] { + isEntity = true + break + } + } + if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' { + break + } + endEntityPos++ + } + } + + return isEntity, endEntityPos +} + func escLink(w io.Writer, text []byte) { unesc := html.UnescapeString(string(text)) escapeHTML(w, []byte(unesc)) diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go index 284c87184f7..cb4f26e30fd 100644 --- a/vendor/github.com/russross/blackfriday/v2/html.go +++ b/vendor/github.com/russross/blackfriday/v2/html.go @@ -132,7 +132,10 @@ func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { } if params.FootnoteReturnLinkContents == "" { - params.FootnoteReturnLinkContents = `[return]` + // U+FE0E is VARIATION SELECTOR-15. + // It suppresses automatic emoji presentation of the preceding + // U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS. 
+ params.FootnoteReturnLinkContents = "↩\ufe0e" } return &HTMLRenderer{ @@ -616,7 +619,7 @@ func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkSt } case Code: r.out(w, codeTag) - escapeHTML(w, node.Literal) + escapeAllHTML(w, node.Literal) r.out(w, codeCloseTag) case Document: break @@ -762,7 +765,7 @@ func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkSt r.cr(w) r.out(w, preTag) r.tag(w, codeTag[:len(codeTag)-1], attrs) - escapeHTML(w, node.Literal) + escapeAllHTML(w, node.Literal) r.out(w, codeCloseTag) r.out(w, preCloseTag) if node.Parent.Type != Item { diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go index 4ed2907921e..d45bd941726 100644 --- a/vendor/github.com/russross/blackfriday/v2/inline.go +++ b/vendor/github.com/russross/blackfriday/v2/inline.go @@ -278,7 +278,7 @@ func link(p *Markdown, data []byte, offset int) (int, *Node) { case data[i] == '\n': textHasNl = true - case data[i-1] == '\\': + case isBackslashEscaped(data, i): continue case data[i] == '[': diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go index 51b9e8c1b53..04e6050ceea 100644 --- a/vendor/github.com/russross/blackfriday/v2/node.go +++ b/vendor/github.com/russross/blackfriday/v2/node.go @@ -199,7 +199,8 @@ func (n *Node) InsertBefore(sibling *Node) { } } -func (n *Node) isContainer() bool { +// IsContainer returns true if 'n' can contain children. +func (n *Node) IsContainer() bool { switch n.Type { case Document: fallthrough @@ -238,6 +239,11 @@ func (n *Node) isContainer() bool { } } +// IsLeaf returns true if 'n' is a leaf node. +func (n *Node) IsLeaf() bool { + return !n.IsContainer() +} + func (n *Node) canContain(t NodeType) bool { if n.Type == List { return t == Item @@ -309,11 +315,11 @@ func newNodeWalker(root *Node) *nodeWalker { } func (nw *nodeWalker) next() { - if (!nw.current.isContainer() || !nw.entering) && nw.current == nw.root { + if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { nw.current = nil return } - if nw.entering && nw.current.isContainer() { + if nw.entering && nw.current.IsContainer() { if nw.current.FirstChild != nil { nw.current = nw.current.FirstChild nw.entering = true diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml b/vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml deleted file mode 100644 index 93b1fcdb31a..00000000000 --- a/vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -sudo: false -language: go -go: - - 1.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... 
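
The node.go hunk above exports the former isContainer helper as IsContainer and adds IsLeaf, both with doc comments. As a quick illustration only (not part of this patch), here is a minimal sketch of how downstream code might use the newly exported helpers to classify nodes while walking a parsed document; the Parse/Walk calls are the standard blackfriday v2 API, and the sample input and counters are assumptions made up for the example.

```Go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Hypothetical input, used only to produce a small AST to walk.
	md := []byte("# Title\n\nSome *emphasis* and `code`.\n")

	parser := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	root := parser.Parse(md)

	var containers, leaves int
	root.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		// Containers are visited on entering and leaving; count each node once,
		// on the entering visit (leaf nodes are only visited once anyway).
		if !entering {
			return blackfriday.GoToNext
		}
		if node.IsContainer() {
			containers++
		} else {
			leaves++ // same result as checking node.IsLeaf()
		}
		return blackfriday.GoToNext
	})

	fmt.Printf("containers=%d leaves=%d\n", containers, leaves)
}
```
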
diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/README.md b/vendor/github.com/shurcooL/sanitized_anchor_name/README.md deleted file mode 100644 index 670bf0fe6c7..00000000000 --- a/vendor/github.com/shurcooL/sanitized_anchor_name/README.md +++ /dev/null @@ -1,36 +0,0 @@ -sanitized_anchor_name -===================== - -[![Build Status](https://travis-ci.org/shurcooL/sanitized_anchor_name.svg?branch=master)](https://travis-ci.org/shurcooL/sanitized_anchor_name) [![GoDoc](https://godoc.org/github.com/shurcooL/sanitized_anchor_name?status.svg)](https://godoc.org/github.com/shurcooL/sanitized_anchor_name) - -Package sanitized_anchor_name provides a func to create sanitized anchor names. - -Its logic can be reused by multiple packages to create interoperable anchor names -and links to those anchors. - -At this time, it does not try to ensure that generated anchor names -are unique, that responsibility falls on the caller. - -Installation ------------- - -```bash -go get -u github.com/shurcooL/sanitized_anchor_name -``` - -Example -------- - -```Go -anchorName := sanitized_anchor_name.Create("This is a header") - -fmt.Println(anchorName) - -// Output: -// this-is-a-header -``` - -License -------- - -- [MIT License](LICENSE) diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/go.mod b/vendor/github.com/shurcooL/sanitized_anchor_name/go.mod deleted file mode 100644 index 1e255347593..00000000000 --- a/vendor/github.com/shurcooL/sanitized_anchor_name/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/shurcooL/sanitized_anchor_name diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/main.go b/vendor/github.com/shurcooL/sanitized_anchor_name/main.go deleted file mode 100644 index 6a77d124317..00000000000 --- a/vendor/github.com/shurcooL/sanitized_anchor_name/main.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package sanitized_anchor_name provides a func to create sanitized anchor names. -// -// Its logic can be reused by multiple packages to create interoperable anchor names -// and links to those anchors. -// -// At this time, it does not try to ensure that generated anchor names -// are unique, that responsibility falls on the caller. -package sanitized_anchor_name // import "github.com/shurcooL/sanitized_anchor_name" - -import "unicode" - -// Create returns a sanitized anchor name for the given text. -func Create(text string) string { - var anchorName []rune - var futureDash = false - for _, r := range text { - switch { - case unicode.IsLetter(r) || unicode.IsNumber(r): - if futureDash && len(anchorName) > 0 { - anchorName = append(anchorName, '-') - } - futureDash = false - anchorName = append(anchorName, unicode.ToLower(r)) - default: - futureDash = true - } - } - return string(anchorName) -} diff --git a/vendor/golang.org/x/crypto/md4/md4.go b/vendor/golang.org/x/crypto/md4/md4.go new file mode 100644 index 00000000000..59d34806930 --- /dev/null +++ b/vendor/golang.org/x/crypto/md4/md4.go @@ -0,0 +1,122 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package md4 implements the MD4 hash algorithm as defined in RFC 1320. +// +// Deprecated: MD4 is cryptographically broken and should should only be used +// where compatibility with legacy systems, not security, is the goal. Instead, +// use a secure hash like SHA-256 (from crypto/sha256). 
+package md4 // import "golang.org/x/crypto/md4" + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.MD4, New) +} + +// The size of an MD4 checksum in bytes. +const Size = 16 + +// The blocksize of MD4 in bytes. +const BlockSize = 64 + +const ( + _Chunk = 64 + _Init0 = 0x67452301 + _Init1 = 0xEFCDAB89 + _Init2 = 0x98BADCFE + _Init3 = 0x10325476 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + s [4]uint32 + x [_Chunk]byte + nx int + len uint64 +} + +func (d *digest) Reset() { + d.s[0] = _Init0 + d.s[1] = _Init1 + d.s[2] = _Init2 + d.s[3] = _Init3 + d.nx = 0 + d.len = 0 +} + +// New returns a new hash.Hash computing the MD4 checksum. +func New() hash.Hash { + d := new(digest) + d.Reset() + return d +} + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := len(p) + if n > _Chunk-d.nx { + n = _Chunk - d.nx + } + for i := 0; i < n; i++ { + d.x[d.nx+i] = p[i] + } + d.nx += n + if d.nx == _Chunk { + _Block(d, d.x[0:]) + d.nx = 0 + } + p = p[n:] + } + n := _Block(d, p) + p = p[n:] + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0, so that caller can keep writing and summing. + d := new(digest) + *d = *d0 + + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + len := d.len + var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + d.Write(tmp[0 : 56-len%64]) + } else { + d.Write(tmp[0 : 64+56-len%64]) + } + + // Length in bits. + len <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(len >> (8 * i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + for _, s := range d.s { + in = append(in, byte(s>>0)) + in = append(in, byte(s>>8)) + in = append(in, byte(s>>16)) + in = append(in, byte(s>>24)) + } + return in +} diff --git a/vendor/golang.org/x/crypto/md4/md4block.go b/vendor/golang.org/x/crypto/md4/md4block.go new file mode 100644 index 00000000000..3fed475f3f6 --- /dev/null +++ b/vendor/golang.org/x/crypto/md4/md4block.go @@ -0,0 +1,89 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// MD4 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. + +package md4 + +var shift1 = []uint{3, 7, 11, 19} +var shift2 = []uint{3, 5, 9, 13} +var shift3 = []uint{3, 9, 11, 15} + +var xIndex2 = []uint{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15} +var xIndex3 = []uint{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15} + +func _Block(dig *digest, p []byte) int { + a := dig.s[0] + b := dig.s[1] + c := dig.s[2] + d := dig.s[3] + n := 0 + var X [16]uint32 + for len(p) >= _Chunk { + aa, bb, cc, dd := a, b, c, d + + j := 0 + for i := 0; i < 16; i++ { + X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 + j += 4 + } + + // If this needs to be made faster in the future, + // the usual trick is to unroll each of these + // loops by a factor of 4; that lets you replace + // the shift[] lookups with constants and, + // with suitable variable renaming in each + // unrolled body, delete the a, b, c, d = d, a, b, c + // (or you can let the optimizer do the renaming). + // + // The index variables are uint so that % by a power + // of two can be optimized easily by a compiler. 
+ + // Round 1. + for i := uint(0); i < 16; i++ { + x := i + s := shift1[i%4] + f := ((c ^ d) & b) ^ d + a += f + X[x] + a = a<>(32-s) + a, b, c, d = d, a, b, c + } + + // Round 2. + for i := uint(0); i < 16; i++ { + x := xIndex2[i] + s := shift2[i%4] + g := (b & c) | (b & d) | (c & d) + a += g + X[x] + 0x5a827999 + a = a<>(32-s) + a, b, c, d = d, a, b, c + } + + // Round 3. + for i := uint(0); i < 16; i++ { + x := xIndex3[i] + s := shift3[i%4] + h := b ^ c ^ d + a += h + X[x] + 0x6ed9eba1 + a = a<>(32-s) + a, b, c, d = d, a, b, c + } + + a += aa + b += bb + c += cc + d += dd + + p = p[_Chunk:] + n += _Chunk + } + + dig.s[0] = a + dig.s[1] = b + dig.s[2] = c + dig.s[3] = d + return n +} diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go new file mode 100644 index 00000000000..c2fef30afff --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -0,0 +1,66 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha3 implements the SHA-3 fixed-output-length hash functions and +// the SHAKE variable-output-length hash functions defined by FIPS-202. +// +// Both types of hash function use the "sponge" construction and the Keccak +// permutation. For a detailed specification see http://keccak.noekeon.org/ +// +// +// Guidance +// +// If you aren't sure what function you need, use SHAKE256 with at least 64 +// bytes of output. The SHAKE instances are faster than the SHA3 instances; +// the latter have to allocate memory to conform to the hash.Hash interface. +// +// If you need a secret-key MAC (message authentication code), prepend the +// secret key to the input, hash with SHAKE256 and read at least 32 bytes of +// output. +// +// +// Security strengths +// +// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security +// strength against preimage attacks of x bits. Since they only produce "x" +// bits of output, their collision-resistance is only "x/2" bits. +// +// The SHAKE-256 and -128 functions have a generic security strength of 256 and +// 128 bits against all attacks, provided that at least 2x bits of their output +// is used. Requesting more than 64 or 32 bytes of output, respectively, does +// not increase the collision-resistance of the SHAKE functions. +// +// +// The sponge construction +// +// A sponge builds a pseudo-random function from a public pseudo-random +// permutation, by applying the permutation to a state of "rate + capacity" +// bytes, but hiding "capacity" of the bytes. +// +// A sponge starts out with a zero state. To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// is copied out instead of input being XORed in. +// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. +// +// +// Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. 
SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 // import "golang.org/x/crypto/sha3" diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go new file mode 100644 index 00000000000..0d8043fd2a1 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +import ( + "hash" +) + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +func New224() hash.Hash { + if h := new224Asm(); h != nil { + return h + } + return &state{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() hash.Hash { + if h := new256Asm(); h != nil { + return h + } + return &state{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +func New384() hash.Hash { + if h := new384Asm(); h != nil { + return h + } + return &state{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +func New512() hash.Hash { + if h := new512Asm(); h != nil { + return h + } + return &state{rate: 72, outputLen: 64, dsbyte: 0x06} +} + +// NewLegacyKeccak256 creates a new Keccak-256 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New256 instead. +func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } + +// NewLegacyKeccak512 creates a new Keccak-512 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New512 instead. +func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} } + +// Sum224 returns the SHA3-224 digest of the data. +func Sum224(data []byte) (digest [28]byte) { + h := New224() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. 
+func Sum384(data []byte) (digest [48]byte) { + h := New384() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go new file mode 100644 index 00000000000..c74fc20fcb3 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go @@ -0,0 +1,28 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x +// +build !gc purego !s390x + +package sha3 + +import ( + "hash" +) + +// new224Asm returns an assembly implementation of SHA3-224 if available, +// otherwise it returns nil. +func new224Asm() hash.Hash { return nil } + +// new256Asm returns an assembly implementation of SHA3-256 if available, +// otherwise it returns nil. +func new256Asm() hash.Hash { return nil } + +// new384Asm returns an assembly implementation of SHA3-384 if available, +// otherwise it returns nil. +func new384Asm() hash.Hash { return nil } + +// new512Asm returns an assembly implementation of SHA3-512 if available, +// otherwise it returns nil. +func new512Asm() hash.Hash { return nil } diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go new file mode 100644 index 00000000000..0f4ae8bacff --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -0,0 +1,413 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc + +package sha3 + +// rc stores the round constants for use in the ι step. +var rc = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} + +// keccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +func keccakF1600(a *[25]uint64) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + for i := 0; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. 
+ + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[12] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[18] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[24] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[16] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[22] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[3] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[1] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[7] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[19] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[11] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[23] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[4] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[2] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[8] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[14] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[7] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[23] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[14] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[11] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[2] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[18] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[6] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[22] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[4] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ 
bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[1] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[8] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[24] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[12] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[3] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[19] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[22] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[8] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[19] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[1] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[12] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[23] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[16] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[2] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[24] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[6] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[3] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[14] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[7] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[18] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[4] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[2] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[3] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[4] 
^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[6] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[7] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[8] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[11] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[12] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[14] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[16] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[18] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[19] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[22] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[23] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[24] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go new file mode 100644 index 00000000000..248a38241ff --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc +// +build amd64,!purego,gc + +package sha3 + +// This function is implemented in keccakf_amd64.s. + +//go:noescape + +func keccakF1600(a *[25]uint64) diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s new file mode 100644 index 00000000000..4cfa54383bf --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -0,0 +1,391 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build amd64 && !purego && gc +// +build amd64,!purego,gc + +// This code was translated into a form compatible with 6a from the public +// domain sources at https://github.com/gvanas/KeccakCodePackage + +// Offsets in state +#define _ba (0*8) +#define _be (1*8) +#define _bi (2*8) +#define _bo (3*8) +#define _bu (4*8) +#define _ga (5*8) +#define _ge (6*8) +#define _gi (7*8) +#define _go (8*8) +#define _gu (9*8) +#define _ka (10*8) +#define _ke (11*8) +#define _ki (12*8) +#define _ko (13*8) +#define _ku (14*8) +#define _ma (15*8) +#define _me (16*8) +#define _mi (17*8) +#define _mo (18*8) +#define _mu (19*8) +#define _sa (20*8) +#define _se (21*8) +#define _si (22*8) +#define _so (23*8) +#define _su (24*8) + +// Temporary registers +#define rT1 AX + +// Round vars +#define rpState DI +#define rpStack SP + +#define rDa BX +#define rDe CX +#define rDi DX +#define rDo R8 +#define rDu R9 + +#define rBa R10 +#define rBe R11 +#define rBi R12 +#define rBo R13 +#define rBu R14 + +#define rCa SI +#define rCe BP +#define rCi rBi +#define rCo rBo +#define rCu R15 + +#define MOVQ_RBI_RCE MOVQ rBi, rCe +#define XORQ_RT1_RCA XORQ rT1, rCa +#define XORQ_RT1_RCE XORQ rT1, rCe +#define XORQ_RBA_RCU XORQ rBa, rCu +#define XORQ_RBE_RCU XORQ rBe, rCu +#define XORQ_RDU_RCU XORQ rDu, rCu +#define XORQ_RDA_RCA XORQ rDa, rCa +#define XORQ_RDE_RCE XORQ rDe, rCe + +#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \ + /* Prepare round */ \ + MOVQ rCe, rDa; \ + ROLQ $1, rDa; \ + \ + MOVQ _bi(iState), rCi; \ + XORQ _gi(iState), rDi; \ + XORQ rCu, rDa; \ + XORQ _ki(iState), rCi; \ + XORQ _mi(iState), rDi; \ + XORQ rDi, rCi; \ + \ + MOVQ rCi, rDe; \ + ROLQ $1, rDe; \ + \ + MOVQ _bo(iState), rCo; \ + XORQ _go(iState), rDo; \ + XORQ rCa, rDe; \ + XORQ _ko(iState), rCo; \ + XORQ _mo(iState), rDo; \ + XORQ rDo, rCo; \ + \ + MOVQ rCo, rDi; \ + ROLQ $1, rDi; \ + \ + MOVQ rCu, rDo; \ + XORQ rCe, rDi; \ + ROLQ $1, rDo; \ + \ + MOVQ rCa, rDu; \ + XORQ rCi, rDo; \ + ROLQ $1, rDu; \ + \ + /* Result b */ \ + MOVQ _ba(iState), rBa; \ + MOVQ _ge(iState), rBe; \ + XORQ rCo, rDu; \ + MOVQ _ki(iState), rBi; \ + MOVQ _mo(iState), rBo; \ + MOVQ _su(iState), rBu; \ + XORQ rDe, rBe; \ + ROLQ $44, rBe; \ + XORQ rDi, rBi; \ + XORQ rDa, rBa; \ + ROLQ $43, rBi; \ + \ + MOVQ rBe, rCa; \ + MOVQ rc, rT1; \ + ORQ rBi, rCa; \ + XORQ rBa, rT1; \ + XORQ rT1, rCa; \ + MOVQ rCa, _ba(oState); \ + \ + XORQ rDu, rBu; \ + ROLQ $14, rBu; \ + MOVQ rBa, rCu; \ + ANDQ rBe, rCu; \ + XORQ rBu, rCu; \ + MOVQ rCu, _bu(oState); \ + \ + XORQ rDo, rBo; \ + ROLQ $21, rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _bi(oState); \ + \ + NOTQ rBi; \ + ORQ rBa, rBu; \ + ORQ rBo, rBi; \ + XORQ rBo, rBu; \ + XORQ rBe, rBi; \ + MOVQ rBu, _bo(oState); \ + MOVQ rBi, _be(oState); \ + B_RBI_RCE; \ + \ + /* Result g */ \ + MOVQ _gu(iState), rBe; \ + XORQ rDu, rBe; \ + MOVQ _ka(iState), rBi; \ + ROLQ $20, rBe; \ + XORQ rDa, rBi; \ + ROLQ $3, rBi; \ + MOVQ _bo(iState), rBa; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDo, rBa; \ + MOVQ _me(iState), rBo; \ + MOVQ _si(iState), rBu; \ + ROLQ $28, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ga(oState); \ + G_RT1_RCA; \ + \ + XORQ rDe, rBo; \ + ROLQ $45, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ge(oState); \ + G_RT1_RCE; \ + \ + XORQ rDi, rBu; \ + ROLQ $61, rBu; \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, 
_go(oState); \ + \ + ANDQ rBe, rBa; \ + XORQ rBu, rBa; \ + MOVQ rBa, _gu(oState); \ + NOTQ rBu; \ + G_RBA_RCU; \ + \ + ORQ rBu, rBo; \ + XORQ rBi, rBo; \ + MOVQ rBo, _gi(oState); \ + \ + /* Result k */ \ + MOVQ _be(iState), rBa; \ + MOVQ _gi(iState), rBe; \ + MOVQ _ko(iState), rBi; \ + MOVQ _mu(iState), rBo; \ + MOVQ _sa(iState), rBu; \ + XORQ rDi, rBe; \ + ROLQ $6, rBe; \ + XORQ rDo, rBi; \ + ROLQ $25, rBi; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDe, rBa; \ + ROLQ $1, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ka(oState); \ + K_RT1_RCA; \ + \ + XORQ rDu, rBo; \ + ROLQ $8, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ke(oState); \ + K_RT1_RCE; \ + \ + XORQ rDa, rBu; \ + ROLQ $18, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _ki(oState); \ + \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _ko(oState); \ + \ + ANDQ rBe, rBa; \ + XORQ rBu, rBa; \ + MOVQ rBa, _ku(oState); \ + K_RBA_RCU; \ + \ + /* Result m */ \ + MOVQ _ga(iState), rBe; \ + XORQ rDa, rBe; \ + MOVQ _ke(iState), rBi; \ + ROLQ $36, rBe; \ + XORQ rDe, rBi; \ + MOVQ _bu(iState), rBa; \ + ROLQ $10, rBi; \ + MOVQ rBe, rT1; \ + MOVQ _mi(iState), rBo; \ + ANDQ rBi, rT1; \ + XORQ rDu, rBa; \ + MOVQ _so(iState), rBu; \ + ROLQ $27, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ma(oState); \ + M_RT1_RCA; \ + \ + XORQ rDi, rBo; \ + ROLQ $15, rBo; \ + MOVQ rBi, rT1; \ + ORQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _me(oState); \ + M_RT1_RCE; \ + \ + XORQ rDo, rBu; \ + ROLQ $56, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ORQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _mi(oState); \ + \ + ORQ rBa, rBe; \ + XORQ rBu, rBe; \ + MOVQ rBe, _mu(oState); \ + \ + ANDQ rBa, rBu; \ + XORQ rBo, rBu; \ + MOVQ rBu, _mo(oState); \ + M_RBE_RCU; \ + \ + /* Result s */ \ + MOVQ _bi(iState), rBa; \ + MOVQ _go(iState), rBe; \ + MOVQ _ku(iState), rBi; \ + XORQ rDi, rBa; \ + MOVQ _ma(iState), rBo; \ + ROLQ $62, rBa; \ + XORQ rDo, rBe; \ + MOVQ _se(iState), rBu; \ + ROLQ $55, rBe; \ + \ + XORQ rDu, rBi; \ + MOVQ rBa, rDu; \ + XORQ rDe, rBu; \ + ROLQ $2, rBu; \ + ANDQ rBe, rDu; \ + XORQ rBu, rDu; \ + MOVQ rDu, _su(oState); \ + \ + ROLQ $39, rBi; \ + S_RDU_RCU; \ + NOTQ rBe; \ + XORQ rDa, rBo; \ + MOVQ rBe, rDa; \ + ANDQ rBi, rDa; \ + XORQ rBa, rDa; \ + MOVQ rDa, _sa(oState); \ + S_RDA_RCA; \ + \ + ROLQ $41, rBo; \ + MOVQ rBi, rDe; \ + ORQ rBo, rDe; \ + XORQ rBe, rDe; \ + MOVQ rDe, _se(oState); \ + S_RDE_RCE; \ + \ + MOVQ rBo, rDi; \ + MOVQ rBu, rDo; \ + ANDQ rBu, rDi; \ + ORQ rBa, rDo; \ + XORQ rBi, rDi; \ + XORQ rBo, rDo; \ + MOVQ rDi, _si(oState); \ + MOVQ rDo, _so(oState) \ + +// func keccakF1600(state *[25]uint64) +TEXT ·keccakF1600(SB), 0, $200-8 + MOVQ state+0(FP), rpState + + // Convert the user state into an internal state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + // Execute the KeccakF permutation + MOVQ _ba(rpState), rCa + MOVQ _be(rpState), rCe + MOVQ _bu(rpState), rCu + + XORQ _ga(rpState), rCa + XORQ _ge(rpState), rCe + XORQ _gu(rpState), rCu + + XORQ _ka(rpState), rCa + XORQ _ke(rpState), rCe + XORQ _ku(rpState), rCu + + XORQ _ma(rpState), rCa + XORQ _me(rpState), rCe + XORQ _mu(rpState), rCu + + XORQ _sa(rpState), rCa + XORQ _se(rpState), rCe + MOVQ _si(rpState), rDi + MOVQ _so(rpState), rDo + XORQ _su(rpState), rCu + + mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, 
XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, 
XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP) + + // Revert the internal state to the user state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + RET diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go new file mode 100644 index 00000000000..8b4453aac3c --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/register.go @@ -0,0 +1,19 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.4 +// +build go1.4 + +package sha3 + +import ( + "crypto" +) + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go new file mode 100644 index 00000000000..ba269a07300 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -0,0 +1,193 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. SHAKE-256 + // currently needs the largest buffer. 
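+	// For reference, the rates used in this package follow from the
+	// Keccak-f[1600] capacity split: rate = (1600 - 2*securityBits) / 8 bytes,
+	// giving 168 for SHAKE-128, 144 for SHA3-224, 136 for SHA3-256 and
+	// SHAKE-256, 104 for SHA3-384 and 72 for SHA3-512; maxRate = 168 is the
+	// SHAKE-128 rate. A hypothetical helper (illustrative only, not part of
+	// this package):
+	//
+	//	func rateBytes(securityBits int) int { return (1600 - 2*securityBits) / 8 }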
+ maxRate = 168 +) + +type state struct { + // Generic sponge components. + a [25]uint64 // main state of the hash + buf []byte // points into storage + rate int // the number of bytes of state to use + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + storage storageBuf + + // Specific to SHA-3 and SHAKE. + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *state) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *state) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the byte buffer, and setting Sponge.state to absorbing. +func (d *state) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.buf = d.storage.asBytes()[:0] +} + +func (d *state) clone() *state { + ret := *d + if ret.state == spongeAbsorbing { + ret.buf = ret.storage.asBytes()[:len(ret.buf)] + } else { + ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate] + } + + return &ret +} + +// permute applies the KeccakF-1600 permutation. It handles +// any input-output buffering. +func (d *state) permute() { + switch d.state { + case spongeAbsorbing: + // If we're absorbing, we need to xor the input into the state + // before applying the permutation. + xorIn(d, d.buf) + d.buf = d.storage.asBytes()[:0] + keccakF1600(&d.a) + case spongeSqueezing: + // If we're squeezing, we need to apply the permutatin before + // copying more output. + keccakF1600(&d.a) + d.buf = d.storage.asBytes()[:d.rate] + copyOut(d, d.buf) + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *state) padAndPermute(dsbyte byte) { + if d.buf == nil { + d.buf = d.storage.asBytes()[:0] + } + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in d.buf because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + d.buf = append(d.buf, dsbyte) + zerosStart := len(d.buf) + d.buf = d.storage.asBytes()[:d.rate] + for i := zerosStart; i < d.rate; i++ { + d.buf[i] = 0 + } + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. 
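+	// Worked example (illustrative, assuming SHA3-256 with rate = 136): if the
+	// current block holds 100 message bytes, dsbyte = 0x06 is appended at
+	// index 100, indices 101 through 135 are zeroed, and 0x80 is XORed into
+	// index 135. If the message already fills 135 bytes, the one remaining
+	// byte becomes 0x06 ^ 0x80 = 0x86.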
+ d.buf[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing + d.buf = d.storage.asBytes()[:d.rate] + copyOut(d, d.buf) +} + +// Write absorbs more data into the hash's state. It produces an error +// if more data is written to the ShakeHash after writing +func (d *state) Write(p []byte) (written int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: write to sponge after read") + } + if d.buf == nil { + d.buf = d.storage.asBytes()[:0] + } + written = len(p) + + for len(p) > 0 { + if len(d.buf) == 0 && len(p) >= d.rate { + // The fast path; absorb a full "rate" bytes of input and apply the permutation. + xorIn(d, p[:d.rate]) + p = p[d.rate:] + keccakF1600(&d.a) + } else { + // The slow path; buffer the input until we can fill the sponge, and then xor it in. + todo := d.rate - len(d.buf) + if todo > len(p) { + todo = len(p) + } + d.buf = append(d.buf, p[:todo]...) + p = p[todo:] + + // If the sponge is full, apply the permutation. + if len(d.buf) == d.rate { + d.permute() + } + } + } + + return +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *state) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. + if d.state == spongeAbsorbing { + d.padAndPermute(d.dsbyte) + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + n := copy(out, d.buf) + d.buf = d.buf[n:] + out = out[n:] + + // Apply the permutation if we've squeezed the sponge dry. + if len(d.buf) == 0 { + d.permute() + } + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. +func (d *state) Sum(in []byte) []byte { + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen) + dup.Read(hash) + return append(in, hash...) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go new file mode 100644 index 00000000000..4fcfc924ef6 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package sha3 + +// This file contains code for using the 'compute intermediate +// message digest' (KIMD) and 'compute last message digest' (KLMD) +// instructions to compute SHA-3 and SHAKE hashes on IBM Z. + +import ( + "hash" + + "golang.org/x/sys/cpu" +) + +// codes represent 7-bit KIMD/KLMD function codes as defined in +// the Principles of Operation. +type code uint64 + +const ( + // function codes for KIMD/KLMD + sha3_224 code = 32 + sha3_256 = 33 + sha3_384 = 34 + sha3_512 = 35 + shake_128 = 36 + shake_256 = 37 + nopad = 0x100 +) + +// kimd is a wrapper for the 'compute intermediate message digest' instruction. +// src must be a multiple of the rate for the given function code. +//go:noescape +func kimd(function code, chain *[200]byte, src []byte) + +// klmd is a wrapper for the 'compute last message digest' instruction. +// src padding is handled by the instruction. 
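+// As used by the methods below, the typical call sequence is: kimd repeatedly
+// while absorbing, always with a whole multiple of the rate; klmd once to pad
+// the buffered tail and squeeze the first output; and klmd with the nopad bit
+// set to keep squeezing SHAKE output. A rough sketch (fn, a, block, tail and
+// out are placeholder names):
+//
+//	kimd(fn, &a, block)          // absorb whole rate-sized blocks
+//	klmd(fn, &a, out, tail)      // pad the tail, squeeze the first output
+//	klmd(fn|nopad, &a, out, nil) // squeeze more output, no re-padding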
+//go:noescape +func klmd(function code, chain *[200]byte, dst, src []byte) + +type asmState struct { + a [200]byte // 1600 bit state + buf []byte // care must be taken to ensure cap(buf) is a multiple of rate + rate int // equivalent to block size + storage [3072]byte // underlying storage for buf + outputLen int // output length if fixed, 0 if not + function code // KIMD/KLMD function code + state spongeDirection // whether the sponge is absorbing or squeezing +} + +func newAsmState(function code) *asmState { + var s asmState + s.function = function + switch function { + case sha3_224: + s.rate = 144 + s.outputLen = 28 + case sha3_256: + s.rate = 136 + s.outputLen = 32 + case sha3_384: + s.rate = 104 + s.outputLen = 48 + case sha3_512: + s.rate = 72 + s.outputLen = 64 + case shake_128: + s.rate = 168 + case shake_256: + s.rate = 136 + default: + panic("sha3: unrecognized function code") + } + + // limit s.buf size to a multiple of s.rate + s.resetBuf() + return &s +} + +func (s *asmState) clone() *asmState { + c := *s + c.buf = c.storage[:len(s.buf):cap(s.buf)] + return &c +} + +// copyIntoBuf copies b into buf. It will panic if there is not enough space to +// store all of b. +func (s *asmState) copyIntoBuf(b []byte) { + bufLen := len(s.buf) + s.buf = s.buf[:len(s.buf)+len(b)] + copy(s.buf[bufLen:], b) +} + +// resetBuf points buf at storage, sets the length to 0 and sets cap to be a +// multiple of the rate. +func (s *asmState) resetBuf() { + max := (cap(s.storage) / s.rate) * s.rate + s.buf = s.storage[:0:max] +} + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (s *asmState) Write(b []byte) (int, error) { + if s.state != spongeAbsorbing { + panic("sha3: write to sponge after read") + } + length := len(b) + for len(b) > 0 { + if len(s.buf) == 0 && len(b) >= cap(s.buf) { + // Hash the data directly and push any remaining bytes + // into the buffer. + remainder := len(b) % s.rate + kimd(s.function, &s.a, b[:len(b)-remainder]) + if remainder != 0 { + s.copyIntoBuf(b[len(b)-remainder:]) + } + return length, nil + } + + if len(s.buf) == cap(s.buf) { + // flush the buffer + kimd(s.function, &s.a, s.buf) + s.buf = s.buf[:0] + } + + // copy as much as we can into the buffer + n := len(b) + if len(b) > cap(s.buf)-len(s.buf) { + n = cap(s.buf) - len(s.buf) + } + s.copyIntoBuf(b[:n]) + b = b[n:] + } + return length, nil +} + +// Read squeezes an arbitrary number of bytes from the sponge. 
+func (s *asmState) Read(out []byte) (n int, err error) { + n = len(out) + + // need to pad if we were absorbing + if s.state == spongeAbsorbing { + s.state = spongeSqueezing + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function, &s.a, out, s.buf) // len(out) may be 0 + s.buf = s.buf[:0] + return + } + + // write hash into buffer + max := cap(s.buf) + if max > len(out) { + max = (len(out)/s.rate)*s.rate + s.rate + } + klmd(s.function, &s.a, s.buf[:max], s.buf) + s.buf = s.buf[:max] + } + + for len(out) > 0 { + // flush the buffer + if len(s.buf) != 0 { + c := copy(out, s.buf) + out = out[c:] + s.buf = s.buf[c:] + continue + } + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function|nopad, &s.a, out, nil) + return + } + + // write hash into buffer + s.resetBuf() + if cap(s.buf) > len(out) { + s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate] + } + klmd(s.function|nopad, &s.a, s.buf, nil) + } + return +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (s *asmState) Sum(b []byte) []byte { + if s.outputLen == 0 { + panic("sha3: cannot call Sum on SHAKE functions") + } + + // Copy the state to preserve the original. + a := s.a + + // Hash the buffer. Note that we don't clear it because we + // aren't updating the state. + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) +} + +// Reset resets the Hash to its initial state. +func (s *asmState) Reset() { + for i := range s.a { + s.a[i] = 0 + } + s.resetBuf() + s.state = spongeAbsorbing +} + +// Size returns the number of bytes Sum will return. +func (s *asmState) Size() int { + return s.outputLen +} + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (s *asmState) BlockSize() int { + return s.rate +} + +// Clone returns a copy of the ShakeHash in its current state. +func (s *asmState) Clone() ShakeHash { + return s.clone() +} + +// new224Asm returns an assembly implementation of SHA3-224 if available, +// otherwise it returns nil. +func new224Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_224) + } + return nil +} + +// new256Asm returns an assembly implementation of SHA3-256 if available, +// otherwise it returns nil. +func new256Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_256) + } + return nil +} + +// new384Asm returns an assembly implementation of SHA3-384 if available, +// otherwise it returns nil. +func new384Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_384) + } + return nil +} + +// new512Asm returns an assembly implementation of SHA3-512 if available, +// otherwise it returns nil. +func new512Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_512) + } + return nil +} + +// newShake128Asm returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns nil. +func newShake128Asm() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_128) + } + return nil +} + +// newShake256Asm returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns nil. 
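+// The exported constructors use these as a fast path; for example, NewShake256
+// in shake.go tries newShake256Asm first and falls back to the generic sponge
+// when it returns nil (which is what the stubs in shake_generic.go do on
+// targets without the IBM Z SHA-3 facility):
+//
+//	if h := newShake256Asm(); h != nil {
+//		return h
+//	}
+//	return &state{rate: rate256, dsbyte: dsbyteShake}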
+func newShake256Asm() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_256) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s new file mode 100644 index 00000000000..a0e051b0451 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -0,0 +1,34 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +#include "textflag.h" + +// func kimd(function code, chain *[200]byte, src []byte) +TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG src+16(FP), R2, R3 // R2=base, R3=len + +continue: + WORD $0xB93E0002 // KIMD --, R2 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET + +// func klmd(function code, chain *[200]byte, dst, src []byte) +TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 + // TODO: SHAKE support + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG dst+16(FP), R2, R3 // R2=base, R3=len + LMG src+40(FP), R4, R5 // R4=base, R5=len + +continue: + WORD $0xB93F0024 // KLMD R2, R4 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go new file mode 100644 index 00000000000..d7be2954ab2 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file defines the ShakeHash interface, and provides +// functions for creating SHAKE and cSHAKE instances, as well as utility +// functions for hashing bytes to arbitrary-length output. +// +// +// SHAKE implementation is based on FIPS PUB 202 [1] +// cSHAKE implementations is based on NIST SP 800-185 [2] +// +// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// [2] https://doi.org/10.6028/NIST.SP.800-185 + +import ( + "encoding/binary" + "io" +) + +// ShakeHash defines the interface to hash functions that +// support arbitrary-length output. +type ShakeHash interface { + // Write absorbs more data into the hash's state. It panics if input is + // written to it after output has been read from it. + io.Writer + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum) + // It never returns an error. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash + + // Reset resets the ShakeHash to its initial state. + Reset() +} + +// cSHAKE specific context +type cshakeState struct { + *state // SHA-3 state context and Read/Write operations + + // initBlock is the cSHAKE specific initialization set of bytes. It is initialized + // by newCShake function and stores concatenation of N followed by S, encoded + // by the method specified in 3.3 of [1]. + // It is stored here in order for Reset() to be able to put context into + // initial state. 
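+	// Concretely (per NIST SP 800-185, as assembled by newCShake below):
+	//
+	//	initBlock = left_encode(len(N)*8) || N || left_encode(len(S)*8) || S
+	//
+	// and the value actually absorbed on construction and on Reset is
+	// bytepad(initBlock, rate): left_encode(rate) || initBlock, zero-padded
+	// to a whole multiple of the rate.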
+ initBlock []byte +} + +// Consts for configuring initial SHA-3 state +const ( + dsbyteShake = 0x1f + dsbyteCShake = 0x04 + rate128 = 168 + rate256 = 136 +) + +func bytepad(input []byte, w int) []byte { + // leftEncode always returns max 9 bytes + buf := make([]byte, 0, 9+len(input)+w) + buf = append(buf, leftEncode(uint64(w))...) + buf = append(buf, input...) + padlen := w - (len(buf) % w) + return append(buf, make([]byte, padlen)...) +} + +func leftEncode(value uint64) []byte { + var b [9]byte + binary.BigEndian.PutUint64(b[1:], value) + // Trim all but last leading zero bytes + i := byte(1) + for i < 8 && b[i] == 0 { + i++ + } + // Prepend number of encoded bytes + b[i-1] = 9 - i + return b[i-1:] +} + +func newCShake(N, S []byte, rate int, dsbyte byte) ShakeHash { + c := cshakeState{state: &state{rate: rate, dsbyte: dsbyte}} + + // leftEncode returns max 9 bytes + c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) + c.initBlock = append(c.initBlock, N...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, S...) + c.Write(bytepad(c.initBlock, c.rate)) + return &c +} + +// Reset resets the hash to initial state. +func (c *cshakeState) Reset() { + c.state.Reset() + c.Write(bytepad(c.initBlock, c.rate)) +} + +// Clone returns copy of a cSHAKE context within its current state. +func (c *cshakeState) Clone() ShakeHash { + b := make([]byte, len(c.initBlock)) + copy(b, c.initBlock) + return &cshakeState{state: c.clone(), initBlock: b} +} + +// Clone returns copy of SHAKE context within its current state. +func (c *state) Clone() ShakeHash { + return c.clone() +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +func NewShake128() ShakeHash { + if h := newShake128Asm(); h != nil { + return h + } + return &state{rate: rate128, dsbyte: dsbyteShake} +} + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() ShakeHash { + if h := newShake256Asm(); h != nil { + return h + } + return &state{rate: rate256, dsbyte: dsbyteShake} +} + +// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, +// a customizable variant of SHAKE128. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake128. +func NewCShake128(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake128() + } + return newCShake(N, S, rate128, dsbyteCShake) +} + +// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, +// a customizable variant of SHAKE256. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake256. 
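+// A minimal usage sketch (the customization strings, msg and the 64-byte
+// output length are placeholders, not values mandated by this package):
+//
+//	h1 := NewCShake256(nil, []byte("context-A"))
+//	h2 := NewCShake256(nil, []byte("context-B"))
+//	h1.Write(msg)
+//	h2.Write(msg)
+//	out1, out2 := make([]byte, 64), make([]byte, 64)
+//	h1.Read(out1)
+//	h2.Read(out2) // unrelated to out1, even though msg is identical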
+func NewCShake256(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake256() + } + return newCShake(N, S, rate256, dsbyteCShake) +} + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + h.Write(data) + h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. +func ShakeSum256(hash, data []byte) { + h := NewShake256() + h.Write(data) + h.Read(hash) +} diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go new file mode 100644 index 00000000000..5c0710ef98f --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x +// +build !gc purego !s390x + +package sha3 + +// newShake128Asm returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns nil. +func newShake128Asm() ShakeHash { + return nil +} + +// newShake256Asm returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns nil. +func newShake256Asm() ShakeHash { + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go new file mode 100644 index 00000000000..59c8eb94186 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !386 && !ppc64le) || purego +// +build !amd64,!386,!ppc64le purego + +package sha3 + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate]byte + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(b) +} + +var ( + xorIn = xorInGeneric + copyOut = copyOutGeneric + xorInUnaligned = xorInGeneric + copyOutUnaligned = copyOutGeneric +) + +const xorImplementationUnaligned = "generic" diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go new file mode 100644 index 00000000000..8d947711272 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor_generic.go @@ -0,0 +1,28 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +import "encoding/binary" + +// xorInGeneric xors the bytes in buf into the state; it +// makes no non-portable assumptions about memory layout +// or alignment. +func xorInGeneric(d *state, buf []byte) { + n := len(buf) / 8 + + for i := 0; i < n; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } +} + +// copyOutGeneric copies uint64s to a byte buffer. +func copyOutGeneric(d *state, b []byte) { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } +} diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go new file mode 100644 index 00000000000..1ce606246d5 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -0,0 +1,68 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build (amd64 || 386 || ppc64le) && !purego +// +build amd64 386 ppc64le +// +build !purego + +package sha3 + +import "unsafe" + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate / 8]uint64 + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(unsafe.Pointer(b)) +} + +// xorInUnaligned uses unaligned reads and writes to update d.a to contain d.a +// XOR buf. +func xorInUnaligned(d *state, buf []byte) { + n := len(buf) + bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] + if n >= 72 { + d.a[0] ^= bw[0] + d.a[1] ^= bw[1] + d.a[2] ^= bw[2] + d.a[3] ^= bw[3] + d.a[4] ^= bw[4] + d.a[5] ^= bw[5] + d.a[6] ^= bw[6] + d.a[7] ^= bw[7] + d.a[8] ^= bw[8] + } + if n >= 104 { + d.a[9] ^= bw[9] + d.a[10] ^= bw[10] + d.a[11] ^= bw[11] + d.a[12] ^= bw[12] + } + if n >= 136 { + d.a[13] ^= bw[13] + d.a[14] ^= bw[14] + d.a[15] ^= bw[15] + d.a[16] ^= bw[16] + } + if n >= 144 { + d.a[17] ^= bw[17] + } + if n >= 168 { + d.a[18] ^= bw[18] + d.a[19] ^= bw[19] + d.a[20] ^= bw[20] + } +} + +func copyOutUnaligned(d *state, buf []byte) { + ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) + copy(buf, ab[:]) +} + +var ( + xorIn = xorInUnaligned + copyOut = copyOutUnaligned +) + +const xorImplementationUnaligned = "unaligned" diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 8cfd6063e72..1473e1296d0 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -1,7 +1,7 @@ # OAuth2 for Go +[![Go Reference](https://pkg.go.dev/badge/golang.org/x/oauth2.svg)](https://pkg.go.dev/golang.org/x/oauth2) [![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) -[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) oauth2 package contains a client implementation for OAuth 2.0 spec. @@ -14,17 +14,17 @@ go get golang.org/x/oauth2 Or you can manually git clone the repository to `$(go env GOPATH)/src/golang.org/x/oauth2`. -See godoc for further documentation and examples. +See pkg.go.dev for further documentation and examples. -* [godoc.org/golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) +* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) +* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) ## Policy for new packages We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a single endpoint, add it to the -[godoc.org/golang.org/x/oauth2/endpoints](https://godoc.org/golang.org/x/oauth2/endpoints) +[pkg.go.dev/golang.org/x/oauth2/endpoints](https://pkg.go.dev/golang.org/x/oauth2/endpoints) package. 
## Report Issues / Send Patches diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod index b3457815528..2b13f0b34cb 100644 --- a/vendor/golang.org/x/oauth2/go.mod +++ b/vendor/golang.org/x/oauth2/go.mod @@ -3,8 +3,7 @@ module golang.org/x/oauth2 go 1.11 require ( - cloud.google.com/go v0.34.0 - golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e - golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect - google.golang.org/appengine v1.4.0 + cloud.google.com/go v0.65.0 + golang.org/x/net v0.0.0-20200822124328-c89045814202 + google.golang.org/appengine v1.6.6 ) diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum index 6f0079b0d7f..eab5833c421 100644 --- a/vendor/golang.org/x/oauth2/go.sum +++ b/vendor/golang.org/x/oauth2/go.sum @@ -1,12 +1,361 @@ -cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod 
h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf 
v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= 
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= 
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go index 7434871880a..e1755d1d9ac 100644 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal diff --git a/vendor/golang.org/x/term/codereview.cfg b/vendor/golang.org/x/term/codereview.cfg new file mode 100644 index 00000000000..3f8b14b64e8 --- /dev/null +++ b/vendor/golang.org/x/term/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/term/go.mod b/vendor/golang.org/x/term/go.mod index d45f52851ec..edf0e5b1db4 100644 --- a/vendor/golang.org/x/term/go.mod +++ b/vendor/golang.org/x/term/go.mod @@ -1,5 +1,5 @@ module golang.org/x/term -go 1.11 +go 1.17 -require golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 +require golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 diff --git a/vendor/golang.org/x/term/go.sum b/vendor/golang.org/x/term/go.sum index de9e09c6547..ff132135ec6 100644 --- a/vendor/golang.org/x/term/go.sum +++ b/vendor/golang.org/x/term/go.sum @@ -1,2 +1,2 @@ -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/vendor/golang.org/x/term/term.go b/vendor/golang.org/x/term/term.go index 2a4ccf80121..d5927088082 100644 --- a/vendor/golang.org/x/term/term.go +++ b/vendor/golang.org/x/term/term.go @@ -7,11 +7,13 @@ // // Putting a terminal into raw mode is the most common requirement: // -// oldState, err := term.MakeRaw(0) +// oldState, err := term.MakeRaw(int(os.Stdin.Fd())) // if err != nil { // panic(err) // } -// defer term.Restore(0, oldState) +// defer term.Restore(int(os.Stdin.Fd()), oldState) +// +// Note that on non-Unix systems os.Stdin.Fd() may not be 0. package term // State contains the state of a terminal. diff --git a/vendor/golang.org/x/term/term_solaris.go b/vendor/golang.org/x/term/term_solaris.go deleted file mode 100644 index b9da29744b7..00000000000 --- a/vendor/golang.org/x/term/term_solaris.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -import ( - "io" - "syscall" - - "golang.org/x/sys/unix" -) - -// State contains the state of a terminal. -type state struct { - termios unix.Termios -} - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermio(fd, unix.TCGETA) - return err == nil -} - -func readPassword(fd int) ([]byte, error) { - // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c - val, err := unix.IoctlGetTermios(fd, unix.TCGETS) - if err != nil { - return nil, err - } - oldState := *val - - newState := oldState - newState.Lflag &^= syscall.ECHO - newState.Lflag |= syscall.ICANON | syscall.ISIG - newState.Iflag |= syscall.ICRNL - err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) - if err != nil { - return nil, err - } - - defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) - - var buf [16]byte - var ret []byte - for { - n, err := syscall.Read(fd, buf[:]) - if err != nil { - return nil, err - } - if n == 0 { - if len(ret) == 0 { - return nil, io.EOF - } - break - } - if buf[n-1] == '\n' { - n-- - } - ret = append(ret, buf[:n]...) 
- if n < len(buf) { - break - } - } - - return ret, nil -} - -func makeRaw(fd int) (*State, error) { - // see http://cr.illumos.org/~webrev/andy_js/1060/ - termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) - if err != nil { - return nil, err - } - - oldState := State{state{termios: *termios}} - - termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON - termios.Oflag &^= unix.OPOST - termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN - termios.Cflag &^= unix.CSIZE | unix.PARENB - termios.Cflag |= unix.CS8 - termios.Cc[unix.VMIN] = 1 - termios.Cc[unix.VTIME] = 0 - - if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { - return nil, err - } - - return &oldState, nil -} - -func restore(fd int, oldState *State) error { - return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) -} - -func getState(fd int) (*State, error) { - termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) - if err != nil { - return nil, err - } - - return &State{state{termios: *termios}}, nil -} - -func getSize(fd int) (width, height int, err error) { - ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) - if err != nil { - return 0, 0, err - } - return int(ws.Col), int(ws.Row), nil -} diff --git a/vendor/golang.org/x/term/term_unix.go b/vendor/golang.org/x/term/term_unix.go index 6849b6ee5ba..a4e31ab1b29 100644 --- a/vendor/golang.org/x/term/term_unix.go +++ b/vendor/golang.org/x/term/term_unix.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd zos +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package term diff --git a/vendor/golang.org/x/term/term_unix_linux.go b/vendor/golang.org/x/term/term_unix_linux.go deleted file mode 100644 index 2d5efd26ad4..00000000000 --- a/vendor/golang.org/x/term/term_unix_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS -const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/term/term_unix_aix.go b/vendor/golang.org/x/term/term_unix_other.go similarity index 63% rename from vendor/golang.org/x/term/term_unix_aix.go rename to vendor/golang.org/x/term/term_unix_other.go index 2d5efd26ad4..1e8955c934f 100644 --- a/vendor/golang.org/x/term/term_unix_aix.go +++ b/vendor/golang.org/x/term/term_unix_other.go @@ -1,7 +1,10 @@ -// Copyright 2019 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || linux || solaris || zos +// +build aix linux solaris zos + package term import "golang.org/x/sys/unix" diff --git a/vendor/golang.org/x/term/term_unix_zos.go b/vendor/golang.org/x/term/term_unix_zos.go deleted file mode 100644 index b85ab899893..00000000000 --- a/vendor/golang.org/x/term/term_unix_zos.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS -const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/text/internal/language/language.go b/vendor/golang.org/x/text/internal/language/language.go index f41aedcfc8a..6105bc7fadc 100644 --- a/vendor/golang.org/x/text/internal/language/language.go +++ b/vendor/golang.org/x/text/internal/language/language.go @@ -251,6 +251,13 @@ func (t Tag) Parent() Tag { // ParseExtension parses s as an extension and returns it on success. func ParseExtension(s string) (ext string, err error) { + defer func() { + if recover() != nil { + ext = "" + err = ErrSyntax + } + }() + scan := makeScannerString(s) var end int if n := len(scan.token); n != 1 { @@ -461,7 +468,14 @@ func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) { // ParseBase parses a 2- or 3-letter ISO 639 code. // It returns a ValueError if s is a well-formed but unknown language identifier // or another error if another error occurred. -func ParseBase(s string) (Language, error) { +func ParseBase(s string) (l Language, err error) { + defer func() { + if recover() != nil { + l = 0 + err = ErrSyntax + } + }() + if n := len(s); n < 2 || 3 < n { return 0, ErrSyntax } @@ -472,7 +486,14 @@ func ParseBase(s string) (Language, error) { // ParseScript parses a 4-letter ISO 15924 code. // It returns a ValueError if s is a well-formed but unknown script identifier // or another error if another error occurred. -func ParseScript(s string) (Script, error) { +func ParseScript(s string) (scr Script, err error) { + defer func() { + if recover() != nil { + scr = 0 + err = ErrSyntax + } + }() + if len(s) != 4 { return 0, ErrSyntax } @@ -489,7 +510,14 @@ func EncodeM49(r int) (Region, error) { // ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code. // It returns a ValueError if s is a well-formed but unknown region identifier // or another error if another error occurred. -func ParseRegion(s string) (Region, error) { +func ParseRegion(s string) (r Region, err error) { + defer func() { + if recover() != nil { + r = 0 + err = ErrSyntax + } + }() + if n := len(s); n < 2 || 3 < n { return 0, ErrSyntax } @@ -578,7 +606,14 @@ type Variant struct { // ParseVariant parses and returns a Variant. An error is returned if s is not // a valid variant. 
-func ParseVariant(s string) (Variant, error) { +func ParseVariant(s string) (v Variant, err error) { + defer func() { + if recover() != nil { + v = Variant{} + err = ErrSyntax + } + }() + s = strings.ToLower(s) if id, ok := variantIndex[s]; ok { return Variant{id, s}, nil diff --git a/vendor/golang.org/x/text/internal/language/parse.go b/vendor/golang.org/x/text/internal/language/parse.go index c696fd0bd86..47ee0fed174 100644 --- a/vendor/golang.org/x/text/internal/language/parse.go +++ b/vendor/golang.org/x/text/internal/language/parse.go @@ -232,6 +232,13 @@ func Parse(s string) (t Tag, err error) { if s == "" { return Und, ErrSyntax } + defer func() { + if recover() != nil { + t = Und + err = ErrSyntax + return + } + }() if len(s) <= maxAltTaglen { b := [maxAltTaglen]byte{} for i, c := range s { diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go index 11acfd88562..59b04100806 100644 --- a/vendor/golang.org/x/text/language/parse.go +++ b/vendor/golang.org/x/text/language/parse.go @@ -43,6 +43,13 @@ func Parse(s string) (t Tag, err error) { // https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. // The resulting tag is canonicalized using the canonicalization type c. func (c CanonType) Parse(s string) (t Tag, err error) { + defer func() { + if recover() != nil { + t = Tag{} + err = language.ErrSyntax + } + }() + tt, err := language.Parse(s) if err != nil { return makeTag(tt), err @@ -79,6 +86,13 @@ func Compose(part ...interface{}) (t Tag, err error) { // tag is returned after canonicalizing using CanonType c. If one or more errors // are encountered, one of the errors is returned. func (c CanonType) Compose(part ...interface{}) (t Tag, err error) { + defer func() { + if recover() != nil { + t = Tag{} + err = language.ErrSyntax + } + }() + var b language.Builder if err = update(&b, part...); err != nil { return und, err @@ -142,6 +156,14 @@ var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight") // Tags with a weight of zero will be dropped. An error will be returned if the // input could not be parsed. func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) { + defer func() { + if recover() != nil { + tag = nil + q = nil + err = language.ErrSyntax + } + }() + var entry string for s != "" { if entry, s = split(s, ','); entry == "" { diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index a98fe77827a..0cfcc8463c2 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -145,7 +145,6 @@ func (r *Reservation) DelayFrom(now time.Time) time.Duration { // Cancel is shorthand for CancelAt(time.Now()). func (r *Reservation) Cancel() { r.CancelAt(time.Now()) - return } // CancelAt indicates that the reservation holder will not perform the reserved action @@ -186,8 +185,6 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.lastEvent = prevEvent } } - - return } // Reserve is shorthand for ReserveN(time.Now(), 1). @@ -367,20 +364,13 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, last = now } - // Avoid making delta overflow below when last is very old. - maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) - elapsed := now.Sub(last) - if elapsed > maxElapsed { - elapsed = maxElapsed - } - // Calculate the new number of tokens, due to time that passed. 
+ elapsed := now.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens } @@ -388,15 +378,11 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, // of time it takes to accumulate them at a rate of limit tokens per second. func (limit Limit) durationFromTokens(tokens float64) time.Duration { seconds := tokens / float64(limit) - return time.Nanosecond * time.Duration(1e9*seconds) + return time.Duration(float64(time.Second) * seconds) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens // which could be accumulated during that duration at a rate of limit tokens per second. func (limit Limit) tokensFromDuration(d time.Duration) float64 { - // Split the integer and fractional parts ourself to minimize rounding errors. - // See golang.org/issues/34861. - sec := float64(d/time.Second) * float64(limit) - nsec := float64(d%time.Second) * float64(limit) - return sec + nsec/1e9 + return d.Seconds() * float64(limit) } diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index 380e514f280..ad5063fdf10 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -15,5 +15,5 @@ approvers: - tallclair - piosz - brancz - - DirectXMan12 - lavalamp + - serathius diff --git a/vendor/k8s.io/klog/v2/go.mod b/vendor/k8s.io/klog/v2/go.mod index eb297b6a1ed..08a2d0f3177 100644 --- a/vendor/k8s.io/klog/v2/go.mod +++ b/vendor/k8s.io/klog/v2/go.mod @@ -2,4 +2,4 @@ module k8s.io/klog/v2 go 1.13 -require github.com/go-logr/logr v0.4.0 +require github.com/go-logr/logr v1.0.0 diff --git a/vendor/k8s.io/klog/v2/go.sum b/vendor/k8s.io/klog/v2/go.sum index 5778f81742b..a1b90e4bc1c 100644 --- a/vendor/k8s.io/klog/v2/go.sum +++ b/vendor/k8s.io/klog/v2/go.sum @@ -1,2 +1,4 @@ -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.0.0-rc1 h1:+ul9F74rBkPajeP8m4o3o0tiglmzNFsPnuhYyBCQ0Sc= +github.com/go-logr/logr v1.0.0-rc1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.0.0 h1:kH951GinvFVaQgy/ki/B3YYmQtRpExGigSJg6O8z5jo= +github.com/go-logr/logr v1.0.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 25483fad138..60c88d3c3b4 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -284,6 +284,7 @@ func (m *moduleSpec) Get() interface{} { var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") +// Set will sets module value // Syntax: -vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { var filter []modulePat @@ -362,6 +363,7 @@ func (t *traceLocation) Get() interface{} { var errTraceSyntax = errors.New("syntax error: expect file.go:234") +// Set will sets backtrace value // Syntax: -log_backtrace_at=gopherflakes.go:234 // Note that unlike vmodule the file extension is included here. 
func (t *traceLocation) Set(value string) error { @@ -507,7 +509,7 @@ type loggingT struct { addDirHeader bool // If set, all output will be redirected unconditionally to the provided logr.Logger - logr logr.Logger + logr *logr.Logger // If true, messages will not be propagated to lower severity log levels oneOutput bool @@ -696,11 +698,11 @@ func (buf *buffer) someDigits(i, d int) int { return copy(buf.tmp[i:], buf.tmp[j:]) } -func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) println(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { buf, file, line := l.header(s, 0) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logr != nil { + // if logger is set, we clear the generated header as we rely on the backing + // logger implementation to print headers + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -708,18 +710,18 @@ func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args args = filter.Filter(args) } fmt.Fprintln(buf, args...) - l.output(s, logr, buf, file, line, false) + l.output(s, logger, buf, 0 /* depth */, file, line, false) } -func (l *loggingT) print(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { - l.printDepth(s, logr, filter, 1, args...) +func (l *loggingT) print(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { + l.printDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -730,14 +732,14 @@ func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, de if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format string, args ...interface{}) { +func (l *loggingT) printf(s severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { buf, file, line := l.header(s, 0) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -748,17 +750,17 @@ func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, file, line, false) + l.output(s, logger, buf, 0 /* depth */, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. 
-func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -769,28 +771,28 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFil if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, file, line, alsoToStderr) + l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr) } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } - if loggr != nil { - loggr.Error(err, msg, keysAndValues...) + if logger != nil { + logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...) return } l.printS(err, errorLog, depth+1, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. -func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } - if loggr != nil { - loggr.Info(msg, keysAndValues...) + if logger != nil { + logger.WithCallDepth(depth+2).Info(msg, keysAndValues...) return } l.printS(nil, infoLog, depth+1, msg, keysAndValues...) @@ -825,6 +827,8 @@ func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { switch v.(type) { case string, error: b.WriteString(fmt.Sprintf("%s=%q", k, v)) + case []byte: + b.WriteString(fmt.Sprintf("%s=%+q", k, v)) default: if _, ok := v.(fmt.Stringer); ok { b.WriteString(fmt.Sprintf("%s=%q", k, v)) @@ -855,13 +859,26 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { // SetLogger will set the backing logr implementation for klog. // If set, all log lines will be suppressed from the regular Output, and // redirected to the logr implementation. -// All log lines include the 'severity', 'file' and 'line' values attached as -// structured logging values. // Use as: // ... // klog.SetLogger(zapr.NewLogger(zapLog)) +// +// To remove a backing logr implemention, use ClearLogger. Setting an +// empty logger with SetLogger(logr.Logger{}) does not work. func SetLogger(logr logr.Logger) { - logging.logr = logr + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.logr = &logr +} + +// ClearLogger removes a backing logr implementation if one was set earlier +// with SetLogger. +func ClearLogger() { + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.logr = nil } // SetOutput sets the output destination for all severities @@ -899,7 +916,7 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. 
-func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) { l.mu.Lock() if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { @@ -911,9 +928,9 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == errorLog { - l.logr.Error(nil, string(data)) + l.logr.WithCallDepth(depth+3).Error(nil, string(data)) } else { - log.Info(string(data)) + log.WithCallDepth(depth + 3).Info(string(data)) } } else if l.toStderr { os.Stderr.Write(data) @@ -1264,7 +1281,7 @@ func (l *loggingT) setV(pc uintptr) Level { // See the documentation of V for more information. type Verbose struct { enabled bool - logr logr.Logger + logr *logr.Logger filter LogFilter } @@ -1272,7 +1289,8 @@ func newVerbose(level Level, b bool) Verbose { if logging.logr == nil { return Verbose{b, nil, logging.filter} } - return Verbose{b, logging.logr.V(int(level)), logging.filter} + v := logging.logr.V(int(level)) + return Verbose{b, &v, logging.filter} } // V reports whether verbosity at the call site is at least the requested level. @@ -1310,9 +1328,14 @@ func V(level Level) Verbose { if runtime.Callers(2, logging.pcs[:]) == 0 { return newVerbose(level, false) } - v, ok := logging.vmap[logging.pcs[0]] + // runtime.Callers returns "return PCs", but we want + // to look up the symbolic information for the call, + // so subtract 1 from the PC. runtime.CallersFrames + // would be cleaner, but allocates. + pc := logging.pcs[0] - 1 + v, ok := logging.vmap[pc] if !ok { - v = logging.setV(logging.pcs[0]) + v = logging.setV(pc) } return newVerbose(level, v >= level) } @@ -1598,3 +1621,20 @@ func KRef(namespace, name string) ObjectRef { Namespace: namespace, } } + +// KObjs returns slice of ObjectRef from an slice of ObjectMeta +func KObjs(arg interface{}) []ObjectRef { + s := reflect.ValueOf(arg) + if s.Kind() != reflect.Slice { + return nil + } + objectRefs := make([]ObjectRef, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + if v, ok := s.Index(i).Interface().(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil + } + } + return objectRefs +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b998c53e8a6..a68d912db8f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,10 +1,29 @@ # github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm +# github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e +github.com/Azure/go-ntlmssp # github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml +# github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6 +github.com/ChrisTrenkamp/goxpath +github.com/ChrisTrenkamp/goxpath/internal/execxp +github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil +github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns +github.com/ChrisTrenkamp/goxpath/internal/xsort +github.com/ChrisTrenkamp/goxpath/lexer +github.com/ChrisTrenkamp/goxpath/parser +github.com/ChrisTrenkamp/goxpath/parser/pathexpr +github.com/ChrisTrenkamp/goxpath/tree +github.com/ChrisTrenkamp/goxpath/tree/xmltree +github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder +github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele 
+github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode +github.com/ChrisTrenkamp/goxpath/xconst # github.com/Masterminds/goutils v1.1.1 github.com/Masterminds/goutils +# github.com/Masterminds/semver v1.5.0 +github.com/Masterminds/semver # github.com/Masterminds/semver/v3 v3.1.1 ## explicit github.com/Masterminds/semver/v3 @@ -34,8 +53,12 @@ github.com/ProtonMail/go-crypto/openpgp/s2k github.com/PuerkitoBio/purell # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 github.com/PuerkitoBio/urlesc +# github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d +github.com/acarl005/stripansi # github.com/acomagu/bufpipe v1.0.3 github.com/acomagu/bufpipe +# github.com/alessio/shellescape v1.4.1 +github.com/alessio/shellescape # github.com/aliyun/alibaba-cloud-sdk-go v1.61.985 ## explicit github.com/aliyun/alibaba-cloud-sdk-go/sdk @@ -50,8 +73,14 @@ github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils github.com/aliyun/alibaba-cloud-sdk-go/services/ecs github.com/aliyun/alibaba-cloud-sdk-go/services/vpc +# github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 +github.com/asaskevich/govalidator +# github.com/avast/retry-go v3.0.0+incompatible +github.com/avast/retry-go # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile +# github.com/bmatcuk/doublestar/v4 v4.0.2 +github.com/bmatcuk/doublestar/v4 # github.com/cavaliergopher/grab/v3 v3.0.1 ## explicit github.com/cavaliergopher/grab/v3 @@ -67,8 +96,10 @@ github.com/containerd/continuity/sysx github.com/coreos/go-semver/semver # github.com/coreos/go-systemd/v22 v22.3.2 github.com/coreos/go-systemd/v22/journal -# github.com/cpuguy83/go-md2man/v2 v2.0.0 +# github.com/cpuguy83/go-md2man/v2 v2.0.1 github.com/cpuguy83/go-md2man/v2/md2man +# github.com/creasty/defaults v1.5.2 +github.com/creasty/defaults # github.com/cyphar/filepath-securejoin v0.2.2 github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 @@ -235,7 +266,7 @@ github.com/go-git/go-git/v5/utils/merkletrie/filesystem github.com/go-git/go-git/v5/utils/merkletrie/index github.com/go-git/go-git/v5/utils/merkletrie/internal/frame github.com/go-git/go-git/v5/utils/merkletrie/noder -# github.com/go-logr/logr v0.4.0 +# github.com/go-logr/logr v1.1.0 github.com/go-logr/logr # github.com/go-ole/go-ole v1.2.6 github.com/go-ole/go-ole @@ -248,6 +279,17 @@ github.com/go-openapi/jsonreference github.com/go-openapi/spec # github.com/go-openapi/swag v0.19.5 github.com/go-openapi/swag +# github.com/go-ozzo/ozzo-validation/v4 v4.3.0 +## explicit +github.com/go-ozzo/ozzo-validation/v4 +github.com/go-ozzo/ozzo-validation/v4/is +# github.com/go-playground/locales v0.14.0 +github.com/go-playground/locales +github.com/go-playground/locales/currency +# github.com/go-playground/universal-translator v0.18.0 +github.com/go-playground/universal-translator +# github.com/go-playground/validator/v10 v10.9.0 +github.com/go-playground/validator/v10 # github.com/gobwas/glob v0.2.3 github.com/gobwas/glob github.com/gobwas/glob/compiler @@ -257,6 +299,8 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings +# github.com/gofrs/uuid v4.2.0+incompatible +github.com/gofrs/uuid # github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto @@ -276,8 +320,9 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function 
github.com/google/go-cmp/cmp/internal/value -# github.com/google/gofuzz v1.1.0 +# github.com/google/gofuzz v1.2.0 github.com/google/gofuzz +github.com/google/gofuzz/bytesource # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/shlex # github.com/google/uuid v1.2.0 @@ -293,6 +338,10 @@ github.com/gorilla/mux # github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 github.com/gregjones/httpcache github.com/gregjones/httpcache/diskcache +# github.com/hashicorp/go-uuid v1.0.2 +github.com/hashicorp/go-uuid +# github.com/hashicorp/go-version v1.4.0 +github.com/hashicorp/go-version # github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru @@ -316,14 +365,81 @@ github.com/imdario/mergo github.com/inconshreveable/mousetrap # github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 github.com/jbenet/go-context/io +# github.com/jcmturner/aescts/v2 v2.0.0 +github.com/jcmturner/aescts/v2 +# github.com/jcmturner/dnsutils/v2 v2.0.0 +github.com/jcmturner/dnsutils/v2 +# github.com/jcmturner/gofork v1.0.0 +github.com/jcmturner/gofork/encoding/asn1 +github.com/jcmturner/gofork/x/crypto/pbkdf2 +# github.com/jcmturner/goidentity/v6 v6.0.1 +github.com/jcmturner/goidentity/v6 +# github.com/jcmturner/gokrb5/v8 v8.4.2 +github.com/jcmturner/gokrb5/v8/asn1tools +github.com/jcmturner/gokrb5/v8/client +github.com/jcmturner/gokrb5/v8/config +github.com/jcmturner/gokrb5/v8/credentials +github.com/jcmturner/gokrb5/v8/crypto +github.com/jcmturner/gokrb5/v8/crypto/common +github.com/jcmturner/gokrb5/v8/crypto/etype +github.com/jcmturner/gokrb5/v8/crypto/rfc3961 +github.com/jcmturner/gokrb5/v8/crypto/rfc3962 +github.com/jcmturner/gokrb5/v8/crypto/rfc4757 +github.com/jcmturner/gokrb5/v8/crypto/rfc8009 +github.com/jcmturner/gokrb5/v8/gssapi +github.com/jcmturner/gokrb5/v8/iana +github.com/jcmturner/gokrb5/v8/iana/addrtype +github.com/jcmturner/gokrb5/v8/iana/adtype +github.com/jcmturner/gokrb5/v8/iana/asnAppTag +github.com/jcmturner/gokrb5/v8/iana/chksumtype +github.com/jcmturner/gokrb5/v8/iana/errorcode +github.com/jcmturner/gokrb5/v8/iana/etypeID +github.com/jcmturner/gokrb5/v8/iana/flags +github.com/jcmturner/gokrb5/v8/iana/keyusage +github.com/jcmturner/gokrb5/v8/iana/msgtype +github.com/jcmturner/gokrb5/v8/iana/nametype +github.com/jcmturner/gokrb5/v8/iana/patype +github.com/jcmturner/gokrb5/v8/kadmin +github.com/jcmturner/gokrb5/v8/keytab +github.com/jcmturner/gokrb5/v8/krberror +github.com/jcmturner/gokrb5/v8/messages +github.com/jcmturner/gokrb5/v8/pac +github.com/jcmturner/gokrb5/v8/service +github.com/jcmturner/gokrb5/v8/spnego +github.com/jcmturner/gokrb5/v8/types +# github.com/jcmturner/rpc/v2 v2.0.3 +github.com/jcmturner/rpc/v2/mstypes +github.com/jcmturner/rpc/v2/ndr # github.com/jmespath/go-jmespath v0.3.0 github.com/jmespath/go-jmespath -# github.com/json-iterator/go v1.1.11 +# github.com/json-iterator/go v1.1.12 github.com/json-iterator/go +# github.com/k0sproject/dig v0.2.0 +## explicit +github.com/k0sproject/dig +# github.com/k0sproject/k0sctl v0.13.1 +## explicit +github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster +github.com/k0sproject/k0sctl/version +# github.com/k0sproject/rig v0.6.4 +## explicit +github.com/k0sproject/rig +github.com/k0sproject/rig/exec +github.com/k0sproject/rig/log +github.com/k0sproject/rig/os +github.com/k0sproject/rig/os/initsystem +github.com/k0sproject/rig/os/registry +github.com/k0sproject/rig/powershell +# github.com/k0sproject/version v0.3.0 
+github.com/k0sproject/version +# github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 +github.com/kballard/go-shellquote # github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 github.com/kevinburke/ssh_config # github.com/kr/fs v0.1.0 github.com/kr/fs +# github.com/leodido/go-urn v1.2.1 +github.com/leodido/go-urn # github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible ## explicit github.com/lestrrat-go/file-rotatelogs @@ -342,6 +458,11 @@ github.com/magiconair/properties github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +# github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 +github.com/masterzen/simplexml/dom +# github.com/masterzen/winrm v0.0.0-20211231115050-232efb40349e +github.com/masterzen/winrm +github.com/masterzen/winrm/soap # github.com/mattn/go-runewidth v0.0.7 github.com/mattn/go-runewidth # github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 @@ -369,7 +490,7 @@ github.com/moby/term github.com/moby/term/windows # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/concurrent -# github.com/modern-go/reflect2 v1.0.1 +# github.com/modern-go/reflect2 v1.0.2 github.com/modern-go/reflect2 # github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 github.com/monochromegane/go-gitignore @@ -458,7 +579,7 @@ github.com/prometheus/procfs/internal/util # github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 ## explicit github.com/rifflock/lfshook -# github.com/russross/blackfriday/v2 v2.0.1 +# github.com/russross/blackfriday/v2 v2.1.0 github.com/russross/blackfriday/v2 # github.com/sealyun/lvscare v1.1.2-alpha.2 ## explicit @@ -478,8 +599,6 @@ github.com/shirou/gopsutil/disk github.com/shirou/gopsutil/internal/common # github.com/shopspring/decimal v1.2.0 github.com/shopspring/decimal -# github.com/shurcooL/sanitized_anchor_name v1.0.0 -github.com/shurcooL/sanitized_anchor_name # github.com/sirupsen/logrus v1.8.1 ## explicit github.com/sirupsen/logrus @@ -574,7 +693,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 +# golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd => golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 ## explicit golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -585,14 +704,16 @@ golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519/internal/edwards25519 golang.org/x/crypto/internal/subtle +golang.org/x/crypto/md4 golang.org/x/crypto/pbkdf2 golang.org/x/crypto/poly1305 golang.org/x/crypto/scrypt +golang.org/x/crypto/sha3 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts -# golang.org/x/net v0.0.0-20210510120150-4163338589ed +# golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd => golang.org/x/net v0.0.0-20210510120150-4163338589ed ## explicit golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -607,13 +728,13 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d +# golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sync v0.0.0-20210220032951-036812b2e83c ## explicit golang.org/x/sync/errgroup -# 
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 +# golang.org/x/sys v0.0.0-20220209214540-3681064d5158 => golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 ## explicit golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -622,9 +743,9 @@ golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d +# golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 golang.org/x/term -# golang.org/x/text v0.3.6 +# golang.org/x/text v0.3.7 golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -646,9 +767,9 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba +# golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac golang.org/x/time/rate -# google.golang.org/appengine v1.6.6 +# google.golang.org/appengine v1.6.7 google.golang.org/appengine/internal google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore @@ -765,7 +886,7 @@ helm.sh/helm/v3/pkg/chart helm.sh/helm/v3/pkg/chart/loader helm.sh/helm/v3/pkg/chartutil helm.sh/helm/v3/pkg/engine -# k8s.io/api v0.21.0 +# k8s.io/api v0.21.0 => k8s.io/api v0.21.0 ## explicit k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 @@ -814,7 +935,7 @@ k8s.io/api/storage/v1beta1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 -# k8s.io/apimachinery v0.21.0 +# k8s.io/apimachinery v0.22.1 => k8s.io/apimachinery v0.21.0 ## explicit k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -869,7 +990,7 @@ k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.21.0 +# k8s.io/client-go v0.22.1 => k8s.io/client-go v0.21.0 ## explicit k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -993,7 +1114,7 @@ k8s.io/client-go/util/workqueue # k8s.io/component-base v0.21.0 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 -# k8s.io/klog/v2 v2.8.0 +# k8s.io/klog/v2 v2.20.0 k8s.io/klog/v2 # k8s.io/kube-proxy v0.21.0 ## explicit @@ -1001,7 +1122,7 @@ k8s.io/kube-proxy/config/v1alpha1 # k8s.io/kubelet v0.21.0 ## explicit k8s.io/kubelet/config/v1beta1 -# k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 +# k8s.io/utils v0.0.0-20210820185131-d34e5cb4466e => k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 ## explicit k8s.io/utils/buffer k8s.io/utils/integer @@ -1088,7 +1209,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/merge2 sigs.k8s.io/kustomize/kyaml/yaml/merge3 sigs.k8s.io/kustomize/kyaml/yaml/schema sigs.k8s.io/kustomize/kyaml/yaml/walk -# sigs.k8s.io/structured-merge-diff/v4 v4.1.0 +# sigs.k8s.io/structured-merge-diff/v4 v4.1.2 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed @@ -1097,3 +1218,10 @@ sigs.k8s.io/structured-merge-diff/v4/value ## explicit sigs.k8s.io/yaml # github.com/docker/docker => github.com/docker/docker v20.10.3-0.20211208011758-87521affb077+incompatible +# golang.org/x/crypto => golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 +# golang.org/x/net => golang.org/x/net v0.0.0-20210510120150-4163338589ed +# 
golang.org/x/sys => golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 +# k8s.io/api => k8s.io/api v0.21.0 +# k8s.io/apimachinery => k8s.io/apimachinery v0.21.0 +# k8s.io/client-go => k8s.io/client-go v0.21.0 +# k8s.io/utils => k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go index 5a8214ae2d4..2b98b729cac 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go @@ -124,13 +124,6 @@ func ReconcileFieldSetWithSchema(fieldset *fieldpath.Set, tv *TypedValue) (*fiel v.schema = tv.schema v.typeRef = tv.typeRef - // We don't reconcile deduced types, which are primarily for use by unstructured CRDs. Deduced - // types do not support atomic or granular tags. Nor does the dynamic schema deduction - // interact well with the reconcile logic. - if v.schema == DeducedParseableType.Schema { - return nil, nil - } - defer v.finished() errs := v.reconcile() @@ -187,19 +180,17 @@ func (v *reconcileWithSchemaWalker) visitListItems(t *schema.List, element *fiel } func (v *reconcileWithSchemaWalker) doList(t *schema.List) (errs ValidationErrors) { - // reconcile lists changed from granular to atomic + // reconcile lists changed from granular to atomic. + // Note that migrations from atomic to granular are not recommended and will + // be treated as if they were always granular. + // + // In this case, the manager that owned the previously atomic field (and all subfields), + // will now own just the top-level field and none of the subfields. if !v.isAtomic && t.ElementRelationship == schema.Atomic { v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields v.toAdd = fieldpath.NewSet(v.path) // add the root of the atomic return errs } - // reconcile lists changed from atomic to granular - if v.isAtomic && t.ElementRelationship == schema.Associative { - v.toAdd, errs = buildGranularFieldSet(v.path, v.value) - if errs != nil { - return errs - } - } if v.fieldSet != nil { errs = v.visitListItems(t, v.fieldSet) } @@ -231,7 +222,18 @@ func (v *reconcileWithSchemaWalker) visitMapItems(t *schema.Map, element *fieldp } func (v *reconcileWithSchemaWalker) doMap(t *schema.Map) (errs ValidationErrors) { - // reconcile maps and structs changed from granular to atomic + // We don't currently reconcile deduced types (unstructured CRDs) or maps that contain only unknown + // fields since deduced types do not yet support atomic or granular tags. + if isUntypedDeducedMap(t) { + return errs + } + + // reconcile maps and structs changed from granular to atomic. + // Note that migrations from atomic to granular are not recommended and will + // be treated as if they were always granular. + // + // In this case the manager that owned the previously atomic field (and all subfields), + // will now own just the top-level field and none of the subfields. 
if !v.isAtomic && t.ElementRelationship == schema.Atomic { if v.fieldSet != nil && v.fieldSet.Size() > 0 { v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields @@ -239,34 +241,12 @@ func (v *reconcileWithSchemaWalker) doMap(t *schema.Map) (errs ValidationErrors) } return errs } - // reconcile maps changed from atomic to granular - if v.isAtomic && (t.ElementRelationship == schema.Separable || t.ElementRelationship == "") { - v.toAdd, errs = buildGranularFieldSet(v.path, v.value) - if errs != nil { - return errs - } - } if v.fieldSet != nil { errs = v.visitMapItems(t, v.fieldSet) } return errs } -func buildGranularFieldSet(path fieldpath.Path, value *TypedValue) (*fieldpath.Set, ValidationErrors) { - - valueFieldSet, err := value.ToFieldSet() - if err != nil { - return nil, errorf("toFieldSet: %v", err) - } - if valueFieldSetAtPath, ok := fieldSetAtPath(valueFieldSet, path); ok { - result := fieldpath.NewSet(path) - resultAtPath := descendToPath(result, path) - *resultAtPath = *valueFieldSetAtPath - return result, nil - } - return nil, nil -} - func fieldSetAtPath(node *fieldpath.Set, path fieldpath.Path) (*fieldpath.Set, bool) { ok := true for _, pe := range path { @@ -293,3 +273,18 @@ func typeRefAtPath(t *schema.Map, pe fieldpath.PathElement) (schema.TypeRef, boo } return tr, tr != schema.TypeRef{} } + +// isUntypedDeducedMap returns true if m has no fields defined, but allows untyped elements. +// This is equivalent to a openAPI object that has x-kubernetes-preserve-unknown-fields=true +// but does not have any properties defined on the object. +func isUntypedDeducedMap(m *schema.Map) bool { + return isUntypedDeducedRef(m.ElementType) && m.Fields == nil +} + +func isUntypedDeducedRef(t schema.TypeRef) bool { + if t.NamedType != nil { + return *t.NamedType == "__untyped_deduced_" + } + atom := t.Inlined + return atom.Scalar != nil && *atom.Scalar == "untyped" +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go index c3e15180a7b..a338d761d43 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go @@ -51,10 +51,22 @@ func (w *removingWalker) doScalar(t *schema.Scalar) ValidationErrors { } func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { + if !w.value.IsList() { + return nil + } l := w.value.AsListUsing(w.allocator) defer w.allocator.Free(l) - // If list is null, empty, or atomic just return - if l == nil || l.Length() == 0 || t.ElementRelationship == schema.Atomic { + // If list is null or empty just return + if l == nil || l.Length() == 0 { + return nil + } + + // atomic lists should return everything in the case of extract + // and nothing in the case of remove (!w.shouldExtract) + if t.ElementRelationship == schema.Atomic { + if w.shouldExtract { + w.out = w.value.Unstructured() + } return nil } @@ -70,7 +82,7 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { // but ignore them when we are removing (i.e. 
!w.shouldExtract)
 		if w.toRemove.Has(path) {
 			if w.shouldExtract {
-				newItems = append(newItems, item.Unstructured())
+				newItems = append(newItems, removeItemsWithSchema(item, w.toRemove, w.schema, t.ElementType, w.shouldExtract).Unstructured())
 			} else {
 				continue
 			}
@@ -92,12 +104,24 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) {
 }
 
 func (w *removingWalker) doMap(t *schema.Map) ValidationErrors {
+	if !w.value.IsMap() {
+		return nil
+	}
 	m := w.value.AsMapUsing(w.allocator)
 	if m != nil {
 		defer w.allocator.Free(m)
 	}
-	// If map is null, empty, or atomic just return
-	if m == nil || m.Empty() || t.ElementRelationship == schema.Atomic {
+	// If map is null or empty just return
+	if m == nil || m.Empty() {
+		return nil
+	}
+
+	// atomic maps should return everything in the case of extract
+	// and nothing in the case of remove (!w.shouldExtract)
+	if t.ElementRelationship == schema.Atomic {
+		if w.shouldExtract {
+			w.out = w.value.Unstructured()
+		}
 		return nil
 	}
 
@@ -118,7 +142,8 @@ func (w *removingWalker) doMap(t *schema.Map) ValidationErrors {
 		// but ignore them when we are removing (i.e. !w.shouldExtract)
 		if w.toRemove.Has(path) {
 			if w.shouldExtract {
-				newMap[k] = val.Unstructured()
+				newMap[k] = removeItemsWithSchema(val, w.toRemove, w.schema, fieldType, w.shouldExtract).Unstructured()
+
 			}
 			return true
 		}

From 7681037fc77c69330a7d8c402b5110a1b7d29264 Mon Sep 17 00:00:00 2001
From: starComingup <1225067236@qq.com>
Date: Thu, 11 Aug 2022 17:48:47 +0800
Subject: [PATCH 2/3] add README about k0s runtime module

Signed-off-by: starComingup <1225067236@qq.com>
---
 pkg/runtime/k0s/README.md | 44 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)
 create mode 100644 pkg/runtime/k0s/README.md

diff --git a/pkg/runtime/k0s/README.md b/pkg/runtime/k0s/README.md
new file mode 100644
index 00000000000..8f532911946
--- /dev/null
+++ b/pkg/runtime/k0s/README.md
@@ -0,0 +1,44 @@
+# k0s Runtime Design
+## basefs
+
+```shell
+.
+├── amd64
+│   ├── bin
+│   │   ├── k0s
+│   │   ├── k0sctl
+│   │   ├── kubectl
+│   │   ├── nerdctl
+│   │   └── seautil
+│   ├── images
+│   │   └── registry.tar.gz
+│   └── Metadata
+├── imageList
+├── Kubefile
+└── rootfs
+    ├── etc
+    │   ├── dump-config.toml
+    │   └── registry.yml
+    └── scripts
+        ├── containerd.sh
+        ├── init-registry.sh
+        └── init.sh
+```
+
+## Introduction
+The k0s runtime defines five phases to install, scale, and reset the cluster.
+
+The basefs contains the binaries, shell scripts, config files, and images. See [sealerio/basefs](https://github.com/sealerio/basefs) for more details.
+
+The runtime works on top of the filesystem and drives the cluster installation by executing the [k0sctl](https://github.com/k0sproject/k0sctl) command.
+
++ Init
+
+  When sealer installs a cluster for the first time, the init phase copies rootfs/bin to /usr/bin via the init.sh script, sets up password-less SSH between the hosts, converts the Clusterfile into a k0sctl config, and installs the private registry.
++ Join
+
+  The join phase prepares the registry certs and runs `k0sctl apply` to reconcile the cluster.
++ Delete
+
+  The delete phase mirrors join, but it cleans up everything installed by the join phase.
++ Reset
+
+  The reset phase removes the cluster through k0sctl and cleans up everything sealer generated for it.
++ Upgrade
+
+  The upgrade phase upgrades the k0s cluster.
\ No newline at end of file

From b5b47d8e1614d9aec09022948862eb6ef4d2c8b3 Mon Sep 17 00:00:00 2001
From: starComingup <1225067236@qq.com>
Date: Thu, 11 Aug 2022 18:14:46 +0800
Subject: [PATCH 3/3] modify k0s cluster cert.

Signed-off-by: starComingup <1225067236@qq.com> --- pkg/runtime/k0s/registry.go | 22 ----------------- pkg/runtime/k0s/utils.go | 48 +++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 22 deletions(-) create mode 100644 pkg/runtime/k0s/utils.go diff --git a/pkg/runtime/k0s/registry.go b/pkg/runtime/k0s/registry.go index e9836f0ac7a..4a1f14b4eac 100644 --- a/pkg/runtime/k0s/registry.go +++ b/pkg/runtime/k0s/registry.go @@ -18,9 +18,6 @@ import ( "fmt" "net" "path/filepath" - - "github.com/sealerio/sealer/common" - "github.com/sealerio/sealer/pkg/cert" ) const ( @@ -96,25 +93,6 @@ func (k *Runtime) GenerateRegistryCert() error { return GenerateRegistryCert(k.getCertsDir(), k.RegConfig.Domain) } -func GenerateRegistryCert(registryCertPath string, BaseName string) error { - regCertConfig := cert.Config{ - Path: registryCertPath, - BaseName: BaseName, - CommonName: BaseName, - DNSNames: []string{BaseName}, - Organization: []string{common.ExecBinaryFileName}, - Year: 100, - } - if BaseName != SeaHub { - regCertConfig.DNSNames = append(regCertConfig.DNSNames, SeaHub) - } - crt, key, err := cert.NewCaCertAndKey(regCertConfig) - if err != nil { - return err - } - return cert.WriteCertAndKey(regCertConfig.Path, regCertConfig.BaseName, crt, key) -} - func (k *Runtime) SendRegistryCert(host []net.IP) error { err := k.sendRegistryCertAndKey() if err != nil { diff --git a/pkg/runtime/k0s/utils.go b/pkg/runtime/k0s/utils.go new file mode 100644 index 00000000000..9143f471d98 --- /dev/null +++ b/pkg/runtime/k0s/utils.go @@ -0,0 +1,48 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k0s + +import ( + "fmt" + + "github.com/sealerio/sealer/common" + "github.com/sealerio/sealer/pkg/clustercert/cert" +) + +func GenerateRegistryCert(registryCertPath string, baseName string) error { + regCertConfig := cert.CertificateDescriptor{ + CommonName: baseName, + DNSNames: []string{baseName}, + Organization: []string{common.ExecBinaryFileName}, + Year: 100, + } + if baseName != SeaHub { + regCertConfig.DNSNames = append(regCertConfig.DNSNames, SeaHub) + } + + caGenerator := cert.NewAuthorityCertificateGenerator(regCertConfig) + caCert, caKey, err := caGenerator.Generate() + if err != nil { + return fmt.Errorf("unable to generate %s cert: %v", baseName, err) + } + + // write cert file to disk + err = cert.NewCertificateFileManger(registryCertPath, baseName).Write(caCert, caKey) + if err != nil { + return fmt.Errorf("unable to save %s cert: %v", baseName, err) + } + + return nil +}
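
For reviewers who want to try the new helper out of context, here is a minimal, hypothetical sketch of how `GenerateRegistryCert` from the new `pkg/runtime/k0s/utils.go` could be invoked. The certs directory and registry domain below are illustrative placeholders only; inside the runtime they come from `k.getCertsDir()` and `k.RegConfig.Domain` as shown in registry.go above.

```go
package main

import (
	"log"

	"github.com/sealerio/sealer/pkg/runtime/k0s"
)

func main() {
	// Illustrative values only: the k0s runtime normally resolves these
	// from k.getCertsDir() and k.RegConfig.Domain.
	certsDir := "/var/lib/sealer/data/my-cluster/certs"
	registryDomain := "sea.hub"

	// Generates a self-signed CA certificate/key pair for the registry
	// domain (appending the SeaHub alias when the domain differs) and
	// writes both files into certsDir.
	if err := k0s.GenerateRegistryCert(certsDir, registryDomain); err != nil {
		log.Fatalf("failed to generate registry cert: %v", err)
	}
}
```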